Merge tag 'linux-kselftest-next-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kerne...
[linux-2.6-microblaze.git] / drivers / thunderbolt / xdomain.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt XDomain discovery protocol support
4  *
5  * Copyright (C) 2017, Intel Corporation
6  * Authors: Michael Jamet <michael.jamet@intel.com>
7  *          Mika Westerberg <mika.westerberg@linux.intel.com>
8  */
9
10 #include <linux/device.h>
11 #include <linux/delay.h>
12 #include <linux/kmod.h>
13 #include <linux/module.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/prandom.h>
16 #include <linux/utsname.h>
17 #include <linux/uuid.h>
18 #include <linux/workqueue.h>
19
20 #include "tb.h"
21
22 #define XDOMAIN_DEFAULT_TIMEOUT                 1000 /* ms */
23 #define XDOMAIN_UUID_RETRIES                    10
24 #define XDOMAIN_PROPERTIES_RETRIES              10
25 #define XDOMAIN_PROPERTIES_CHANGED_RETRIES      10
26 #define XDOMAIN_BONDING_WAIT                    100  /* ms */
27 #define XDOMAIN_DEFAULT_MAX_HOPID               15
28
/*
 * struct xdomain_request_work - Deferred handling of an incoming XDomain packet
 * @work: Work item that runs tb_xdp_handle_request()
 * @pkg: Private copy of the received packet (freed by the work function)
 * @tb: Domain the packet arrived on; a reference is held until handled
 */
struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};
34
35 static bool tb_xdomain_enabled = true;
36 module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
37 MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
38
39 /*
40  * Serializes access to the properties and protocol handlers below. If
41  * you need to take both this lock and the struct tb_xdomain lock, take
42  * this one first.
43  */
44 static DEFINE_MUTEX(xdomain_lock);
45
46 /* Properties exposed to the remote domains */
47 static struct tb_property_dir *xdomain_property_dir;
48 static u32 xdomain_property_block_gen;
49
50 /* Additional protocol handlers */
51 static LIST_HEAD(protocol_handlers);
52
53 /* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
54 static const uuid_t tb_xdp_uuid =
55         UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
56                   0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
57
58 bool tb_is_xdomain_enabled(void)
59 {
60         return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
61 }
62
/*
 * Matches an incoming control packet against an outstanding XDomain
 * request: error packets always match, XDomain responses must agree
 * with the request in route and protocol UUID, anything else does not
 * match.
 */
static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		/* Reject responses shorter than what the caller expects */
		if (pkg->frame.size < req->response_size / 4)
			return false;

		/*
		 * Make sure route matches. The top bit of route_hi
		 * differs between request and response, so mask it off
		 * before comparing.
		 */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}
95
/*
 * Copies the matched packet payload into the request's response buffer
 * and marks the request successful. Always consumes the packet.
 */
static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}
103
/*
 * Completion callback for __tb_xdomain_response(): drops the reference
 * taken on the request when it was posted.
 */
static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}
108
/*
 * Posts an XDomain response packet asynchronously. The request object
 * is released by response_ready() once the control channel is done
 * with it; the caller does not wait for completion.
 */
static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}
126
/**
 * tb_xdomain_response() - Send a XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send a XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	/* Fire-and-forget: the completion callback releases the request */
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
145
/*
 * Sends an XDomain request and synchronously waits for the matching
 * response or @timeout_msec, whichever comes first. Returns 0 on
 * success or a negative errno.
 */
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	/*
	 * A positive err (1) is not an errno but indicates an error
	 * packet was received; map it to -EIO for the caller.
	 */
	return res.err == 1 ? -EIO : res.err;
}
173
/**
 * tb_xdomain_request() - Send a XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or when timeout triggers. Whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,
	void *response, size_t response_size,
	enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	/* Thin wrapper resolving the control channel from the XDomain */
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
201
/*
 * Fills in the common XDomain packet header: route, payload length,
 * sequence number, packet type and the XDP protocol UUID. @size is the
 * total packet size in bytes, including the header.
 */
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	/* Length field counts dwords following the basic xd_hdr */
	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
216
217 static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
218 {
219         const struct tb_xdp_error_response *error;
220
221         if (hdr->type != ERROR_RESPONSE)
222                 return 0;
223
224         error = (const struct tb_xdp_error_response *)hdr;
225
226         switch (error->error) {
227         case ERROR_UNKNOWN_PACKET:
228         case ERROR_UNKNOWN_DOMAIN:
229                 return -EIO;
230         case ERROR_NOT_SUPPORTED:
231                 return -ENOTSUPP;
232         case ERROR_NOT_READY:
233                 return -EAGAIN;
234         default:
235                 break;
236         }
237
238         return 0;
239 }
240
/*
 * Asks the domain at @route for its UUID. @retry selects the sequence
 * number (retry % 4). On success the remote UUID is copied to @uuid
 * and 0 is returned.
 */
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	/* The reply may itself be an XDP error packet */
	ret = tb_xdp_handle_error(&res.hdr);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	return 0;
}
267
/*
 * Replies to a remote UUID request with our @uuid and our end of the
 * route.
 */
static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
284
/*
 * Sends an ERROR_RESPONSE packet carrying @error back to the domain at
 * @route.
 */
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
298
/*
 * Fetches the remote properties block from the domain at @route. The
 * block is transferred in chunks of at most
 * TB_XDP_PROPERTIES_MAX_DATA_LENGTH dwords; req.offset is advanced
 * until the remote-advertised data_length has been read.
 *
 * On success *@block points to a kcalloc'd buffer (caller frees),
 * *@generation is filled from the last response, and the block length
 * in dwords is returned. Returns a negative errno on failure.
 */
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		/* Convert to the number of data dwords in this chunk */
		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		/* The remote must echo back the offset we requested */
		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}
390
/*
 * Answers a remote PROPERTIES_REQUEST with a chunk of our local
 * property block starting at the requested offset. Requests addressed
 * to a different UUID are answered with ERROR_UNKNOWN_DOMAIN since
 * forwarding is not implemented.
 */
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though which we might add
	 * support later on.
	 */
	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, xd->route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xd->lock);

	/* An offset past the end of our block is invalid */
	if (req->offset >= xd->local_property_block_len) {
		mutex_unlock(&xd->lock);
		return -EINVAL;
	}

	/* Send at most TB_XDP_PROPERTIES_MAX_DATA_LENGTH dwords per reply */
	len = xd->local_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xd->lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xd->local_property_block_gen;
	res->data_length = xd->local_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, xd->local_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);

	mutex_unlock(&xd->lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}
444
/*
 * Notifies the domain at @route that our properties have changed.
 * @retry selects the sequence number (retry % 4).
 */
static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	/* Success unless the reply was an XDP error packet */
	return tb_xdp_handle_error(&res.hdr);
}
467
/* Acknowledges a remote PROPERTIES_CHANGED_REQUEST */
static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}
479
480 /**
481  * tb_register_protocol_handler() - Register protocol handler
482  * @handler: Handler to register
483  *
484  * This allows XDomain service drivers to hook into incoming XDomain
485  * messages. After this function is called the service driver needs to
486  * be able to handle calls to callback whenever a package with the
487  * registered protocol is received.
488  */
489 int tb_register_protocol_handler(struct tb_protocol_handler *handler)
490 {
491         if (!handler->uuid || !handler->callback)
492                 return -EINVAL;
493         if (uuid_equal(handler->uuid, &tb_xdp_uuid))
494                 return -EINVAL;
495
496         mutex_lock(&xdomain_lock);
497         list_add_tail(&handler->list, &protocol_handlers);
498         mutex_unlock(&xdomain_lock);
499
500         return 0;
501 }
502 EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
503
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	/* Removal is serialized with handler registration by xdomain_lock */
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
517
/*
 * Rebuilds @xd's local property block from the global property
 * template when the cached copy is missing or older than the current
 * xdomain_property_block_gen. Takes xdomain_lock before xd->lock, per
 * the lock ordering documented at xdomain_lock.
 */
static void update_property_block(struct tb_xdomain *xd)
{
	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/*
	 * If the local property block is not up-to-date, rebuild it now
	 * based on the global property template.
	 */
	if (!xd->local_property_block ||
	    xd->local_property_block_gen < xdomain_property_block_gen) {
		struct tb_property_dir *dir;
		int ret, block_len;
		u32 *block;

		dir = tb_property_copy_dir(xdomain_property_dir);
		if (!dir) {
			dev_warn(&xd->dev, "failed to copy properties\n");
			goto out_unlock;
		}

		/* Fill in non-static properties now */
		tb_property_add_text(dir, "deviceid", utsname()->nodename);
		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);

		/* First pass computes the required block length */
		ret = tb_property_format_dir(dir, NULL, 0);
		if (ret < 0) {
			dev_warn(&xd->dev, "local property block creation failed\n");
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		block_len = ret;
		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
		if (!block) {
			tb_property_free_dir(dir);
			goto out_unlock;
		}

		/* Second pass serializes the directory into the block */
		ret = tb_property_format_dir(dir, block, block_len);
		if (ret) {
			dev_warn(&xd->dev, "property block generation failed\n");
			tb_property_free_dir(dir);
			kfree(block);
			goto out_unlock;
		}

		tb_property_free_dir(dir);
		/* Release the previous block */
		kfree(xd->local_property_block);
		/* Assign new one */
		xd->local_property_block = block;
		xd->local_property_block_len = block_len;
		xd->local_property_block_gen = xdomain_property_block_gen;
	}

out_unlock:
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);
}
577
/*
 * Work function dispatching a single incoming XDomain request that was
 * queued by tb_xdp_schedule_request(). Frees the packet copy and drops
 * the domain reference when done.
 */
static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	struct tb_xdomain *xd;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* The top bit of the 64-bit route is masked off before use */
	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	/* No root switch UUID yet: tell the remote to retry later */
	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	tb_dbg(tb, "%llx: received XDomain request %#x\n", route, pkg->type);

	/* Refresh our local property block before answering */
	xd = tb_xdomain_find_by_route_locked(tb, route);
	if (xd)
		update_property_block(xd);

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		if (xd) {
			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
				(const struct tb_xdp_properties *)pkg);
		}
		break;

	case PROPERTIES_CHANGED_REQUEST:
		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		if (xd && device_is_registered(&xd->dev)) {
			queue_delayed_work(tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(50));
		}
		break;

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	default:
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	/* xd may be NULL here; tb_xdomain_put() is expected to tolerate that */
	tb_xdomain_put(xd);

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}
659
660 static bool
661 tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
662                         size_t size)
663 {
664         struct xdomain_request_work *xw;
665
666         xw = kmalloc(sizeof(*xw), GFP_KERNEL);
667         if (!xw)
668                 return false;
669
670         INIT_WORK(&xw->work, tb_xdp_handle_request);
671         xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
672         if (!xw->pkg) {
673                 kfree(xw);
674                 return false;
675         }
676         xw->tb = tb_domain_get(tb);
677
678         schedule_work(&xw->work);
679         return true;
680 }
681
/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	/* All service drivers live on the Thunderbolt bus */
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
694
/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
706
/* sysfs: prints the service key, escaping non-printable characters */
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
719
/*
 * Formats "tbsvc:k<key>p<prtcid>v<prtcvers>r<prtcrevs>" into @buf.
 * Returns snprintf() semantics: the full would-be length, which may
 * equal or exceed @size on truncation.
 */
static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
725
726 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
727                              char *buf)
728 {
729         struct tb_service *svc = container_of(dev, struct tb_service, dev);
730
731         /* Full buffer size except new line and null termination */
732         get_modalias(svc, buf, PAGE_SIZE - 2);
733         return sprintf(buf, "%s\n", buf);
734 }
735 static DEVICE_ATTR_RO(modalias);
736
/* sysfs: protocol identifier (svc->prtcid) */
static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

/* sysfs: protocol version (svc->prtcvers) */
static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

/* sysfs: protocol revision (svc->prtcrevs) */
static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

/* sysfs: protocol settings bitmask (svc->prtcstns), printed in hex */
static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
772
/* sysfs attributes exposed by every Thunderbolt service device */
static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

/* NULL-terminated group list wired into tb_service_type below */
static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};
791
/*
 * Adds MODALIAS to the uevent environment for service devices.
 * NOTE(review): get_modalias() truncates via snprintf() if the
 * modalias exceeds 64 bytes, and the return value is not checked here
 * — presumably 64 bytes is always enough; confirm against key length
 * limits.
 */
static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}
800
/*
 * Device release callback: tears down debugfs, returns the service ID
 * to the parent XDomain's IDA and frees the service.
 */
static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}
811
/* Device type shared by all XDomain service devices */
struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
819
820 static int remove_missing_service(struct device *dev, void *data)
821 {
822         struct tb_xdomain *xd = data;
823         struct tb_service *svc;
824
825         svc = tb_to_service(dev);
826         if (!svc)
827                 return 0;
828
829         if (!tb_property_find(xd->remote_properties, svc->key,
830                               TB_PROPERTY_TYPE_DIRECTORY))
831                 device_unregister(dev);
832
833         return 0;
834 }
835
836 static int find_service(struct device *dev, void *data)
837 {
838         const struct tb_property *p = data;
839         struct tb_service *svc;
840
841         svc = tb_to_service(dev);
842         if (!svc)
843                 return 0;
844
845         return !strcmp(svc->key, p->key);
846 }
847
/*
 * Fills @svc from the standard value properties inside @property's
 * directory (prtcid, prtcvers, prtcrevs, prtcstns — each optional) and
 * duplicates the property key as the service key. Returns -ENOMEM when
 * the key cannot be duplicated.
 */
static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}
874
/*
 * Synchronizes @xd's child service devices with its current
 * remote_properties: first removes services that are no longer
 * advertised, then registers a new tb_service for every directory
 * property that does not yet have one. Enumeration stops silently on
 * the first allocation or registration failure.
 */
static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->remote_properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		/* On failure put_device() frees svc via tb_service_release() */
		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}
929
/*
 * Populate @xd identification fields from the parsed remote property
 * directory @dir.
 *
 * "deviceid" and "vendorid" immediate values are mandatory; %-EINVAL is
 * returned if either is missing. "maxhopid" and the text variants of
 * deviceid/vendorid are optional. Returns %0 on success.
 */
static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
	/*
	 * USB4 inter-domain spec suggests using 15 as HopID if the
	 * other end does not announce it in a property. This is for
	 * TBT3 compatibility.
	 */
	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;

	/* Drop any previously cached names before re-reading them */
	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties; a failed kstrdup() just leaves the name NULL */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}
969
/* Return the switch the XDomain is connected to (its parent device) */
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}
974
975 static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
976 {
977         bool change = false;
978         struct tb_port *port;
979         int ret;
980
981         port = tb_port_at(xd->route, tb_xdomain_parent(xd));
982
983         ret = tb_port_get_link_speed(port);
984         if (ret < 0)
985                 return ret;
986
987         if (xd->link_speed != ret)
988                 change = true;
989
990         xd->link_speed = ret;
991
992         ret = tb_port_get_link_width(port);
993         if (ret < 0)
994                 return ret;
995
996         if (xd->link_width != ret)
997                 change = true;
998
999         xd->link_width = ret;
1000
1001         if (change)
1002                 kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
1003
1004         return 0;
1005 }
1006
/*
 * Delayed work that asks the remote domain for its UUID. On transient
 * failure the work re-queues itself until xd->uuid_retries runs out.
 * Once the UUID is known, the normal property-exchange works are
 * started.
 */
static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_uuid_work.work);
	struct tb *tb = xd->tb;
	uuid_t uuid;
	int ret;

	dev_dbg(&xd->dev, "requesting remote UUID\n");

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
	if (ret < 0) {
		if (xd->uuid_retries-- > 0) {
			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return;
	}

	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);

	/* Our own UUID coming back means both ends are the same domain */
	if (uuid_equal(&uuid, xd->local_uuid))
		dev_dbg(&xd->dev, "intra-domain loop detected\n");

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return;
	}

	/* Now we can start the normal properties exchange */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}
1058
/*
 * Delayed work that fetches the remote property block, parses it and
 * either adds the XDomain device to the bus (first successful exchange)
 * or notifies userspace that the properties changed. Retries itself on
 * transient failure until xd->properties_retries runs out.
 *
 * xd->lock protects the remote property fields while they are swapped.
 */
static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	dev_dbg(&xd->dev, "requesting remote properties\n");

	/* On success @ret is the length of @block in dwords */
	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			dev_dbg(&xd->dev,
				"failed to request remote properties, retrying\n");
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	/* Reset the retry budget for the next exchange */
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->remote_properties && gen <= xd->remote_property_block_gen)
		goto err_free_block;

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->remote_properties) {
		tb_property_free_dir(xd->remote_properties);
		update = true;
	}

	xd->remote_properties = dir;
	xd->remote_property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
			 xd->vendor, xd->device);
		if (xd->vendor_name && xd->device_name)
			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
				 xd->device_name);
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}
1153
1154 static void tb_xdomain_properties_changed(struct work_struct *work)
1155 {
1156         struct tb_xdomain *xd = container_of(work, typeof(*xd),
1157                                              properties_changed_work.work);
1158         int ret;
1159
1160         dev_dbg(&xd->dev, "sending properties changed notification\n");
1161
1162         ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
1163                                 xd->properties_changed_retries, xd->local_uuid);
1164         if (ret) {
1165                 if (xd->properties_changed_retries-- > 0) {
1166                         dev_dbg(&xd->dev,
1167                                 "failed to send properties changed notification, retrying\n");
1168                         queue_delayed_work(xd->tb->wq,
1169                                            &xd->properties_changed_work,
1170                                            msecs_to_jiffies(1000));
1171                 }
1172                 dev_err(&xd->dev, "failed to send properties changed notification\n");
1173                 return;
1174         }
1175
1176         xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
1177 }
1178
1179 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
1180                            char *buf)
1181 {
1182         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1183
1184         return sprintf(buf, "%#x\n", xd->device);
1185 }
1186 static DEVICE_ATTR_RO(device);
1187
1188 static ssize_t
1189 device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1190 {
1191         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1192         int ret;
1193
1194         if (mutex_lock_interruptible(&xd->lock))
1195                 return -ERESTARTSYS;
1196         ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
1197         mutex_unlock(&xd->lock);
1198
1199         return ret;
1200 }
1201 static DEVICE_ATTR_RO(device_name);
1202
1203 static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
1204                              char *buf)
1205 {
1206         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1207
1208         return sprintf(buf, "%d\n", xd->remote_max_hopid);
1209 }
1210 static DEVICE_ATTR_RO(maxhopid);
1211
1212 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
1213                            char *buf)
1214 {
1215         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1216
1217         return sprintf(buf, "%#x\n", xd->vendor);
1218 }
1219 static DEVICE_ATTR_RO(vendor);
1220
1221 static ssize_t
1222 vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
1223 {
1224         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1225         int ret;
1226
1227         if (mutex_lock_interruptible(&xd->lock))
1228                 return -ERESTARTSYS;
1229         ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
1230         mutex_unlock(&xd->lock);
1231
1232         return ret;
1233 }
1234 static DEVICE_ATTR_RO(vendor_name);
1235
1236 static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1237                               char *buf)
1238 {
1239         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1240
1241         return sprintf(buf, "%pUb\n", xd->remote_uuid);
1242 }
1243 static DEVICE_ATTR_RO(unique_id);
1244
1245 static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
1246                           char *buf)
1247 {
1248         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1249
1250         return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
1251 }
1252
1253 static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
1254 static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
1255
1256 static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
1257                           char *buf)
1258 {
1259         struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1260
1261         return sprintf(buf, "%u\n", xd->link_width);
1262 }
1263
1264 static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
1265 static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
1266
/* sysfs attributes exposed for each XDomain device (kept sorted) */
static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_maxhopid.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

/* Attached via xd->dev.groups in tb_xdomain_alloc() */
static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};
1289
/*
 * Device release callback: frees everything tb_xdomain_alloc() and the
 * property exchange allocated, and drops the parent reference taken in
 * tb_xdomain_alloc(). Called when the last reference to xd->dev goes
 * away.
 */
static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	kfree(xd->local_property_block);
	tb_property_free_dir(xd->remote_properties);
	ida_destroy(&xd->out_hopids);
	ida_destroy(&xd->in_hopids);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}
1308
1309 static void start_handshake(struct tb_xdomain *xd)
1310 {
1311         xd->uuid_retries = XDOMAIN_UUID_RETRIES;
1312         xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
1313         xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
1314
1315         if (xd->needs_uuid) {
1316                 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
1317                                    msecs_to_jiffies(100));
1318         } else {
1319                 /* Start exchanging properties with the other host */
1320                 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1321                                    msecs_to_jiffies(100));
1322                 queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
1323                                    msecs_to_jiffies(1000));
1324         }
1325 }
1326
/*
 * Stop the discovery handshake. Retry counters are zeroed first so a
 * work that is currently running does not re-queue itself; the UUID
 * work is cancelled before the property works because it is the one
 * that queues them.
 */
static void stop_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = 0;
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_uuid_work);
	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}
1337
/* System sleep: pause the discovery handshake while suspended */
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

/* System resume: restart the handshake from scratch */
static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	start_handshake(tb_to_xdomain(dev));
	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
1353
/* Device type shared by all XDomain devices (see tb_to_xdomain()) */
struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);
1360
/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through the connection to the
 *          other domain is reached).
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put(). Returns %NULL
 * on allocation failure.
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	/* Our side's HopID range comes from the downstream port config */
	xd->local_max_hopid = down->config.max_in_hop_id;
	ida_init(&xd->service_ids);
	ida_init(&xd->in_hopids);
	ida_init(&xd->out_hopids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		/* UUID will be fetched later by tb_xdomain_get_uuid() */
		xd->needs_uuid = true;
	}

	device_initialize(&xd->dev);
	/* Parent reference is dropped in tb_xdomain_release() */
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
	if (remote_uuid)
		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}
1442
/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}
1457
/* device_for_each_child() callback: unregister every service device */
static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}
1463
/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	/* Handshake may have failed before device_add() was reached */
	if (!device_is_registered(&xd->dev)) {
		put_device(&xd->dev);
	} else {
		dev_info(&xd->dev, "host disconnected\n");
		device_unregister(&xd->dev);
	}
}
1494
1495 /**
1496  * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
1497  * @xd: XDomain connection
1498  *
1499  * Lane bonding is disabled by default for XDomains. This function tries
1500  * to enable bonding by first enabling the port and waiting for the CL0
1501  * state.
1502  *
1503  * Return: %0 in case of success and negative errno in case of error.
1504  */
1505 int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
1506 {
1507         struct tb_port *port;
1508         int ret;
1509
1510         port = tb_port_at(xd->route, tb_xdomain_parent(xd));
1511         if (!port->dual_link_port)
1512                 return -ENODEV;
1513
1514         ret = tb_port_enable(port->dual_link_port);
1515         if (ret)
1516                 return ret;
1517
1518         ret = tb_wait_for_port(port->dual_link_port, true);
1519         if (ret < 0)
1520                 return ret;
1521         if (!ret)
1522                 return -ENOTCONN;
1523
1524         ret = tb_port_lane_bonding_enable(port);
1525         if (ret) {
1526                 tb_port_warn(port, "failed to enable lane bonding\n");
1527                 return ret;
1528         }
1529
1530         ret = tb_port_wait_for_link_width(port, 2, 100);
1531         if (ret) {
1532                 tb_port_warn(port, "timeout enabling lane bonding\n");
1533                 return ret;
1534         }
1535
1536         tb_port_update_credits(port);
1537         tb_xdomain_update_link_attributes(xd);
1538
1539         dev_dbg(&xd->dev, "lane bonding enabled\n");
1540         return 0;
1541 }
1542 EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
1543
1544 /**
1545  * tb_xdomain_lane_bonding_disable() - Disable lane bonding
1546  * @xd: XDomain connection
1547  *
1548  * Lane bonding is disabled by default for XDomains. If bonding has been
1549  * enabled, this function can be used to disable it.
1550  */
1551 void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
1552 {
1553         struct tb_port *port;
1554
1555         port = tb_port_at(xd->route, tb_xdomain_parent(xd));
1556         if (port->dual_link_port) {
1557                 tb_port_lane_bonding_disable(port);
1558                 if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
1559                         tb_port_warn(port, "timeout disabling lane bonding\n");
1560                 tb_port_disable(port->dual_link_port);
1561                 tb_port_update_credits(port);
1562                 tb_xdomain_update_link_attributes(xd);
1563
1564                 dev_dbg(&xd->dev, "lane bonding disabled\n");
1565         }
1566 }
1567 EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
1568
1569 /**
1570  * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
1571  * @xd: XDomain connection
1572  * @hopid: Preferred HopID or %-1 for next available
1573  *
1574  * Returns allocated HopID or negative errno. Specifically returns
1575  * %-ENOSPC if there are no more available HopIDs. Returned HopID is
1576  * guaranteed to be within range supported by the input lane adapter.
1577  * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
1578  */
1579 int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
1580 {
1581         if (hopid < 0)
1582                 hopid = TB_PATH_MIN_HOPID;
1583         if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
1584                 return -EINVAL;
1585
1586         return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
1587                                GFP_KERNEL);
1588 }
1589 EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
1590
/**
 * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the output lane adapter.
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
 */
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
	if (hopid < 0)
		hopid = TB_PATH_MIN_HOPID;
	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
		return -EINVAL;

	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
			       GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
1612
/**
 * tb_xdomain_release_in_hopid() - Release input HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);

/**
 * tb_xdomain_release_out_hopid() - Release output HopID
 * @xd: XDomain connection
 * @hopid: HopID to release
 */
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
	ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
1634
/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * path. If a transmit or receive path is not needed, pass %-1 for those
 * parameters.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring)
{
	/* Delegate to the connection manager of the domain */
	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
					       transmit_ring, receive_path,
					       receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
1659
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * This does the opposite of tb_xdomain_enable_paths(). After call to
 * this the caller is not expected to use the rings anymore. Passing %-1
 * as path/ring parameter means don't care. Normally the callers should
 * pass the same values here as they do when paths are enabled.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring)
{
	/* Delegate to the connection manager of the domain */
	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
						  transmit_ring, receive_path,
						  receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
1684
/*
 * Search criteria for switch_find_xdomain(). Exactly one of the keys is
 * used: a non-NULL @uuid, a non-zero @link (together with @depth), or a
 * non-zero @route.
 */
struct tb_xdomain_lookup {
	const uuid_t *uuid;	/* Remote domain UUID, or NULL */
	u8 link;		/* Root switch link number, 0 = unused */
	u8 depth;		/* Depth in the link (used with @link) */
	u64 route;		/* Route string, 0 = unused */
};
1691
1692 static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
1693         const struct tb_xdomain_lookup *lookup)
1694 {
1695         struct tb_port *port;
1696
1697         tb_switch_for_each_port(sw, port) {
1698                 struct tb_xdomain *xd;
1699
1700                 if (port->xdomain) {
1701                         xd = port->xdomain;
1702
1703                         if (lookup->uuid) {
1704                                 if (xd->remote_uuid &&
1705                                     uuid_equal(xd->remote_uuid, lookup->uuid))
1706                                         return xd;
1707                         } else if (lookup->link &&
1708                                    lookup->link == xd->link &&
1709                                    lookup->depth == xd->depth) {
1710                                 return xd;
1711                         } else if (lookup->route &&
1712                                    lookup->route == xd->route) {
1713                                 return xd;
1714                         }
1715                 } else if (tb_port_has_remote(port)) {
1716                         xd = switch_find_xdomain(port->remote->sw, lookup);
1717                         if (xd)
1718                                 return xd;
1719                 }
1720         }
1721
1722         return NULL;
1723 }
1724
1725 /**
1726  * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
1727  * @tb: Domain where the XDomain belongs to
1728  * @uuid: UUID to look for
1729  *
1730  * Finds XDomain by walking through the Thunderbolt topology below @tb.
1731  * The returned XDomain will have its reference count increased so the
1732  * caller needs to call tb_xdomain_put() when it is done with the
1733  * object.
1734  *
1735  * This will find all XDomains including the ones that are not yet added
1736  * to the bus (handshake is still in progress).
1737  *
1738  * The caller needs to hold @tb->lock.
1739  */
1740 struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
1741 {
1742         struct tb_xdomain_lookup lookup;
1743         struct tb_xdomain *xd;
1744
1745         memset(&lookup, 0, sizeof(lookup));
1746         lookup.uuid = uuid;
1747
1748         xd = switch_find_xdomain(tb->root_switch, &lookup);
1749         return tb_xdomain_get(xd);
1750 }
1751 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
1752
1753 /**
1754  * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
1755  * @tb: Domain where the XDomain belongs to
1756  * @link: Root switch link number
1757  * @depth: Depth in the link
1758  *
1759  * Finds XDomain by walking through the Thunderbolt topology below @tb.
1760  * The returned XDomain will have its reference count increased so the
1761  * caller needs to call tb_xdomain_put() when it is done with the
1762  * object.
1763  *
1764  * This will find all XDomains including the ones that are not yet added
1765  * to the bus (handshake is still in progress).
1766  *
1767  * The caller needs to hold @tb->lock.
1768  */
1769 struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
1770                                                  u8 depth)
1771 {
1772         struct tb_xdomain_lookup lookup;
1773         struct tb_xdomain *xd;
1774
1775         memset(&lookup, 0, sizeof(lookup));
1776         lookup.link = link;
1777         lookup.depth = depth;
1778
1779         xd = switch_find_xdomain(tb->root_switch, &lookup);
1780         return tb_xdomain_get(xd);
1781 }
1782
1783 /**
1784  * tb_xdomain_find_by_route() - Find an XDomain by route string
1785  * @tb: Domain where the XDomain belongs to
1786  * @route: XDomain route string
1787  *
1788  * Finds XDomain by walking through the Thunderbolt topology below @tb.
1789  * The returned XDomain will have its reference count increased so the
1790  * caller needs to call tb_xdomain_put() when it is done with the
1791  * object.
1792  *
1793  * This will find all XDomains including the ones that are not yet added
1794  * to the bus (handshake is still in progress).
1795  *
1796  * The caller needs to hold @tb->lock.
1797  */
1798 struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
1799 {
1800         struct tb_xdomain_lookup lookup;
1801         struct tb_xdomain *xd;
1802
1803         memset(&lookup, 0, sizeof(lookup));
1804         lookup.route = route;
1805
1806         xd = switch_find_xdomain(tb->root_switch, &lookup);
1807         return tb_xdomain_get(xd);
1808 }
1809 EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
1810
/**
 * tb_xdomain_handle_request() - Handle incoming XDomain control packet
 * @tb: Domain the packet arrived on
 * @type: Raw config packet type
 * @buf: Packet payload starting at the XDomain header
 * @size: Size of @buf in bytes
 *
 * Returns %true when the packet was consumed here (including malformed
 * packets that are silently dropped, and discovery requests handed off
 * to tb_xdp_schedule_request()) and %false when it should be passed on.
 */
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet is at least size of the header */
	/*
	 * The length field counts dwords and excludes the basic xd_hdr
	 * part (hence the /4 arithmetic). Inconsistent or too-short
	 * packets are dropped by claiming them as handled.
	 */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		/*
		 * Drop the lock for the callback so the handler may
		 * itself take xdomain_lock (e.g. to (un)register
		 * handlers); the _safe iteration tolerates the current
		 * entry being removed meanwhile.
		 */
		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		/* A positive return means the handler consumed the packet */
		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}
1853
1854 static int update_xdomain(struct device *dev, void *data)
1855 {
1856         struct tb_xdomain *xd;
1857
1858         xd = tb_to_xdomain(dev);
1859         if (xd) {
1860                 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1861                                    msecs_to_jiffies(50));
1862         }
1863
1864         return 0;
1865 }
1866
/* Schedule the properties_changed work for every XDomain on the bus */
static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}
1871
1872 static bool remove_directory(const char *key, const struct tb_property_dir *dir)
1873 {
1874         struct tb_property *p;
1875
1876         p = tb_property_find(xdomain_property_dir, key,
1877                              TB_PROPERTY_TYPE_DIRECTORY);
1878         if (p && p->value.dir == dir) {
1879                 tb_property_remove(p);
1880                 return true;
1881         }
1882         return false;
1883 }
1884
1885 /**
1886  * tb_register_property_dir() - Register property directory to the host
1887  * @key: Key (name) of the directory to add
1888  * @dir: Directory to add
1889  *
1890  * Service drivers can use this function to add new property directory
1891  * to the host available properties. The other connected hosts are
1892  * notified so they can re-read properties of this host if they are
1893  * interested.
1894  *
1895  * Return: %0 on success and negative errno on failure
1896  */
1897 int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
1898 {
1899         int ret;
1900
1901         if (WARN_ON(!xdomain_property_dir))
1902                 return -EAGAIN;
1903
1904         if (!key || strlen(key) > 8)
1905                 return -EINVAL;
1906
1907         mutex_lock(&xdomain_lock);
1908         if (tb_property_find(xdomain_property_dir, key,
1909                              TB_PROPERTY_TYPE_DIRECTORY)) {
1910                 ret = -EEXIST;
1911                 goto err_unlock;
1912         }
1913
1914         ret = tb_property_add_dir(xdomain_property_dir, key, dir);
1915         if (ret)
1916                 goto err_unlock;
1917
1918         xdomain_property_block_gen++;
1919
1920         mutex_unlock(&xdomain_lock);
1921         update_all_xdomains();
1922         return 0;
1923
1924 err_unlock:
1925         mutex_unlock(&xdomain_lock);
1926         return ret;
1927 }
1928 EXPORT_SYMBOL_GPL(tb_register_property_dir);
1929
1930 /**
1931  * tb_unregister_property_dir() - Removes property directory from host
1932  * @key: Key (name) of the directory
1933  * @dir: Directory to remove
1934  *
1935  * This will remove the existing directory from this host and notify the
1936  * connected hosts about the change.
1937  */
1938 void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
1939 {
1940         int ret = 0;
1941
1942         mutex_lock(&xdomain_lock);
1943         if (remove_directory(key, dir))
1944                 xdomain_property_block_gen++;
1945         mutex_unlock(&xdomain_lock);
1946
1947         if (!ret)
1948                 update_all_xdomains();
1949 }
1950 EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
1951
/*
 * One-time initialization of the XDomain support: builds the property
 * directory exposed to remote hosts. Returns %0 or -ENOMEM.
 */
int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 *
	 * Rest of the properties are filled dynamically based on these
	 * when the P2P connection is made.
	 */
	/*
	 * "vendorid" appears twice: once as an immediate (numeric)
	 * value and once as its text form — presumably both encodings
	 * are part of the property block format; NOTE(review): return
	 * values of tb_property_add_*() are not checked here, so a
	 * failed addition silently yields an incomplete directory.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	/* Random initial generation so remote hosts re-read after reboot */
	xdomain_property_block_gen = prandom_u32();
	return 0;
}
1975
/* Counterpart of tb_xdomain_init(): releases the host property directory */
void tb_xdomain_exit(void)
{
	tb_property_free_dir(xdomain_property_dir);
}