// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
#define XDOMAIN_UUID_RETRIES			10
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
#define XDOMAIN_BONDING_WAIT			100  /* ms */

struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

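/*
 * Illustrative sketch (not part of the driver): a hypothetical service
 * driver answering a peer's request. "struct my_svc_reply" is a made-up
 * protocol structure; only tb_xdomain_response() and the PDF type come
 * from the code above.
 *
 *	struct my_svc_reply reply = { .status = 0 };
 *	int ret;
 *
 *	ret = tb_xdomain_response(xd, &reply, sizeof(reply),
 *				  TB_CFG_PKG_XDOMAIN_RESP);
 *	if (ret)
 *		dev_warn(&xd->dev, "failed to send reply: %d\n", ret);
 */
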
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);

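/*
 * Illustrative sketch of a synchronous round trip. The request and
 * response structures are hypothetical and would be defined by the
 * service protocol; the 1000 ms timeout is an arbitrary example value.
 *
 *	struct my_svc_request req = { .opcode = MY_SVC_OP_STATUS };
 *	struct my_svc_response res;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &res, sizeof(res),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */
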
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

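/*
 * Worked example of the encoding above, assuming the 2-bit sequence
 * field defined in tb_msgs.h: a packet with 8 payload bytes after the
 * XDomain header is 8 / 4 = 2 dwords long, so with sequence number 3
 * the field becomes (3 << TB_XDOMAIN_SN_SHIFT) | 2. Receivers reverse
 * this with TB_XDOMAIN_SN_MASK and TB_XDOMAIN_LENGTH_MASK as in
 * tb_xdp_handle_request() below.
 */
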
static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.hdr);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least the size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * Currently we expect all requests to be directed to us. The
	 * protocol supports forwarding, though; we might add support
	 * for it later on.
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a package with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

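/*
 * Illustrative sketch: registering a handler for a custom protocol
 * UUID. The names below are hypothetical; the callback is invoked for
 * every incoming XDomain packet that carries the matching UUID.
 *
 *	static int my_proto_callback(const void *buf, size_t size, void *data)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tb_protocol_handler my_proto_handler = {
 *		.uuid = &my_proto_uuid,
 *		.callback = my_proto_callback,
 *	};
 *
 *	ret = tb_register_protocol_handler(&my_proto_handler);
 */
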
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static void finalize_property_block(void)
{
	const struct tb_property *nodename;

	/*
	 * On first XDomain connection we set up the system
	 * nodename. This is delayed here because userspace may not have
	 * it set when the driver is first probed.
	 */
	mutex_lock(&xdomain_lock);
	nodename = tb_property_find(xdomain_property_dir, "deviceid",
				    TB_PROPERTY_TYPE_TEXT);
	if (!nodename) {
		tb_property_add_text(xdomain_property_dir, "deviceid",
				     utsname()->nodename);
		rebuild_property_block();
	}
	mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	finalize_property_block();

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_route_locked(tb, route);
		if (xd) {
			if (device_is_registered(&xd->dev)) {
				queue_delayed_work(tb->wq, &xd->get_properties_work,
						   msecs_to_jiffies(50));
			}
			tb_xdomain_put(xd);
		}

		break;
	}

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	default:
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

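/*
 * Illustrative sketch of a minimal service driver registration. The
 * "network" key and the probe/remove callbacks are hypothetical here;
 * the ID table is matched against the service modalias by the bus.
 *
 *	static const struct tb_service_id my_service_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 *	static struct tb_service_driver my_service_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "my-service",
 *		},
 *		.probe = my_service_probe,
 *		.remove = my_service_remove,
 *		.id_table = my_service_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_service_driver);
 */
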
/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Required properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (!xd->resume)
		return;

	xd->resume = false;
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_uuid_work.work);
	struct tb *tb = xd->tb;
	uuid_t uuid;
	int ret;

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
	if (ret < 0) {
		if (xd->uuid_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return;
	}

	if (uuid_equal(&uuid, xd->local_uuid))
		dev_dbg(&xd->dev, "intra-domain loop detected\n");

	/*
	 * If the UUID is different, there is another domain connected
	 * so mark this one unplugged and wait for the connection
	 * manager to replace it.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return;
	}

	/* Now we can start the normal properties exchange */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify the userspace that it has changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);

	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%u\n", xd->link_width);
}

static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	if (xd->needs_uuid) {
		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
				   msecs_to_jiffies(100));
	} else {
		/* Start exchanging properties with the other host */
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(100));
		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
				   msecs_to_jiffies(1000));
	}
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = 0;
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_uuid_work);
	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after properties are re-read.
	 */
	xd->resume = true;
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);

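/*
 * Illustrative sketch: a connection manager would typically try to
 * bond the lanes right after the XDomain is discovered and fall back
 * to a single lane on failure.
 *
 *	if (tb_xdomain_lane_bonding_enable(xd))
 *		dev_dbg(&xd->dev, "staying at single-lane width\n");
 */
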
/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (port->dual_link_port) {
		tb_port_lane_bonding_disable(port);
		tb_port_disable(port->dual_link_port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * rings.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

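/*
 * Illustrative sketch: once both ends have agreed on HopIDs and ring
 * numbers through their service protocol, each side programs its DMA
 * paths. The variable names here are hypothetical and would come from
 * the negotiation.
 *
 *	ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *				      receive_path, receive_ring);
 *	if (ret)
 *		return ret;
 *
 *	... transfer data over the rings ...
 *
 *	tb_xdomain_disable_paths(xd);
 */
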
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After call to
 * this the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

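/*
 * Illustrative sketch: looking up a peer by UUID under the domain lock
 * and dropping the acquired reference when done.
 *
 *	mutex_lock(&tb->lock);
 *	xd = tb_xdomain_find_by_uuid(tb, uuid);
 *	mutex_unlock(&tb->lock);
 *	if (xd) {
 *		...
 *		tb_xdomain_put(xd);
 *	}
 */
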
/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet to be at least the size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host's available properties. The other connected hosts are
 * notified so they can re-read properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

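/*
 * Illustrative sketch: a service driver exporting its own directory,
 * similar to what the network driver does with its "network" key. The
 * directory UUID, key and properties are hypothetical; note the key
 * must be at most 8 characters.
 *
 *	dir = tb_property_create_dir(&my_dir_uuid);
 *	if (!dir)
 *		return -ENOMEM;
 *	tb_property_add_immediate(dir, "prtcid", 1);
 *	tb_property_add_immediate(dir, "prtcvers", 1);
 *
 *	ret = tb_register_property_dir("mysvc", dir);
 *	if (ret)
 *		tb_property_free_dir(dir);
 */
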
/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 *
	 * We also add the node name later when the first connection is
	 * made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	return 0;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}