/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
enum tb_cfg_pkg_type {
        TB_CFG_PKG_READ = 1,
        TB_CFG_PKG_WRITE = 2,
        TB_CFG_PKG_ERROR = 3,
        TB_CFG_PKG_NOTIFY_ACK = 4,
        TB_CFG_PKG_EVENT = 5,
        TB_CFG_PKG_XDOMAIN_REQ = 6,
        TB_CFG_PKG_XDOMAIN_RESP = 7,
        TB_CFG_PKG_OVERRIDE = 8,
        TB_CFG_PKG_RESET = 9,
        TB_CFG_PKG_ICM_EVENT = 10,
        TB_CFG_PKG_ICM_CMD = 11,
        TB_CFG_PKG_ICM_RESP = 12,
        TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};
/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *                       Thunderbolt dock (and Display Port). All PCIe
 *                       links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *                      PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
        TB_SECURITY_NONE,
        TB_SECURITY_USER,
        TB_SECURITY_SECURE,
        TB_SECURITY_DPONLY,
        TB_SECURITY_USBONLY,
        TB_SECURITY_NOPCIE,
};
 * struct tb - main thunderbolt bus structure
 * @lock: Big lock. Must be held when accessing any struct
 *        tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
        struct workqueue_struct *wq;
        struct tb_switch *root_switch;
        const struct tb_cm_ops *cm_ops;
        enum tb_security_level security_level;
        unsigned long privdata[];
extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT   2
static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
        return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
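/*
 * For example (values follow directly from the helper above): the two links
 * of a physical port pair up, so tb_phy_port_from_link(1) and
 * tb_phy_port_from_link(2) both return 0, and links 3 and 4 map to port 1.
 */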
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * Users need to provide serialization if needed.
struct tb_property_dir {
        struct list_head properties;
enum tb_property_type {
        TB_PROPERTY_TYPE_UNKNOWN = 0x00,
        TB_PROPERTY_TYPE_DIRECTORY = 0x44,
        TB_PROPERTY_TYPE_DATA = 0x64,
        TB_PROPERTY_TYPE_TEXT = 0x74,
        TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE    8
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always NUL-terminated)
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
        struct list_head list;
        char key[TB_PROPERTY_KEY_SIZE + 1];
        enum tb_property_type type;
        struct tb_property_dir *dir;
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
                                              size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
                               size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
                              u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
                         const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
                         const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
                        struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
                                     const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
                                         struct tb_property *prev);
#define tb_property_for_each(dir, property)                     \
        for (property = tb_property_get_next(dir, NULL);        \
             property;                                          \
             property = tb_property_get_next(dir, property))
int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
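/*
 * Usage sketch (illustrative only): a service driver that wants to
 * advertise itself to remote domains could build and register a property
 * directory roughly like this. The directory UUID, the "mysvc" key, the
 * property names and values are all hypothetical.
 *
 *      static const uuid_t my_dir_uuid =
 *              UUID_INIT(0x12345678, 0x0000, 0x0000,
 *                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
 *      static struct tb_property_dir *my_dir;
 *
 *      static int my_publish_properties(void)
 *      {
 *              int ret;
 *
 *              my_dir = tb_property_create_dir(&my_dir_uuid);
 *              if (!my_dir)
 *                      return -ENOMEM;
 *
 *              ret = tb_property_add_immediate(my_dir, "prtcid", 1);
 *              if (!ret)
 *                      ret = tb_property_add_text(my_dir, "name", "my service");
 *              if (!ret)
 *                      ret = tb_register_property_dir("mysvc", my_dir);
 *              if (ret)
 *                      tb_property_free_dir(my_dir);
 *              return ret;
 *      }
 *
 * Properties exported by a remote domain (see struct tb_xdomain below) can
 * be walked with tb_property_for_each() and looked up with tb_property_find().
 */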
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string with which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *              queried first
 * @transmit_path: HopID which the remote end expects us to transmit
 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
 * @receive_path: HopID which we expect the remote end to transmit
 * @receive_ring: Local ring (hop) where incoming packets arrive
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @properties_lock: Lock protecting @properties
 * @get_uuid_work: Work used to retrieve @remote_uuid
 * @uuid_retries: Number of times left @remote_uuid is requested before
 *                giving up
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *                           our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *                              changed notification
 * @link: Root switch link to which the remote domain is connected (ICM only)
 * @depth: Depth in the chain at which the remote domain is connected (ICM only)
 *
 * This structure represents a connection between two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
        const uuid_t *local_uuid;
        const char *vendor_name;
        const char *device_name;
        unsigned int link_speed;
        unsigned int link_width;
        struct ida service_ids;
        struct tb_property_dir *properties;
        u32 property_block_gen;
        struct delayed_work get_uuid_work;
        struct delayed_work get_properties_work;
        int properties_retries;
        struct delayed_work properties_changed_work;
        int properties_changed_retries;
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
                            u16 transmit_ring, u16 receive_path,
                            u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
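/*
 * Illustrative sketch (not a complete driver): once a service driver has
 * agreed on HopIDs with the remote side through its own protocol and has
 * allocated its rings, the DMA paths could be set up like this. All names
 * below are hypothetical.
 *
 *      static int my_enable_dma(struct tb_xdomain *xd, struct tb_ring *tx,
 *                               struct tb_ring *rx, u16 remote_rx_hopid,
 *                               u16 local_rx_hopid)
 *      {
 *              // transmit_path/receive_path are the negotiated HopIDs,
 *              // transmit_ring/receive_ring the local ring hops.
 *              return tb_xdomain_enable_paths(xd, remote_rx_hopid, tx->hop,
 *                                             local_rx_hopid, rx->hop);
 *      }
 *
 * tb_xdomain_disable_paths() tears the paths down again, typically before
 * the rings are stopped and freed.
 */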
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_uuid(tb, uuid);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_route(tb, route);
        mutex_unlock(&tb->lock);

        return xd;
}
static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
        if (xd)
                get_device(&xd->dev);
        return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
        if (xd)
                put_device(&xd->dev);
}

static inline bool tb_is_xdomain(const struct device *dev)
{
        return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
        if (tb_is_xdomain(dev))
                return container_of(dev, struct tb_xdomain, dev);
        return NULL;
}
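/*
 * Reference counting sketch (illustrative): a driver that keeps an XDomain
 * pointer around takes its own reference with tb_xdomain_get() and drops it
 * with tb_xdomain_put(). A successful lookup is typically balanced the same
 * way. The route value here is hypothetical.
 *
 *      struct tb_xdomain *xd;
 *
 *      xd = tb_xdomain_find_by_route_locked(tb, route);
 *      if (xd) {
 *              // ... use the XDomain ...
 *              tb_xdomain_put(xd);
 *      }
 */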
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
                       size_t request_size, enum tb_cfg_pkg_type request_type,
                       void *response, size_t response_size,
                       enum tb_cfg_pkg_type response_type,
                       unsigned int timeout_msec);
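/*
 * Illustrative sketch: a service driver sending a request over the XDomain
 * connection and waiting for the reply. struct my_request/my_response and
 * the 1000 ms timeout are hypothetical; real messages follow the service's
 * own protocol.
 *
 *      struct my_request req = { 0 };
 *      struct my_response res = { 0 };
 *      int ret;
 *
 *      ret = tb_xdomain_request(xd, &req, sizeof(req),
 *                               TB_CFG_PKG_XDOMAIN_REQ,
 *                               &res, sizeof(res),
 *                               TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *      if (ret)
 *              return ret;
 */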
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *            here tells the XDomain core that the message was handled
 *            by this handler and should not be forwarded to other
 *            handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the XDomain
 * discovery protocol UUID cannot be registered since it is handled by
 * the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
struct tb_protocol_handler {
        const uuid_t *uuid;
        int (*callback)(const void *buf, size_t size, void *data);
        void *data;
        struct list_head list;
};
int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
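/*
 * Illustrative sketch: hooking into incoming XDomain messages for a
 * hypothetical protocol UUID. my_proto_uuid, my_handle_msg() and my_data
 * are made-up names.
 *
 *      static int my_handle_msg(const void *buf, size_t size, void *data)
 *      {
 *              // Verify the message really belongs to this service, then
 *              // return 1 if it was consumed, 0 otherwise.
 *              return 0;
 *      }
 *
 *      static struct tb_protocol_handler my_handler = {
 *              .uuid = &my_proto_uuid,
 *              .callback = my_handle_msg,
 *              .data = &my_data,
 *      };
 *
 * The handler is installed with tb_register_protocol_handler(&my_handler)
 * and removed again with tb_unregister_protocol_handler(&my_handler).
 */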
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *               when debugfs is enabled. Can be used by service drivers to
 *               add their own entries under the service.
 *
 * Each domain exposes a set of services it supports as a collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
        struct dentry *debugfs_dir;
static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
        if (svc)
                get_device(&svc->dev);
        return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
        if (svc)
                put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
        return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
        if (tb_is_service(dev))
                return container_of(dev, struct tb_service, dev);
        return NULL;
}
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
struct tb_service_driver {
        struct device_driver driver;
        int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
        void (*remove)(struct tb_service *svc);
        void (*shutdown)(struct tb_service *svc);
        const struct tb_service_id *id_table;
};
#define TB_SERVICE(key, id)                             \
        .match_flags = TBSVC_MATCH_PROTOCOL_KEY |       \
                       TBSVC_MATCH_PROTOCOL_ID,         \
        .protocol_key = (key),                          \
        .protocol_id = (id)
int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
        return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
        dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
        return tb_to_xdomain(svc->dev.parent);
}
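/*
 * Illustrative sketch of a minimal service driver. The protocol key
 * "mysvc", protocol ID 1, struct my_ctx and the my_* names are
 * hypothetical; a real driver matches whatever key/ID the remote service
 * advertises in its properties directory (kzalloc() assumes <linux/slab.h>).
 *
 *      static const struct tb_service_id my_ids[] = {
 *              { TB_SERVICE("mysvc", 1) },
 *              { },
 *      };
 *      MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *      static int my_probe(struct tb_service *svc,
 *                          const struct tb_service_id *id)
 *      {
 *              struct my_ctx *ctx;
 *
 *              ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *              if (!ctx)
 *                      return -ENOMEM;
 *              ctx->xd = tb_service_parent(svc);
 *              tb_service_set_drvdata(svc, ctx);
 *              return 0;
 *      }
 *
 *      static void my_remove(struct tb_service *svc)
 *      {
 *              kfree(tb_service_get_drvdata(svc));
 *      }
 *
 *      static struct tb_service_driver my_driver = {
 *              .driver.name = "my-service",
 *              .probe = my_probe,
 *              .remove = my_remove,
 *              .id_table = my_ids,
 *      };
 *
 * The driver is then registered with tb_register_service_driver() from
 * module init and unregistered with tb_unregister_service_driver() on exit.
 */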
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *        interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *              this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *                  MSI-X is used
 * @hop_count: Number of rings (end point hops) supported by NHI.
        struct pci_dev *pdev;
        const struct tb_nhi_ops *ops;
        void __iomem *iobase;
        struct tb_ring **tx_rings;
        struct tb_ring **rx_rings;
        struct work_struct interrupt_work;
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *        @nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *              RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *              polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
        struct ring_desc *descriptors;
        dma_addr_t descriptors_dma;
        struct list_head queue;
        struct list_head in_flight;
        struct work_struct work;
        void (*start_poll)(void *data);
/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND    BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME         BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E           BIT(2)

typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
enum ring_desc_flags {
        RING_DESC_ISOCH = 0x1,
        RING_DESC_CRC_ERROR = 0x1,
        RING_DESC_COMPLETED = 0x2,
        RING_DESC_POSTED = 0x4,
        RING_DESC_BUFFER_OVERRUN = 0x4,
        RING_DESC_INTERRUPT = 0x8,
};
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
        dma_addr_t buffer_phy;
        struct list_head list;

/* Minimum size for tb_ring_rx() */
#define TB_FRAME_SIZE           0x100
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags, int e2e_tx_hop,
                                 u16 sof_mask, u16 eof_mask,
                                 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);
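/*
 * Illustrative sketch: allocating and starting a pair of rings in frame
 * mode. The NHI pointer usually comes from xd->tb->nhi; the hop value -1
 * (assumed here to mean "allocate any free hop"), the ring size of 16 and
 * the sof/eof masks are hypothetical values for a made-up protocol.
 *
 *      struct tb_ring *tx, *rx;
 *
 *      tx = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_FRAME);
 *      if (!tx)
 *              return -ENOMEM;
 *
 *      rx = tb_ring_alloc_rx(nhi, -1, 16, RING_FLAG_FRAME, 0,
 *                            0xffff, 0xffff, NULL, NULL);
 *      if (!rx) {
 *              tb_ring_free(tx);
 *              return -ENOMEM;
 *      }
 *
 *      tb_ring_start(tx);
 *      tb_ring_start(rx);
 *
 * The rings are stopped with tb_ring_stop() and released with
 * tb_ring_free() when no longer needed.
 */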
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame on
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy have to be set. The buffer must
 * contain at least %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued,
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame on
 * @frame: Frame to enqueue
 *
 * @frame->buffer, @frame->buffer_phy, @frame->size, @frame->eof and
 * @frame->sof have to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued,
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(!ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}
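/*
 * Illustrative sketch: queueing one frame for transmission. The callback
 * body and the my_* names are hypothetical; the frame fields follow the
 * struct ring_frame documentation above.
 *
 *      static void my_tx_done(struct tb_ring *ring, struct ring_frame *frame,
 *                             bool canceled)
 *      {
 *              // Unmap and recycle the buffer here; canceled is true when
 *              // the ring was stopped before the frame went out.
 *      }
 *
 *      frame->callback = my_tx_done;
 *      frame->size = len;
 *      frame->sof = my_sof_pdf;
 *      frame->eof = my_eof_pdf;
 *      // frame->buffer_phy must already be DMA mapped, see
 *      // tb_ring_dma_device() below.
 *      ret = tb_ring_tx(tx_ring, frame);
 */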
/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);
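/*
 * Illustrative sketch: a ring that was allocated with a @start_poll
 * callback is drained in polling mode and then re-armed. my_poll() and
 * my_handle_frame() are hypothetical names.
 *
 *      static void my_poll(struct tb_ring *ring)
 *      {
 *              struct ring_frame *frame;
 *
 *              // Consume completed frames until the ring is empty
 *              while ((frame = tb_ring_poll(ring)))
 *                      my_handle_frame(ring, frame);
 *
 *              // Re-enable the ring interrupt
 *              tb_ring_poll_complete(ring);
 *      }
 */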
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
        return &ring->nhi->pdev->dev;
}
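/*
 * Illustrative sketch: mapping a buffer for an outgoing frame with the
 * ring's DMA device (assumes <linux/dma-mapping.h>). The buffer and length
 * are hypothetical.
 *
 *      struct device *dma_dev = tb_ring_dma_device(tx_ring);
 *      dma_addr_t phys;
 *
 *      phys = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dma_dev, phys))
 *              return -ENOMEM;
 *      frame->buffer_phy = phys;
 *
 * The mapping is released with dma_unmap_single() once the frame callback
 * reports completion.
 */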
#endif /* THUNDERBOLT_H_ */