/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
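
/*
 * Example (illustrative only, not part of the API): a caller that wants a
 * read/write, non-executable mapping restricted to privileged transactions
 * would combine the flags above when mapping, e.g.
 *
 *	iommu_map(domain, iova, paddr, size,
 *		  IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_PRIV);
 */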

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

/*
 * These are the possible domain-types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature() and check
 * whether it is enabled using iommu_dev_feature_enabled().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID	(-1U)
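
/*
 * Example (illustrative sketch, not a real driver): a driver that wants SVA
 * would typically enable the features in the documented order and then bind
 * an mm; "dev", "handle" and the error handling are assumptions of this
 * sketch.
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF))
 *		return -ENODEV;
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return -ENODEV;
 *	}
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 */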

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_domain_ops before eventually being
 * passed into ->iotlb_sync(). Drivers can add pages to @freelist to be freed
 * after ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached
 * references to them. @queued is set to indicate when ->iotlb_flush_all()
 * will be called later instead of ->iotlb_sync(), so drivers may optimise
 * accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};
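
/*
 * Example (illustrative calling pattern only): a caller of the fast unmap
 * path initialises a gather, feeds it through the unmap calls and syncs once
 * at the end; "domain", "iova" and "size" are assumed to be set up by the
 * caller.
 *
 *	struct iommu_iotlb_gather gather;
 *	size_t unmapped;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */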

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to an SVA handle
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};
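
/*
 * Example (hypothetical driver skeleton, for illustration only; the
 * "my_iommu_*" callbacks are assumptions of this sketch): a driver typically
 * provides one static ops table and points @default_domain_ops at its domain
 * callbacks, e.g.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_iommu_capable,
 *		.domain_alloc	= my_iommu_domain_alloc,
 *		.probe_device	= my_iommu_probe_device,
 *		.release_device	= my_iommu_release_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.owner		= THIS_MODULE,
 *		.default_domain_ops = &my_iommu_domain_ops,
 *	};
 *
 * (See the sketch after struct iommu_domain_ops below for the domain side.)
 */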

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 * @detach_dev: detach an iommu domain from a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};
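
/*
 * Example (continuation of the hypothetical skeleton above; all "my_iommu_*"
 * helpers are assumptions of this sketch): a minimal paging-capable domain
 * might wire up only the callbacks it actually needs:
 *
 *	static const struct iommu_domain_ops my_iommu_domain_ops = {
 *		.attach_dev	= my_iommu_attach_dev,
 *		.map		= my_iommu_map,
 *		.unmap		= my_iommu_unmap,
 *		.iova_to_phys	= my_iommu_iova_to_phys,
 *		.free		= my_iommu_domain_free,
 *	};
 */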

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
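
/*
 * Example (hypothetical probe-time sequence, for illustration only; "data",
 * "dev" and "my_iommu_ops" are assumptions of this sketch): an IOMMU driver
 * usually exposes the instance in sysfs and then registers it with the core:
 *
 *	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, "%s", dev_name(dev));
 *	if (ret)
 *		return ret;
 *
 *	ret = iommu_device_register(&data->iommu, &my_iommu_ops, dev);
 *	if (ret)
 *		iommu_device_sysfs_remove(&data->iommu);
 */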

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot);
extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
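
/*
 * Example (illustrative use of the unmanaged-domain API; "pdev", "iova" and
 * "page" are assumptions of this sketch): a typical caller allocates a
 * domain, attaches the device and maps/unmaps by hand:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	if (iommu_attach_device(domain, &pdev->dev))
 *		goto out_free;
 *	if (iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
 *		      IOMMU_READ | IOMMU_WRITE))
 *		goto out_detach;
 *	...
 *	iommu_unmap(domain, iova, PAGE_SIZE);
 * out_detach:
 *	iommu_detach_device(domain, &pdev->dev);
 * out_free:
 *	iommu_domain_free(domain);
 */
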
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);
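
/*
 * Example (illustrative sketch; "my_fault_handler" and "msg" are assumptions
 * of this sketch): a device driver that handles recoverable faults itself
 * registers a handler and later completes page requests with a response:
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 *	...
 *	iommu_page_response(dev, &msg);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */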

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range and the gathered
 *                                  range are disjoint
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
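
/*
 * Example (hypothetical driver ->unmap_pages implementation, illustration
 * only; "my_pgtable_clear_pte" is an assumption of this sketch): a driver
 * records every page it tears down so the core can issue a single sync at
 * the end:
 *
 *	static size_t my_iommu_unmap_pages(struct iommu_domain *domain,
 *					   unsigned long iova, size_t pgsize,
 *					   size_t pgcount,
 *					   struct iommu_iotlb_gather *gather)
 *	{
 *		size_t unmapped;
 *
 *		for (unmapped = 0; unmapped < pgsize * pgcount; unmapped += pgsize) {
 *			if (!my_pgtable_clear_pte(domain, iova + unmapped))
 *				break;
 *			iommu_iotlb_gather_add_page(domain, gather,
 *						    iova + unmapped, pgsize);
 *		}
 *		return unmapped;
 *	}
 */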

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device		*dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;

	return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot)
{
	return -ENODEV;
}

static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
					  unsigned long iova, struct scatterlist *sg,
					  unsigned int nents, int prot)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}
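
/*
 * Example (illustrative only; "domain", "iova" and "sgt" are assumptions of
 * this sketch):
 *
 *	size_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *					  IOMMU_READ | IOMMU_WRITE);
 *
 * On success, @mapped covers the full length described by the scatterlist;
 * note that the underlying iommu_map_sg() returns a negative errno on
 * failure.
 */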

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */