1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
4 * Author: Joerg Roedel <jroedel@suse.de>
7 #define pr_fmt(fmt) "iommu: " fmt
9 #include <linux/device.h>
10 #include <linux/kernel.h>
11 #include <linux/bits.h>
12 #include <linux/bug.h>
13 #include <linux/types.h>
14 #include <linux/init.h>
15 #include <linux/export.h>
16 #include <linux/slab.h>
17 #include <linux/errno.h>
18 #include <linux/iommu.h>
19 #include <linux/idr.h>
20 #include <linux/notifier.h>
21 #include <linux/err.h>
22 #include <linux/pci.h>
23 #include <linux/bitops.h>
24 #include <linux/property.h>
25 #include <linux/fsl/mc.h>
26 #include <linux/module.h>
27 #include <trace/events/iommu.h>
29 static struct kset *iommu_group_kset;
30 static DEFINE_IDA(iommu_group_ida);
32 static unsigned int iommu_def_domain_type __read_mostly;
33 static bool iommu_dma_strict __read_mostly = true;
34 static u32 iommu_cmd_line __read_mostly;
38 struct kobject *devices_kobj;
39 struct list_head devices;
41 struct blocking_notifier_head notifier;
43 void (*iommu_data_release)(void *iommu_data);
46 struct iommu_domain *default_domain;
47 struct iommu_domain *domain;
48 struct list_head entry;
52 struct list_head list;
57 struct iommu_group_attribute {
58 struct attribute attr;
59 ssize_t (*show)(struct iommu_group *group, char *buf);
60 ssize_t (*store)(struct iommu_group *group,
61 const char *buf, size_t count);
64 static const char * const iommu_group_resv_type_string[] = {
65 [IOMMU_RESV_DIRECT] = "direct",
66 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
67 [IOMMU_RESV_RESERVED] = "reserved",
68 [IOMMU_RESV_MSI] = "msi",
69 [IOMMU_RESV_SW_MSI] = "msi",
72 #define IOMMU_CMD_LINE_DMA_API BIT(0)
73 #define IOMMU_CMD_LINE_STRICT BIT(1)
75 static int iommu_alloc_default_domain(struct iommu_group *group,
77 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
79 static int __iommu_attach_device(struct iommu_domain *domain,
81 static int __iommu_attach_group(struct iommu_domain *domain,
82 struct iommu_group *group);
83 static void __iommu_detach_group(struct iommu_domain *domain,
84 struct iommu_group *group);
85 static int iommu_create_device_direct_mappings(struct iommu_group *group,
87 static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
88 static ssize_t iommu_group_store_type(struct iommu_group *group,
89 const char *buf, size_t count);
91 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
92 struct iommu_group_attribute iommu_group_attr_##_name = \
93 __ATTR(_name, _mode, _show, _store)
95 #define to_iommu_group_attr(_attr) \
96 container_of(_attr, struct iommu_group_attribute, attr)
97 #define to_iommu_group(_kobj) \
98 container_of(_kobj, struct iommu_group, kobj)
100 static LIST_HEAD(iommu_device_list);
101 static DEFINE_SPINLOCK(iommu_device_lock);
104 * Use a function instead of an array here because the domain-type is a
105 * bit-field, so an array would waste memory.
107 static const char *iommu_domain_type_str(unsigned int t)
110 case IOMMU_DOMAIN_BLOCKED:
112 case IOMMU_DOMAIN_IDENTITY:
113 return "Passthrough";
114 case IOMMU_DOMAIN_UNMANAGED:
116 case IOMMU_DOMAIN_DMA:
123 static int __init iommu_subsys_init(void)
125 if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
126 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
127 iommu_set_default_passthrough(false);
129 iommu_set_default_translated(false);
131 if (iommu_default_passthrough() && mem_encrypt_active()) {
132 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
133 iommu_set_default_translated(false);
137 pr_info("Default domain type: %s %s\n",
138 iommu_domain_type_str(iommu_def_domain_type),
139 (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
140 "(set via kernel command line)" : "");
144 subsys_initcall(iommu_subsys_init);
147 * iommu_device_register() - Register an IOMMU hardware instance
148 * @iommu: IOMMU handle for the instance
149 * @ops: IOMMU ops to associate with the instance
150 * @hwdev: (optional) actual instance device, used for fwnode lookup
152 * Return: 0 on success, or an error.
154 int iommu_device_register(struct iommu_device *iommu,
155 const struct iommu_ops *ops, struct device *hwdev)
157 /* We need to be able to take module references appropriately */
158 if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
163 iommu->fwnode = hwdev->fwnode;
165 spin_lock(&iommu_device_lock);
166 list_add_tail(&iommu->list, &iommu_device_list);
167 spin_unlock(&iommu_device_lock);
170 EXPORT_SYMBOL_GPL(iommu_device_register);
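/*
 * Example (an illustrative sketch only, not taken from a real driver): a
 * hypothetical driver "mydrv" embedding a struct iommu_device and providing
 * its own mydrv_iommu_ops might register its hardware instance from probe:
 *
 *	struct mydrv_iommu {
 *		struct iommu_device iommu;
 *	};
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct mydrv_iommu *m;
 *
 *		m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
 *		if (!m)
 *			return -ENOMEM;
 *		return iommu_device_register(&m->iommu, &mydrv_iommu_ops,
 *					     &pdev->dev);
 *	}
 *
 * The instance is dropped again with iommu_device_unregister(&m->iommu) on
 * removal.
 */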
172 void iommu_device_unregister(struct iommu_device *iommu)
174 spin_lock(&iommu_device_lock);
175 list_del(&iommu->list);
176 spin_unlock(&iommu_device_lock);
178 EXPORT_SYMBOL_GPL(iommu_device_unregister);
180 static struct dev_iommu *dev_iommu_get(struct device *dev)
182 struct dev_iommu *param = dev->iommu;
187 param = kzalloc(sizeof(*param), GFP_KERNEL);
191 mutex_init(&param->lock);
196 static void dev_iommu_free(struct device *dev)
198 iommu_fwspec_free(dev);
203 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
205 const struct iommu_ops *ops = dev->bus->iommu_ops;
206 struct iommu_device *iommu_dev;
207 struct iommu_group *group;
213 if (!dev_iommu_get(dev))
216 if (!try_module_get(ops->owner)) {
221 iommu_dev = ops->probe_device(dev);
222 if (IS_ERR(iommu_dev)) {
223 ret = PTR_ERR(iommu_dev);
227 dev->iommu->iommu_dev = iommu_dev;
229 group = iommu_group_get_for_dev(dev);
231 ret = PTR_ERR(group);
234 iommu_group_put(group);
236 if (group_list && !group->default_domain && list_empty(&group->entry))
237 list_add_tail(&group->entry, group_list);
239 iommu_device_link(iommu_dev, dev);
244 ops->release_device(dev);
247 module_put(ops->owner);
255 int iommu_probe_device(struct device *dev)
257 const struct iommu_ops *ops = dev->bus->iommu_ops;
258 struct iommu_group *group;
261 ret = __iommu_probe_device(dev, NULL);
265 group = iommu_group_get(dev);
272 * Try to allocate a default domain - needs support from the
273 * IOMMU driver. There are still some drivers which don't
274 support default domains, so the return value is not yet checked.
277 iommu_alloc_default_domain(group, dev);
279 if (group->default_domain) {
280 ret = __iommu_attach_device(group->default_domain, dev);
282 iommu_group_put(group);
287 iommu_create_device_direct_mappings(group, dev);
289 iommu_group_put(group);
291 if (ops->probe_finalize)
292 ops->probe_finalize(dev);
297 iommu_release_device(dev);
304 void iommu_release_device(struct device *dev)
306 const struct iommu_ops *ops = dev->bus->iommu_ops;
311 iommu_device_unlink(dev->iommu->iommu_dev, dev);
313 ops->release_device(dev);
315 iommu_group_remove_device(dev);
316 module_put(ops->owner);
320 static int __init iommu_set_def_domain_type(char *str)
325 ret = kstrtobool(str, &pt);
330 iommu_set_default_passthrough(true);
332 iommu_set_default_translated(true);
336 early_param("iommu.passthrough", iommu_set_def_domain_type);
338 static int __init iommu_dma_setup(char *str)
340 int ret = kstrtobool(str, &iommu_dma_strict);
343 iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
346 early_param("iommu.strict", iommu_dma_setup);
348 void iommu_set_dma_strict(bool strict)
350 if (strict || !(iommu_cmd_line & IOMMU_CMD_LINE_STRICT))
351 iommu_dma_strict = strict;
354 bool iommu_get_dma_strict(struct iommu_domain *domain)
356 /* only allow lazy flushing for DMA domains */
357 if (domain->type == IOMMU_DOMAIN_DMA)
358 return iommu_dma_strict;
361 EXPORT_SYMBOL_GPL(iommu_get_dma_strict);
363 static ssize_t iommu_group_attr_show(struct kobject *kobj,
364 struct attribute *__attr, char *buf)
366 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
367 struct iommu_group *group = to_iommu_group(kobj);
371 ret = attr->show(group, buf);
375 static ssize_t iommu_group_attr_store(struct kobject *kobj,
376 struct attribute *__attr,
377 const char *buf, size_t count)
379 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
380 struct iommu_group *group = to_iommu_group(kobj);
384 ret = attr->store(group, buf, count);
388 static const struct sysfs_ops iommu_group_sysfs_ops = {
389 .show = iommu_group_attr_show,
390 .store = iommu_group_attr_store,
393 static int iommu_group_create_file(struct iommu_group *group,
394 struct iommu_group_attribute *attr)
396 return sysfs_create_file(&group->kobj, &attr->attr);
399 static void iommu_group_remove_file(struct iommu_group *group,
400 struct iommu_group_attribute *attr)
402 sysfs_remove_file(&group->kobj, &attr->attr);
405 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
407 return sprintf(buf, "%s\n", group->name);
411 * iommu_insert_resv_region - Insert a new region in the
412 * list of reserved regions.
413 * @new: new region to insert
414 * @regions: list of regions
416 * Elements are sorted by start address and overlapping segments
417 * of the same type are merged.
419 static int iommu_insert_resv_region(struct iommu_resv_region *new,
420 struct list_head *regions)
422 struct iommu_resv_region *iter, *tmp, *nr, *top;
425 nr = iommu_alloc_resv_region(new->start, new->length,
426 new->prot, new->type);
430 /* First add the new element based on start address sorting */
431 list_for_each_entry(iter, regions, list) {
432 if (nr->start < iter->start ||
433 (nr->start == iter->start && nr->type <= iter->type))
436 list_add_tail(&nr->list, &iter->list);
438 /* Merge overlapping segments of type nr->type in @regions, if any */
439 list_for_each_entry_safe(iter, tmp, regions, list) {
440 phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
442 /* no merge needed on elements of different types than @new */
443 if (iter->type != new->type) {
444 list_move_tail(&iter->list, &stack);
448 /* look for the last stack element of same type as @iter */
449 list_for_each_entry_reverse(top, &stack, list)
450 if (top->type == iter->type)
453 list_move_tail(&iter->list, &stack);
457 top_end = top->start + top->length - 1;
459 if (iter->start > top_end + 1) {
460 list_move_tail(&iter->list, &stack);
462 top->length = max(top_end, iter_end) - top->start + 1;
463 list_del(&iter->list);
467 list_splice(&stack, regions);
472 iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
473 struct list_head *group_resv_regions)
475 struct iommu_resv_region *entry;
478 list_for_each_entry(entry, dev_resv_regions, list) {
479 ret = iommu_insert_resv_region(entry, group_resv_regions);
486 int iommu_get_group_resv_regions(struct iommu_group *group,
487 struct list_head *head)
489 struct group_device *device;
492 mutex_lock(&group->mutex);
493 list_for_each_entry(device, &group->devices, list) {
494 struct list_head dev_resv_regions;
496 INIT_LIST_HEAD(&dev_resv_regions);
497 iommu_get_resv_regions(device->dev, &dev_resv_regions);
498 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
499 iommu_put_resv_regions(device->dev, &dev_resv_regions);
503 mutex_unlock(&group->mutex);
506 EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
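/*
 * Example (illustrative sketch): a consumer collects the merged regions into
 * a temporary list, walks them, and then frees the entries it received (each
 * one was allocated on its behalf by iommu_alloc_resv_region()):
 *
 *	struct iommu_resv_region *region, *next;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("resv %pa length 0x%zx type %d\n",
 *			&region->start, region->length, region->type);
 *	list_for_each_entry_safe(region, next, &resv_regions, list)
 *		kfree(region);
 */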
508 static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
511 struct iommu_resv_region *region, *next;
512 struct list_head group_resv_regions;
515 INIT_LIST_HEAD(&group_resv_regions);
516 iommu_get_group_resv_regions(group, &group_resv_regions);
518 list_for_each_entry_safe(region, next, &group_resv_regions, list) {
519 str += sprintf(str, "0x%016llx 0x%016llx %s\n",
520 (long long int)region->start,
521 (long long int)(region->start +
523 iommu_group_resv_type_string[region->type]);
530 static ssize_t iommu_group_show_type(struct iommu_group *group,
533 char *type = "unknown\n";
535 mutex_lock(&group->mutex);
536 if (group->default_domain) {
537 switch (group->default_domain->type) {
538 case IOMMU_DOMAIN_BLOCKED:
541 case IOMMU_DOMAIN_IDENTITY:
544 case IOMMU_DOMAIN_UNMANAGED:
545 type = "unmanaged\n";
547 case IOMMU_DOMAIN_DMA:
552 mutex_unlock(&group->mutex);
558 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
560 static IOMMU_GROUP_ATTR(reserved_regions, 0444,
561 iommu_group_show_resv_regions, NULL);
563 static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
564 iommu_group_store_type);
566 static void iommu_group_release(struct kobject *kobj)
568 struct iommu_group *group = to_iommu_group(kobj);
570 pr_debug("Releasing group %d\n", group->id);
572 if (group->iommu_data_release)
573 group->iommu_data_release(group->iommu_data);
575 ida_simple_remove(&iommu_group_ida, group->id);
577 if (group->default_domain)
578 iommu_domain_free(group->default_domain);
584 static struct kobj_type iommu_group_ktype = {
585 .sysfs_ops = &iommu_group_sysfs_ops,
586 .release = iommu_group_release,
590 * iommu_group_alloc - Allocate a new group
592 * This function is called by an iommu driver to allocate a new iommu
593 * group. The iommu group represents the minimum granularity of the iommu.
594 * Upon successful return, the caller holds a reference to the supplied
595 * group in order to hold the group until devices are added. Use
596 * iommu_group_put() to release this extra reference count, allowing the
597 group to be automatically reclaimed once it has no devices or external references.
600 struct iommu_group *iommu_group_alloc(void)
602 struct iommu_group *group;
605 group = kzalloc(sizeof(*group), GFP_KERNEL);
607 return ERR_PTR(-ENOMEM);
609 group->kobj.kset = iommu_group_kset;
610 mutex_init(&group->mutex);
611 INIT_LIST_HEAD(&group->devices);
612 INIT_LIST_HEAD(&group->entry);
613 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
615 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
622 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
623 NULL, "%d", group->id);
625 ida_simple_remove(&iommu_group_ida, group->id);
626 kobject_put(&group->kobj);
630 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
631 if (!group->devices_kobj) {
632 kobject_put(&group->kobj); /* triggers .release & free */
633 return ERR_PTR(-ENOMEM);
637 * The devices_kobj holds a reference on the group kobject, so
638 * as long as that exists so will the group. We can therefore
639 * use the devices_kobj for reference counting.
641 kobject_put(&group->kobj);
643 ret = iommu_group_create_file(group,
644 &iommu_group_attr_reserved_regions);
648 ret = iommu_group_create_file(group, &iommu_group_attr_type);
652 pr_debug("Allocated group %d\n", group->id);
656 EXPORT_SYMBOL_GPL(iommu_group_alloc);
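/*
 * Example (illustrative sketch): a bus without topology-based aliasing can
 * implement its ->device_group() callback by simply allocating one group per
 * device, which is what generic_device_group() further down in this file does:
 *
 *	static struct iommu_group *mybus_device_group(struct device *dev)
 *	{
 *		return iommu_group_alloc();
 *	}
 */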
658 struct iommu_group *iommu_group_get_by_id(int id)
660 struct kobject *group_kobj;
661 struct iommu_group *group;
664 if (!iommu_group_kset)
667 name = kasprintf(GFP_KERNEL, "%d", id);
671 group_kobj = kset_find_obj(iommu_group_kset, name);
677 group = container_of(group_kobj, struct iommu_group, kobj);
678 BUG_ON(group->id != id);
680 kobject_get(group->devices_kobj);
681 kobject_put(&group->kobj);
685 EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
688 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
691 * iommu drivers can store data in the group for use when doing iommu
692 * operations. This function provides a way to retrieve it. Caller
693 * should hold a group reference.
695 void *iommu_group_get_iommudata(struct iommu_group *group)
697 return group->iommu_data;
699 EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
702 * iommu_group_set_iommudata - set iommu_data for a group
704 * @iommu_data: new data
705 * @release: release function for iommu_data
707 * iommu drivers can store data in the group for use when doing iommu
708 * operations. This function provides a way to set the data after
709 * the group has been allocated. Caller should hold a group reference.
711 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
712 void (*release)(void *iommu_data))
714 group->iommu_data = iommu_data;
715 group->iommu_data_release = release;
717 EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
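/*
 * Example (illustrative sketch): a driver attaching hypothetical per-group
 * state and letting the group release path free it:
 *
 *	static void mydrv_group_release(void *iommu_data)
 *	{
 *		kfree(iommu_data);
 *	}
 *
 *	gdata = kzalloc(sizeof(*gdata), GFP_KERNEL);
 *	if (gdata)
 *		iommu_group_set_iommudata(group, gdata, mydrv_group_release);
 *
 * Later lookups then use iommu_group_get_iommudata(group).
 */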
720 * iommu_group_set_name - set name for a group
724 * Allow iommu driver to set a name for a group. When set it will
725 * appear in a name attribute file under the group in sysfs.
727 int iommu_group_set_name(struct iommu_group *group, const char *name)
732 iommu_group_remove_file(group, &iommu_group_attr_name);
739 group->name = kstrdup(name, GFP_KERNEL);
743 ret = iommu_group_create_file(group, &iommu_group_attr_name);
752 EXPORT_SYMBOL_GPL(iommu_group_set_name);
754 static int iommu_create_device_direct_mappings(struct iommu_group *group,
757 struct iommu_domain *domain = group->default_domain;
758 struct iommu_resv_region *entry;
759 struct list_head mappings;
760 unsigned long pg_size;
763 if (!domain || domain->type != IOMMU_DOMAIN_DMA)
766 BUG_ON(!domain->pgsize_bitmap);
768 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
769 INIT_LIST_HEAD(&mappings);
771 iommu_get_resv_regions(dev, &mappings);
773 /* We need to consider overlapping regions for different devices */
774 list_for_each_entry(entry, &mappings, list) {
775 dma_addr_t start, end, addr;
778 if (domain->ops->apply_resv_region)
779 domain->ops->apply_resv_region(dev, domain, entry);
781 start = ALIGN(entry->start, pg_size);
782 end = ALIGN(entry->start + entry->length, pg_size);
784 if (entry->type != IOMMU_RESV_DIRECT &&
785 entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
788 for (addr = start; addr <= end; addr += pg_size) {
789 phys_addr_t phys_addr;
794 phys_addr = iommu_iova_to_phys(domain, addr);
802 ret = iommu_map(domain, addr - map_size,
803 addr - map_size, map_size,
813 iommu_flush_iotlb_all(domain);
816 iommu_put_resv_regions(dev, &mappings);
821 static bool iommu_is_attach_deferred(struct iommu_domain *domain,
824 if (domain->ops->is_attach_deferred)
825 return domain->ops->is_attach_deferred(domain, dev);
831 * iommu_group_add_device - add a device to an iommu group
832 * @group: the group into which to add the device (reference should be held)
835 * This function is called by an iommu driver to add a device into a
836 * group. Adding a device increments the group reference count.
838 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
841 struct group_device *device;
843 device = kzalloc(sizeof(*device), GFP_KERNEL);
849 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
851 goto err_free_device;
853 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
857 goto err_remove_link;
860 ret = sysfs_create_link_nowarn(group->devices_kobj,
861 &dev->kobj, device->name);
863 if (ret == -EEXIST && i >= 0) {
865 * Account for the slim chance of collision
866 * and append an instance to the name.
869 device->name = kasprintf(GFP_KERNEL, "%s.%d",
870 kobject_name(&dev->kobj), i++);
876 kobject_get(group->devices_kobj);
878 dev->iommu_group = group;
880 mutex_lock(&group->mutex);
881 list_add_tail(&device->list, &group->devices);
882 if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
883 ret = __iommu_attach_device(group->domain, dev);
884 mutex_unlock(&group->mutex);
888 /* Notify any listeners about change to group. */
889 blocking_notifier_call_chain(&group->notifier,
890 IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
892 trace_add_device_to_group(group->id, dev);
894 dev_info(dev, "Adding to iommu group %d\n", group->id);
899 mutex_lock(&group->mutex);
900 list_del(&device->list);
901 mutex_unlock(&group->mutex);
902 dev->iommu_group = NULL;
903 kobject_put(group->devices_kobj);
904 sysfs_remove_link(group->devices_kobj, device->name);
908 sysfs_remove_link(&dev->kobj, "iommu_group");
911 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
914 EXPORT_SYMBOL_GPL(iommu_group_add_device);
917 * iommu_group_remove_device - remove a device from its current group
918 * @dev: device to be removed
920 * This function is called by an iommu driver to remove the device from
921 * its current group. This decrements the iommu group reference count.
923 void iommu_group_remove_device(struct device *dev)
925 struct iommu_group *group = dev->iommu_group;
926 struct group_device *tmp_device, *device = NULL;
928 dev_info(dev, "Removing from iommu group %d\n", group->id);
930 /* Pre-notify listeners that a device is being removed. */
931 blocking_notifier_call_chain(&group->notifier,
932 IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
934 mutex_lock(&group->mutex);
935 list_for_each_entry(tmp_device, &group->devices, list) {
936 if (tmp_device->dev == dev) {
938 list_del(&device->list);
942 mutex_unlock(&group->mutex);
947 sysfs_remove_link(group->devices_kobj, device->name);
948 sysfs_remove_link(&dev->kobj, "iommu_group");
950 trace_remove_device_from_group(group->id, dev);
954 dev->iommu_group = NULL;
955 kobject_put(group->devices_kobj);
957 EXPORT_SYMBOL_GPL(iommu_group_remove_device);
959 static int iommu_group_device_count(struct iommu_group *group)
961 struct group_device *entry;
964 list_for_each_entry(entry, &group->devices, list)
971 * iommu_group_for_each_dev - iterate over each device in the group
973 * @data: caller opaque data to be passed to callback function
974 * @fn: caller supplied callback function
976 * This function is called by group users to iterate over group devices.
977 * Callers should hold a reference count to the group during callback.
978 * The group->mutex is held across callbacks, which will block calls to
979 * iommu_group_add/remove_device.
981 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
982 int (*fn)(struct device *, void *))
984 struct group_device *device;
987 list_for_each_entry(device, &group->devices, list) {
988 ret = fn(device->dev, data);
996 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
997 int (*fn)(struct device *, void *))
1001 mutex_lock(&group->mutex);
1002 ret = __iommu_group_for_each_dev(group, data, fn);
1003 mutex_unlock(&group->mutex);
1007 EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
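/*
 * Example (illustrative sketch): counting the devices in a group with a
 * trivial callback; a non-zero return from the callback would stop the
 * iteration early and be propagated to the caller:
 *
 *	static int count_dev(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_dev);
 */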
1010 * iommu_group_get - Return the group for a device and increment reference
1011 * @dev: get the group that this device belongs to
1013 * This function is called by iommu drivers and users to get the group
1014 * for the specified device. If found, the group is returned and the group
1015 * reference is incremented, else NULL.
1017 struct iommu_group *iommu_group_get(struct device *dev)
1019 struct iommu_group *group = dev->iommu_group;
1022 kobject_get(group->devices_kobj);
1026 EXPORT_SYMBOL_GPL(iommu_group_get);
1029 * iommu_group_ref_get - Increment reference on a group
1030 * @group: the group to use, must not be NULL
1032 * This function is called by iommu drivers to take additional references on an
1033 * existing group. Returns the given group for convenience.
1035 struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1037 kobject_get(group->devices_kobj);
1040 EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1043 * iommu_group_put - Decrement group reference
1044 * @group: the group to use
1046 * This function is called by iommu drivers and users to release the
1047 * iommu group. Once the reference count is zero, the group is released.
1049 void iommu_group_put(struct iommu_group *group)
1052 kobject_put(group->devices_kobj);
1054 EXPORT_SYMBOL_GPL(iommu_group_put);
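/*
 * Example (illustrative sketch): the usual get/put pattern around a short
 * use of a device's group:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (group) {
 *		dev_info(dev, "in group %d\n", iommu_group_id(group));
 *		iommu_group_put(group);
 *	}
 */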
1057 * iommu_group_register_notifier - Register a notifier for group changes
1058 * @group: the group to watch
1059 * @nb: notifier block to signal
1061 * This function allows iommu group users to track changes in a group.
1062 * See include/linux/iommu.h for actions sent via this notifier. Caller
1063 * should hold a reference to the group throughout notifier registration.
1065 int iommu_group_register_notifier(struct iommu_group *group,
1066 struct notifier_block *nb)
1068 return blocking_notifier_chain_register(&group->notifier, nb);
1070 EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
1073 * iommu_group_unregister_notifier - Unregister a notifier
1074 * @group: the group to watch
1075 * @nb: notifier block to signal
1077 * Unregister a previously registered group notifier block.
1079 int iommu_group_unregister_notifier(struct iommu_group *group,
1080 struct notifier_block *nb)
1082 return blocking_notifier_chain_unregister(&group->notifier, nb);
1084 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
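/*
 * Example (illustrative sketch): a group user watching driver bind events on
 * the group's notifier chain:
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_BOUND_DRIVER)
 *			dev_info(dev, "driver bound\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */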
1087 * iommu_register_device_fault_handler() - Register a device fault handler
1089 * @handler: the fault handler
1090 * @data: private data passed as argument to the handler
1092 * When an IOMMU fault event is received, this handler gets called with the
1093 * fault event and data as argument. The handler should return 0 on success. If
1094 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
1095 * complete the fault by calling iommu_page_response() with one of the following
1097 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
1098 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
1099 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
1100 * page faults if possible.
1102 * Return 0 if the fault handler was installed successfully, or an error.
1104 int iommu_register_device_fault_handler(struct device *dev,
1105 iommu_dev_fault_handler_t handler,
1108 struct dev_iommu *param = dev->iommu;
1114 mutex_lock(&param->lock);
1115 /* Only allow one fault handler registered for each device */
1116 if (param->fault_param) {
1122 param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1123 if (!param->fault_param) {
1128 param->fault_param->handler = handler;
1129 param->fault_param->data = data;
1130 param->fault_param->handler = handler;
1130 mutex_init(&param->fault_param->lock);
1131 INIT_LIST_HEAD(&param->fault_param->faults);
1134 mutex_unlock(&param->lock);
1138 EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
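/*
 * Example (illustrative sketch): a consumer installing a handler; for
 * recoverable page requests it would later complete the fault with
 * iommu_page_response() (my_prq_work is a hypothetical deferred handler):
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		if (fault->type == IOMMU_FAULT_PAGE_REQ)
 *			schedule_work(&my_prq_work);
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler, NULL);
 */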
1141 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1144 * Remove the device fault handler installed with
1145 * iommu_register_device_fault_handler().
1147 * Return 0 on success, or an error.
1149 int iommu_unregister_device_fault_handler(struct device *dev)
1151 struct dev_iommu *param = dev->iommu;
1157 mutex_lock(&param->lock);
1159 if (!param->fault_param)
1162 /* we cannot unregister handler if there are pending faults */
1163 if (!list_empty(&param->fault_param->faults)) {
1168 kfree(param->fault_param);
1169 param->fault_param = NULL;
1172 mutex_unlock(&param->lock);
1176 EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1179 * iommu_report_device_fault() - Report fault event to device driver
1181 * @evt: fault event data
1183 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1184 * handler. When this function fails and the fault is recoverable, it is the
1185 * caller's responsibility to complete the fault.
1187 * Return 0 on success, or an error.
1189 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1191 struct dev_iommu *param = dev->iommu;
1192 struct iommu_fault_event *evt_pending = NULL;
1193 struct iommu_fault_param *fparam;
1199 /* we only report device fault if there is a handler registered */
1200 mutex_lock(&param->lock);
1201 fparam = param->fault_param;
1202 if (!fparam || !fparam->handler) {
1207 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1208 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1209 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1215 mutex_lock(&fparam->lock);
1216 list_add_tail(&evt_pending->list, &fparam->faults);
1217 mutex_unlock(&fparam->lock);
1220 ret = fparam->handler(&evt->fault, fparam->data);
1221 if (ret && evt_pending) {
1222 mutex_lock(&fparam->lock);
1223 list_del(&evt_pending->list);
1224 mutex_unlock(&fparam->lock);
1228 mutex_unlock(&param->lock);
1231 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1233 int iommu_page_response(struct device *dev,
1234 struct iommu_page_response *msg)
1238 struct iommu_fault_event *evt;
1239 struct iommu_fault_page_request *prm;
1240 struct dev_iommu *param = dev->iommu;
1241 bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
1242 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1244 if (!domain || !domain->ops->page_response)
1247 if (!param || !param->fault_param)
1250 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1251 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1254 /* Only send response if there is a fault report pending */
1255 mutex_lock(&param->fault_param->lock);
1256 if (list_empty(&param->fault_param->faults)) {
1257 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1261 * Check if we have a matching page request pending to respond,
1262 * otherwise return -EINVAL
1264 list_for_each_entry(evt, &param->fault_param->faults, list) {
1265 prm = &evt->fault.prm;
1266 if (prm->grpid != msg->grpid)
1270 * If the PASID is required, the corresponding request is
1271 * matched using the group ID, the PASID valid bit and the PASID
1272 * value. Otherwise only the group ID matches request and response.
1275 needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
1276 if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
1279 if (!needs_pasid && has_pasid) {
1280 /* No big deal, just clear it. */
1281 msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
1285 ret = domain->ops->page_response(dev, evt, msg);
1286 list_del(&evt->list);
1292 mutex_unlock(&param->fault_param->lock);
1295 EXPORT_SYMBOL_GPL(iommu_page_response);
1298 * iommu_group_id - Return ID for a group
1299 * @group: the group to ID
1301 * Return the unique ID for the group matching the sysfs group number.
1303 int iommu_group_id(struct iommu_group *group)
1307 EXPORT_SYMBOL_GPL(iommu_group_id);
1309 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1310 unsigned long *devfns);
1313 * To consider a PCI device isolated, we require ACS to support Source
1314 * Validation, Request Redirection, Completer Redirection, and Upstream
1315 * Forwarding. This effectively means that devices cannot spoof their
1316 * requester ID, requests and completions cannot be redirected, and all
1317 * transactions are forwarded upstream, even as they pass through a
1318 * bridge where the target device is downstream.
1320 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1323 * For multifunction devices which are not isolated from each other, find
1324 * all the other non-isolated functions and look for existing groups. For
1325 * each function, we also need to look for aliases to or from other devices
1326 * that may already have a group.
1328 static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1329 unsigned long *devfns)
1331 struct pci_dev *tmp = NULL;
1332 struct iommu_group *group;
1334 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1337 for_each_pci_dev(tmp) {
1338 if (tmp == pdev || tmp->bus != pdev->bus ||
1339 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1340 pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1343 group = get_pci_alias_group(tmp, devfns);
1354 * Look for aliases to or from the given device for existing groups. DMA
1355 * aliases are only supported on the same bus, therefore the search
1356 * space is quite small (especially since we're really only looking at PCIe
1357 * devices, and therefore only expect multiple slots on the root complex or
1358 * downstream switch ports). It's conceivable though that a pair of
1359 * multifunction devices could have aliases between them that would cause a
1360 * loop. To prevent this, we use a bitmap to track where we've been.
1362 static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1363 unsigned long *devfns)
1365 struct pci_dev *tmp = NULL;
1366 struct iommu_group *group;
1368 if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1371 group = iommu_group_get(&pdev->dev);
1375 for_each_pci_dev(tmp) {
1376 if (tmp == pdev || tmp->bus != pdev->bus)
1379 /* We alias them or they alias us */
1380 if (pci_devs_are_dma_aliases(pdev, tmp)) {
1381 group = get_pci_alias_group(tmp, devfns);
1387 group = get_pci_function_alias_group(tmp, devfns);
1398 struct group_for_pci_data {
1399 struct pci_dev *pdev;
1400 struct iommu_group *group;
1404 * DMA alias iterator callback, return the last seen device. Stop and return
1405 * the IOMMU group if we find one along the way.
1407 static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1409 struct group_for_pci_data *data = opaque;
1412 data->group = iommu_group_get(&pdev->dev);
1414 return data->group != NULL;
1418 * Generic device_group call-back function. It just allocates one
1419 * iommu-group per device.
1421 struct iommu_group *generic_device_group(struct device *dev)
1423 return iommu_group_alloc();
1425 EXPORT_SYMBOL_GPL(generic_device_group);
1428 * Use standard PCI bus topology, isolation features, and DMA alias quirks
1429 * to find or create an IOMMU group for a device.
1431 struct iommu_group *pci_device_group(struct device *dev)
1433 struct pci_dev *pdev = to_pci_dev(dev);
1434 struct group_for_pci_data data;
1435 struct pci_bus *bus;
1436 struct iommu_group *group = NULL;
1437 u64 devfns[4] = { 0 };
1439 if (WARN_ON(!dev_is_pci(dev)))
1440 return ERR_PTR(-EINVAL);
1443 * Find the upstream DMA alias for the device. A device must not
1444 * be aliased due to topology in order to have its own IOMMU group.
1445 * If we find an alias along the way that already belongs to a group, use it.
1448 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1454 * Continue upstream from the point of minimum IOMMU granularity
1455 * due to aliases to the point where devices are protected from
1456 * peer-to-peer DMA by PCI ACS. Again, if we find an existing group, use it.
1459 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1463 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1468 group = iommu_group_get(&pdev->dev);
1474 * Look for existing groups on device aliases. If we alias another
1475 * device or another device aliases us, use the same group.
1477 group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1482 * Look for existing groups on non-isolated functions on the same
1483 * slot and aliases of those functions, if any. No need to clear
1484 * the search bitmap, the tested devfns are still valid.
1486 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1490 /* No shared group found, allocate new */
1491 return iommu_group_alloc();
1493 EXPORT_SYMBOL_GPL(pci_device_group);
1495 /* Get the IOMMU group for device on fsl-mc bus */
1496 struct iommu_group *fsl_mc_device_group(struct device *dev)
1498 struct device *cont_dev = fsl_mc_cont_dev(dev);
1499 struct iommu_group *group;
1501 group = iommu_group_get(cont_dev);
1503 group = iommu_group_alloc();
1506 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1508 static int iommu_get_def_domain_type(struct device *dev)
1510 const struct iommu_ops *ops = dev->bus->iommu_ops;
1512 if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
1513 return IOMMU_DOMAIN_DMA;
1515 if (ops->def_domain_type)
1516 return ops->def_domain_type(dev);
1521 static int iommu_group_alloc_default_domain(struct bus_type *bus,
1522 struct iommu_group *group,
1525 struct iommu_domain *dom;
1527 dom = __iommu_domain_alloc(bus, type);
1528 if (!dom && type != IOMMU_DOMAIN_DMA) {
1529 dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1531 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA\n",
1538 group->default_domain = dom;
1540 group->domain = dom;
1544 static int iommu_alloc_default_domain(struct iommu_group *group,
1549 if (group->default_domain)
1552 type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
1554 return iommu_group_alloc_default_domain(dev->bus, group, type);
1558 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1559 * @dev: target device
1561 * This function is intended to be called by IOMMU drivers and extended to
1562 * support common, bus-defined algorithms when determining or creating the
1563 * IOMMU group for a device. On success, the caller will hold a reference
1564 * to the returned IOMMU group, which will already include the provided
1565 * device. The reference should be released with iommu_group_put().
1567 static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1569 const struct iommu_ops *ops = dev->bus->iommu_ops;
1570 struct iommu_group *group;
1573 group = iommu_group_get(dev);
1578 return ERR_PTR(-EINVAL);
1580 group = ops->device_group(dev);
1581 if (WARN_ON_ONCE(group == NULL))
1582 return ERR_PTR(-EINVAL);
1587 ret = iommu_group_add_device(group, dev);
1594 iommu_group_put(group);
1596 return ERR_PTR(ret);
1599 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1601 return group->default_domain;
1604 static int probe_iommu_group(struct device *dev, void *data)
1606 struct list_head *group_list = data;
1607 struct iommu_group *group;
1610 /* Device is probed already if in a group */
1611 group = iommu_group_get(dev);
1613 iommu_group_put(group);
1617 ret = __iommu_probe_device(dev, group_list);
1624 static int remove_iommu_group(struct device *dev, void *data)
1626 iommu_release_device(dev);
1631 static int iommu_bus_notifier(struct notifier_block *nb,
1632 unsigned long action, void *data)
1634 unsigned long group_action = 0;
1635 struct device *dev = data;
1636 struct iommu_group *group;
1639 * ADD/DEL call into iommu driver ops if provided, which may
1640 * result in ADD/DEL notifiers to group->notifier
1642 if (action == BUS_NOTIFY_ADD_DEVICE) {
1645 ret = iommu_probe_device(dev);
1646 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1647 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1648 iommu_release_device(dev);
1653 * Remaining BUS_NOTIFYs get filtered and republished to the
1654 * group, if anyone is listening
1656 group = iommu_group_get(dev);
1661 case BUS_NOTIFY_BIND_DRIVER:
1662 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1664 case BUS_NOTIFY_BOUND_DRIVER:
1665 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1667 case BUS_NOTIFY_UNBIND_DRIVER:
1668 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1670 case BUS_NOTIFY_UNBOUND_DRIVER:
1671 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1676 blocking_notifier_call_chain(&group->notifier,
1679 iommu_group_put(group);
1683 struct __group_domain_type {
1688 static int probe_get_default_domain_type(struct device *dev, void *data)
1690 struct __group_domain_type *gtype = data;
1691 unsigned int type = iommu_get_def_domain_type(dev);
1694 if (gtype->type && gtype->type != type) {
1695 dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1696 iommu_domain_type_str(type),
1697 dev_name(gtype->dev),
1698 iommu_domain_type_str(gtype->type));
1711 static void probe_alloc_default_domain(struct bus_type *bus,
1712 struct iommu_group *group)
1714 struct __group_domain_type gtype;
1716 memset(&gtype, 0, sizeof(gtype));
1718 /* Ask for default domain requirements of all devices in the group */
1719 __iommu_group_for_each_dev(group, &gtype,
1720 probe_get_default_domain_type);
1723 gtype.type = iommu_def_domain_type;
1725 iommu_group_alloc_default_domain(bus, group, gtype.type);
1729 static int iommu_group_do_dma_attach(struct device *dev, void *data)
1731 struct iommu_domain *domain = data;
1734 if (!iommu_is_attach_deferred(domain, dev))
1735 ret = __iommu_attach_device(domain, dev);
1740 static int __iommu_group_dma_attach(struct iommu_group *group)
1742 return __iommu_group_for_each_dev(group, group->default_domain,
1743 iommu_group_do_dma_attach);
1746 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
1748 struct iommu_domain *domain = data;
1750 if (domain->ops->probe_finalize)
1751 domain->ops->probe_finalize(dev);
1756 static void __iommu_group_dma_finalize(struct iommu_group *group)
1758 __iommu_group_for_each_dev(group, group->default_domain,
1759 iommu_group_do_probe_finalize);
1762 static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1764 struct iommu_group *group = data;
1766 iommu_create_device_direct_mappings(group, dev);
1771 static int iommu_group_create_direct_mappings(struct iommu_group *group)
1773 return __iommu_group_for_each_dev(group, group,
1774 iommu_do_create_direct_mappings);
1777 int bus_iommu_probe(struct bus_type *bus)
1779 struct iommu_group *group, *next;
1780 LIST_HEAD(group_list);
1784 * This code-path does not allocate the default domain when
1785 * creating the iommu group, so do it after the groups are created.
1788 ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1792 list_for_each_entry_safe(group, next, &group_list, entry) {
1793 /* Remove item from the list */
1794 list_del_init(&group->entry);
1796 mutex_lock(&group->mutex);
1798 /* Try to allocate default domain */
1799 probe_alloc_default_domain(bus, group);
1801 if (!group->default_domain) {
1802 mutex_unlock(&group->mutex);
1806 iommu_group_create_direct_mappings(group);
1808 ret = __iommu_group_dma_attach(group);
1810 mutex_unlock(&group->mutex);
1815 __iommu_group_dma_finalize(group);
1821 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1823 struct notifier_block *nb;
1826 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1830 nb->notifier_call = iommu_bus_notifier;
1832 err = bus_register_notifier(bus, nb);
1836 err = bus_iommu_probe(bus);
1845 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1846 bus_unregister_notifier(bus, nb);
1855 * bus_set_iommu - set iommu-callbacks for the bus
1857 * @ops: the callbacks provided by the iommu-driver
1859 * This function is called by an iommu driver to set the iommu methods
1860 * used for a particular bus. Drivers for devices on that bus can use
1861 * the iommu-api after these ops are registered.
1862 * This special function is needed because IOMMUs are usually devices on
1863 * the bus itself, so the iommu drivers are not initialized when the bus
1864 * is set up. With this function the iommu-driver can set the iommu-ops afterwards.
1867 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1872 bus->iommu_ops = NULL;
1876 if (bus->iommu_ops != NULL)
1879 bus->iommu_ops = ops;
1881 /* Do IOMMU specific setup for this bus-type */
1882 err = iommu_bus_init(bus, ops);
1884 bus->iommu_ops = NULL;
1888 EXPORT_SYMBOL_GPL(bus_set_iommu);
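/*
 * Example (illustrative sketch): an IOMMU driver publishing its ops for all
 * devices on the platform bus once its own setup has succeeded:
 *
 *	if (!iommu_present(&platform_bus_type))
 *		bus_set_iommu(&platform_bus_type, &mydrv_iommu_ops);
 */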
1890 bool iommu_present(struct bus_type *bus)
1892 return bus->iommu_ops != NULL;
1894 EXPORT_SYMBOL_GPL(iommu_present);
1896 bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1898 if (!bus->iommu_ops || !bus->iommu_ops->capable)
1901 return bus->iommu_ops->capable(cap);
1903 EXPORT_SYMBOL_GPL(iommu_capable);
1906 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1907 * @domain: iommu domain
1908 * @handler: fault handler
1909 * @token: user data, will be passed back to the fault handler
1911 * This function should be used by IOMMU users which want to be notified
1912 * whenever an IOMMU fault happens.
1914 * The fault handler itself should return 0 on success, and an appropriate
1915 * error code otherwise.
1917 void iommu_set_fault_handler(struct iommu_domain *domain,
1918 iommu_fault_handler_t handler,
1923 domain->handler = handler;
1924 domain->handler_token = token;
1926 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
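/*
 * Example (illustrative sketch): an owner of an unmanaged domain logging
 * faults that the IOMMU driver reports via report_iommu_fault():
 *
 *	static int my_fault(struct iommu_domain *domain, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n",
 *			iova, flags);
 *		return -ENOSYS;		// not handled; driver keeps reporting
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, NULL);
 */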
1928 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1931 struct iommu_domain *domain;
1933 if (bus == NULL || bus->iommu_ops == NULL)
1936 domain = bus->iommu_ops->domain_alloc(type);
1940 domain->ops = bus->iommu_ops;
1941 domain->type = type;
1942 /* Assume all sizes by default; the driver may override this later */
1943 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1948 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1950 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1952 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1954 void iommu_domain_free(struct iommu_domain *domain)
1956 domain->ops->domain_free(domain);
1958 EXPORT_SYMBOL_GPL(iommu_domain_free);
1960 static int __iommu_attach_device(struct iommu_domain *domain,
1965 if (unlikely(domain->ops->attach_dev == NULL))
1968 ret = domain->ops->attach_dev(domain, dev);
1970 trace_attach_device_to_domain(dev);
1974 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1976 struct iommu_group *group;
1979 group = iommu_group_get(dev);
1984 * Lock the group to make sure the device-count doesn't
1985 * change while we are attaching
1987 mutex_lock(&group->mutex);
1989 if (iommu_group_device_count(group) != 1)
1992 ret = __iommu_attach_group(domain, group);
1995 mutex_unlock(&group->mutex);
1996 iommu_group_put(group);
2000 EXPORT_SYMBOL_GPL(iommu_attach_device);
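/*
 * Example (illustrative sketch, with hypothetical iova/paddr values): the
 * typical lifecycle of an unmanaged domain owned by a kernel driver:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free;
 *	if (iommu_map(domain, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE))
 *		goto err_detach;
 *	...
 *	iommu_unmap(domain, iova, SZ_2M);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */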
2002 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
2004 const struct iommu_ops *ops = domain->ops;
2006 if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
2007 return __iommu_attach_device(domain, dev);
2013 * Check flags and other user provided data for valid combinations. We also
2014 * make sure no reserved fields or unused flags are set. This is to ensure
2015 * not breaking userspace in the future when these fields or flags are used.
2017 static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
2022 if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
2025 mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
2026 if (info->cache & ~mask)
2029 if (info->granularity >= IOMMU_INV_GRANU_NR)
2032 switch (info->granularity) {
2033 case IOMMU_INV_GRANU_ADDR:
2034 if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
2037 mask = IOMMU_INV_ADDR_FLAGS_PASID |
2038 IOMMU_INV_ADDR_FLAGS_ARCHID |
2039 IOMMU_INV_ADDR_FLAGS_LEAF;
2041 if (info->granu.addr_info.flags & ~mask)
2044 case IOMMU_INV_GRANU_PASID:
2045 mask = IOMMU_INV_PASID_FLAGS_PASID |
2046 IOMMU_INV_PASID_FLAGS_ARCHID;
2047 if (info->granu.pasid_info.flags & ~mask)
2051 case IOMMU_INV_GRANU_DOMAIN:
2052 if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
2059 /* Check reserved padding fields */
2060 for (i = 0; i < sizeof(info->padding); i++) {
2061 if (info->padding[i])
2068 int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
2071 struct iommu_cache_invalidate_info inv_info = { 0 };
2075 if (unlikely(!domain->ops->cache_invalidate))
2079 * No new spaces can be added before the variable sized union, the
2080 * minimum size is the offset to the union.
2082 minsz = offsetof(struct iommu_cache_invalidate_info, granu);
2084 /* Copy minsz from user to get flags and argsz */
2085 if (copy_from_user(&inv_info, uinfo, minsz))
2088 /* Fields before the variable size union are mandatory */
2089 if (inv_info.argsz < minsz)
2092 /* PASID and address granu require additional info beyond minsz */
2093 if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
2094 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
2097 if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
2098 inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
2102 * User might be using a newer UAPI header which has a larger data
2103 * size, we shall support the existing flags within the current
2104 * size. Copy the remaining user data _after_ minsz but not more
2105 * than the current kernel supported size.
2107 if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
2108 min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
2111 /* Now the argsz is validated, check the content */
2112 ret = iommu_check_cache_invl_data(&inv_info);
2116 return domain->ops->cache_invalidate(domain, dev, &inv_info);
2118 EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
2120 static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
2125 if (data->version != IOMMU_GPASID_BIND_VERSION_1)
2128 /* Check the range of supported formats */
2129 if (data->format >= IOMMU_PASID_FORMAT_LAST)
2132 /* Check all flags */
2133 mask = IOMMU_SVA_GPASID_VAL;
2134 if (data->flags & ~mask)
2137 /* Check reserved padding fields */
2138 for (i = 0; i < sizeof(data->padding); i++) {
2139 if (data->padding[i])
2146 static int iommu_sva_prepare_bind_data(void __user *udata,
2147 struct iommu_gpasid_bind_data *data)
2152 * No new spaces can be added before the variable sized union, the
2153 * minimum size is the offset to the union.
2155 minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
2157 /* Copy minsz from user to get flags and argsz */
2158 if (copy_from_user(data, udata, minsz))
2161 /* Fields before the variable size union are mandatory */
2162 if (data->argsz < minsz)
2165 * User might be using a newer UAPI header, we shall let IOMMU vendor
2166 * driver decide on what size it needs. Since the guest PASID bind data
2167 * can be vendor specific, larger argsz could be the result of extension
2168 * for one vendor but it should not affect another vendor.
2169 * Copy the remaining user data _after_ minsz
2171 if (copy_from_user((void *)data + minsz, udata + minsz,
2172 min_t(u32, data->argsz, sizeof(*data)) - minsz))
2175 return iommu_check_bind_data(data);
2178 int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
2181 struct iommu_gpasid_bind_data data = { 0 };
2184 if (unlikely(!domain->ops->sva_bind_gpasid))
2187 ret = iommu_sva_prepare_bind_data(udata, &data);
2191 return domain->ops->sva_bind_gpasid(domain, dev, &data);
2193 EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
2195 int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2198 if (unlikely(!domain->ops->sva_unbind_gpasid))
2201 return domain->ops->sva_unbind_gpasid(dev, pasid);
2203 EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2205 int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2208 struct iommu_gpasid_bind_data data = { 0 };
2211 if (unlikely(!domain->ops->sva_bind_gpasid))
2214 ret = iommu_sva_prepare_bind_data(udata, &data);
2218 return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2220 EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
2222 static void __iommu_detach_device(struct iommu_domain *domain,
2225 if (iommu_is_attach_deferred(domain, dev))
2228 if (unlikely(domain->ops->detach_dev == NULL))
2231 domain->ops->detach_dev(domain, dev);
2232 trace_detach_device_from_domain(dev);
2235 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2237 struct iommu_group *group;
2239 group = iommu_group_get(dev);
2243 mutex_lock(&group->mutex);
2244 if (iommu_group_device_count(group) != 1) {
2249 __iommu_detach_group(domain, group);
2252 mutex_unlock(&group->mutex);
2253 iommu_group_put(group);
2255 EXPORT_SYMBOL_GPL(iommu_detach_device);
2257 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2259 struct iommu_domain *domain;
2260 struct iommu_group *group;
2262 group = iommu_group_get(dev);
2266 domain = group->domain;
2268 iommu_group_put(group);
2272 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2275 * For IOMMU_DOMAIN_DMA implementations which already provide their own
2276 * guarantees that the group and its default domain are valid and correct.
2278 struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2280 return dev->iommu_group->default_domain;
2284 * IOMMU groups are really the natural working unit of the IOMMU, but
2285 * the IOMMU API works on domains and devices. Bridge that gap by
2286 * iterating over the devices in a group. Ideally we'd have a single
2287 * device which represents the requestor ID of the group, but we also
2288 * allow IOMMU drivers to create policy defined minimum sets, where
2289 * the physical hardware may be able to distinguish members, but we
2290 * wish to group them at a higher level (ex. untrusted multi-function
2291 * PCI devices). Thus we attach each device.
2293 static int iommu_group_do_attach_device(struct device *dev, void *data)
2295 struct iommu_domain *domain = data;
2297 return __iommu_attach_device(domain, dev);
2300 static int __iommu_attach_group(struct iommu_domain *domain,
2301 struct iommu_group *group)
2305 if (group->default_domain && group->domain != group->default_domain)
2308 ret = __iommu_group_for_each_dev(group, domain,
2309 iommu_group_do_attach_device);
2311 group->domain = domain;
2316 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2320 mutex_lock(&group->mutex);
2321 ret = __iommu_attach_group(domain, group);
2322 mutex_unlock(&group->mutex);
2326 EXPORT_SYMBOL_GPL(iommu_attach_group);
2328 static int iommu_group_do_detach_device(struct device *dev, void *data)
2330 struct iommu_domain *domain = data;
2332 __iommu_detach_device(domain, dev);
2337 static void __iommu_detach_group(struct iommu_domain *domain,
2338 struct iommu_group *group)
2342 if (!group->default_domain) {
2343 __iommu_group_for_each_dev(group, domain,
2344 iommu_group_do_detach_device);
2345 group->domain = NULL;
2349 if (group->domain == group->default_domain)
2352 /* Detach by re-attaching to the default domain */
2353 ret = __iommu_group_for_each_dev(group, group->default_domain,
2354 iommu_group_do_attach_device);
2358 group->domain = group->default_domain;
2361 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2363 mutex_lock(&group->mutex);
2364 __iommu_detach_group(domain, group);
2365 mutex_unlock(&group->mutex);
2367 EXPORT_SYMBOL_GPL(iommu_detach_group);
2369 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2371 if (unlikely(domain->ops->iova_to_phys == NULL))
2374 return domain->ops->iova_to_phys(domain, iova);
2376 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2378 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2379 phys_addr_t paddr, size_t size, size_t *count)
2381 unsigned int pgsize_idx, pgsize_idx_next;
2382 unsigned long pgsizes;
2383 size_t offset, pgsize, pgsize_next;
2384 unsigned long addr_merge = paddr | iova;
2386 /* Page sizes supported by the hardware and small enough for @size */
2387 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2389 /* Constrain the page sizes further based on the maximum alignment */
2390 if (likely(addr_merge))
2391 pgsizes &= GENMASK(__ffs(addr_merge), 0);
2393 /* Make sure we have at least one suitable page size */
2396 /* Pick the biggest page size remaining */
2397 pgsize_idx = __fls(pgsizes);
2398 pgsize = BIT(pgsize_idx);
2402 /* Find the next biggest supported page size, if it exists */
2403 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2407 pgsize_idx_next = __ffs(pgsizes);
2408 pgsize_next = BIT(pgsize_idx_next);
2411 * There's no point trying a bigger page size unless the virtual
2412 * and physical addresses are similarly offset within the larger page.
2414 if ((iova ^ paddr) & (pgsize_next - 1))
2417 /* Calculate the offset to the next page size alignment boundary */
2418 offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2421 * If size is big enough to accommodate the larger page, reduce
2422 * the number of smaller pages.
2424 if (offset + pgsize_next <= size)
2428 *count = size >> pgsize_idx;
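/*
 * Worked example (illustrative): with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
 * iova = paddr = 0xff000 and size = 0x400000, the alignment of addr_merge
 * restricts pgsizes to bit 12, so pgsize = SZ_4K.  The next bigger supported
 * size is SZ_2M; the distance to the next 2M boundary is
 * offset = 0x200000 - 0xff000 = 0x101000, and since offset + SZ_2M <= size
 * the length is clamped to 0x101000, giving *count = 0x101 4K pages.  The
 * following call then starts on a 2M boundary and can use 2M pages.
 */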
static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot,
			     gfp_t gfp, size_t *mapped)
{
	const struct iommu_ops *ops = domain->ops;
	size_t pgsize, count;
	int ret;

	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
		 iova, &paddr, pgsize, count);

	if (ops->map_pages) {
		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
				     gfp, mapped);
	} else {
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
		*mapped = ret ? 0 : pgsize;
	}

	return ret;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(!(ops->map || ops->map_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t mapped = 0;

		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
					&mapped);
		/*
		 * Some pages may have been mapped, even if an error occurred,
		 * so we should account for those so they can be unmapped.
		 */
		size -= mapped;
		if (ret)
			break;

		iova += mapped;
		paddr += mapped;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	int ret;

	ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
	if (ret == 0 && ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	might_sleep();
	return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);
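/*
 * Example usage (editor's sketch, not from the original file): a caller that
 * owns an UNMANAGED domain, e.g. allocated with iommu_domain_alloc() and
 * attached via iommu_attach_device() or iommu_attach_group(), might map one
 * page like this; "my_iova" and "page" are hypothetical driver state:
 *
 *	ret = iommu_map(domain, my_iova, page_to_phys(page), PAGE_SIZE,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *
 * iommu_map_atomic() takes the same arguments but allocates with GFP_ATOMIC,
 * for callers that cannot sleep.
 */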
static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
	return ops->unmap_pages ?
	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
	       ops->unmap(domain, iova, pgsize, iotlb_gather);
}

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __iommu_unmap_pages(domain, iova,
						    size - unmapped,
						    iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
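/*
 * Example usage (editor's sketch): iommu_unmap() syncs the IOTLB before
 * returning, while a batching caller can use iommu_unmap_fast() with its own
 * gather structure and issue one sync at the end.  "iova_a" and "iova_b" are
 * hypothetical:
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova_a, SZ_4K, &gather);
 *	iommu_unmap_fast(domain, iova_b, SZ_4K, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */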
static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			     struct scatterlist *sg, unsigned int nents, int prot,
			     gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					len, prot, gfp);
			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
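/*
 * Example usage (editor's sketch): mapping a scatterlist a driver has already
 * built.  "st" (an sg_table) and "total_len" are hypothetical.  The return
 * value is the number of bytes mapped; a short result means the mapping
 * failed and any partial mappings were already torn down:
 *
 *	mapped = iommu_map_sg(domain, iova, st->sgl, st->orig_nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < total_len)
 *		return -ENOMEM;
 */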
/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * If upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
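/*
 * Example (editor's sketch): a device driver interested in faults installs a
 * handler on its domain with iommu_set_fault_handler(); the IOMMU driver then
 * forwards hardware faults through report_iommu_fault().  The handler and
 * "my_token" below are hypothetical:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "unhandled %s fault at 0x%lx\n",
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, my_token);
 *
 * Returning -ENOSYS from the handler asks the IOMMU driver to apply its
 * default behavior, as described above.
 */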
static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_enable_nesting(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->enable_nesting)
		return -EINVAL;
	return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
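/*
 * Example (editor's sketch): both helpers apply only to UNMANAGED domains and
 * are intended to be called by users such as VFIO before the domain is
 * attached.  Illustrative only:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_enable_nesting(domain);
 *	if (ret)
 *		return ret;
 */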
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
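/*
 * Example (editor's sketch): an IOMMU driver's ->get_resv_regions() callback
 * could advertise a software-managed MSI window like this.  The base address,
 * size and protection flags below are purely illustrative:
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 *
 * Pairing this with .put_resv_regions = generic_iommu_put_resv_regions is
 * sufficient when nothing beyond the regions themselves is allocated.
 */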
void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
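/*
 * Example (editor's sketch): firmware glue (OF/ACPI) typically initialises a
 * device's fwspec against the IOMMU instance named by the firmware and then
 * records each master ID it parses.  "iommu_np" and "sid" are hypothetical:
 *
 *	ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_np), ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 */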
/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_feat_enabled)
			return ops->dev_feat_enabled(dev, feat);
	}

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
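/*
 * Example (editor's sketch): a device driver that wants shared virtual
 * addressing would typically enable the feature once at probe time:
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		dev_info(dev, "SVA not available, using private mappings\n");
 */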
/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any attempt to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
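/*
 * Example (editor's sketch): a caller such as VFIO/mdev that has enabled
 * IOMMU_DEV_FEAT_AUX can attach an extra UNMANAGED domain to the device and
 * query the PASID the hardware will use for it:
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	ret = iommu_aux_attach_device(domain, dev);
 *	if (!ret)
 *		pasid = iommu_aux_get_pasid(domain, dev);
 */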
/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to the bind callback
 *
 * Create a bond between device and address space, allowing the device to access
 * the mm using the returned PASID. If a bond already exists between @device and
 * @mm, it is returned and an additional reference is taken. Caller must call
 * iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
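/*
 * Example (editor's sketch): after enabling IOMMU_DEV_FEAT_SVA, a driver can
 * bind the current process' address space and program the returned PASID into
 * its hardware; error handling is trimmed for brevity:
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program pasid into the device and issue DMA ...
 *	iommu_sva_unbind_device(handle);
 */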
/*
 * Changes the default domain of an iommu group that has *only* one device
 *
 * @group: The group for which the default domain should be changed
 * @prev_dev: The device in the group (this is used to make sure that the device
 *	 hasn't changed after the caller has called this function)
 * @type: The type of the new default domain that gets associated with the group
 *
 * Returns 0 on success and error code on failure
 *
 * Note:
 * 1. Presently, this function is called only when user requests to change the
 *    group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
 *    Please take a closer look if intended to use for other purposes.
 */
static int iommu_change_dev_def_domain(struct iommu_group *group,
					struct device *prev_dev, int type)
{
	struct iommu_domain *prev_dom;
	struct group_device *grp_dev;
	int ret, dev_def_dom;
	struct device *dev;

	mutex_lock(&group->mutex);

	if (group->default_domain != group->domain) {
		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * iommu group wasn't locked while acquiring device lock in
	 * iommu_group_store_type(). So, make sure that the device count hasn't
	 * changed while acquiring device lock.
	 *
	 * Changing default domain of an iommu group with two or more devices
	 * isn't supported because there could be a potential deadlock. Consider
	 * the following scenario. T1 is trying to acquire device locks of all
	 * the devices in the group and before it could acquire all of them,
	 * there could be another thread T2 (from different sub-system and use
	 * case) that has already acquired some of the device locks and might be
	 * waiting for T1 to release other device locks.
	 */
	if (iommu_group_device_count(group) != 1) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;

	if (prev_dev != dev) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
		ret = -EBUSY;
		goto out;
	}

	prev_dom = group->default_domain;
	if (!prev_dom) {
		ret = -EINVAL;
		goto out;
	}

	dev_def_dom = iommu_get_def_domain_type(dev);
	if (!type) {
		/*
		 * If the user hasn't requested any specific type of domain and
		 * if the device supports both the domains, then default to the
		 * domain the device was booted with
		 */
		type = dev_def_dom ? : iommu_def_domain_type;
	} else if (dev_def_dom && type != dev_def_dom) {
		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
				    iommu_domain_type_str(type));
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Switch to a new domain only if the requested domain type is different
	 * from the existing default domain type
	 */
	if (prev_dom->type == type) {
		ret = 0;
		goto out;
	}

	/* Sets group->default_domain to the newly allocated domain */
	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
	if (ret)
		goto out;

	ret = iommu_create_device_direct_mappings(group, dev);
	if (ret)
		goto free_new_domain;

	ret = __iommu_attach_device(group->default_domain, dev);
	if (ret)
		goto free_new_domain;

	group->domain = group->default_domain;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
	 * in-turn might call back into IOMMU core code, where it tries to take
	 * group->mutex, resulting in a deadlock.
	 */
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	iommu_group_do_probe_finalize(dev, group->default_domain);
	iommu_domain_free(prev_dom);
	return 0;

free_new_domain:
	iommu_domain_free(group->default_domain);
	group->default_domain = prev_dom;
	group->domain = prev_dom;

out:
	mutex_unlock(&group->mutex);

	return ret;
}
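/*
 * Example (editor's sketch): from user space, the default domain type of a
 * single-device group can be changed once the driver is unbound.  The device,
 * driver and group numbers below are illustrative:
 *
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/<driver>/unbind
 *	# echo DMA > /sys/kernel/iommu_groups/42/type
 *	# echo 0000:00:02.0 > /sys/bus/pci/drivers/<driver>/bind
 *
 * Accepted values are "identity", "DMA" and "auto", matching
 * iommu_group_store_type() below.
 */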
/*
 * Changing the default domain through sysfs requires the users to unbind the
 * drivers from the devices in the iommu group. Return failure if this
 * requirement is not met.
 *
 * We need to consider the race between this and the device release path.
 * device_lock(dev) is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *grp_dev;
	struct device *dev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group))
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	/*
	 * Lock/Unlock the group mutex here before device lock to
	 * 1. Make sure that the iommu group has only one device (this is a
	 *    prerequisite for step 2)
	 * 2. Get struct *dev which is needed to lock device
	 */
	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		mutex_unlock(&group->mutex);
		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
		return -EINVAL;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;
	get_device(dev);

	/*
	 * Don't hold the group mutex because taking group mutex first and then
	 * the device lock could potentially cause a deadlock as below. Assume
	 * two threads T1 and T2. T1 is trying to change default domain of an
	 * iommu group and T2 is trying to hot unplug a device or release [1] VF
	 * of a PCIe device which is in the same iommu group. T1 takes group
	 * mutex and before it could take device lock assume T2 has taken device
	 * lock and is yet to take group mutex. Now, both the threads will be
	 * waiting for the other thread to release lock. Below, lock order was
	 * suggested.
	 * device_lock(dev);
	 *	mutex_lock(&group->mutex);
	 *		iommu_change_dev_def_domain();
	 *	mutex_unlock(&group->mutex);
	 * device_unlock(dev);
	 *
	 * [1] Typical device release path
	 * device_lock() from device/driver core code
	 *  -> bus_notifier()
	 *   -> iommu_bus_notifier()
	 *    -> iommu_release_device()
	 *     -> ops->release_device() vendor driver calls back iommu core code
	 *      -> mutex_lock() from iommu core code
	 */
	mutex_unlock(&group->mutex);

	/* Check if the device in the group still has a driver bound to it */
	device_lock(dev);
	if (device_is_bound(dev)) {
		pr_err_ratelimited("Device is still bound to driver\n");
		ret = -EBUSY;
		goto out;
	}

	ret = iommu_change_dev_def_domain(group, dev, req_type);