iommu: Don't use sme_active() in generic code
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9f0a284..d658c7c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
 
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
-#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
-#else
-static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
-#endif
+
+static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = true;
+static u32 iommu_cmd_line __read_mostly;
 
 struct iommu_group {
        struct kobject kobj;
@@ -61,12 +59,25 @@ struct iommu_group_attribute {
 };
 
 static const char * const iommu_group_resv_type_string[] = {
-       [IOMMU_RESV_DIRECT]     = "direct",
-       [IOMMU_RESV_RESERVED]   = "reserved",
-       [IOMMU_RESV_MSI]        = "msi",
-       [IOMMU_RESV_SW_MSI]     = "msi",
+       [IOMMU_RESV_DIRECT]                     = "direct",
+       [IOMMU_RESV_DIRECT_RELAXABLE]           = "direct-relaxable",
+       [IOMMU_RESV_RESERVED]                   = "reserved",
+       [IOMMU_RESV_MSI]                        = "msi",
+       [IOMMU_RESV_SW_MSI]                     = "msi",
 };
 
+#define IOMMU_CMD_LINE_DMA_API         BIT(0)
+
+static void iommu_set_cmd_line_dma_api(void)
+{
+       iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
+}
+
+static bool iommu_cmd_line_dma_api(void)
+{
+       return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
+}
+
 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)          \
 struct iommu_group_attribute iommu_group_attr_##_name =                \
        __ATTR(_name, _mode, _show, _store)
@@ -79,12 +90,55 @@ struct iommu_group_attribute iommu_group_attr_##_name =             \
 static LIST_HEAD(iommu_device_list);
 static DEFINE_SPINLOCK(iommu_device_lock);
 
+/*
+ * Use a function instead of an array here because the domain-type is a
+ * bit-field, so an array would waste memory.
+ */
+static const char *iommu_domain_type_str(unsigned int t)
+{
+       switch (t) {
+       case IOMMU_DOMAIN_BLOCKED:
+               return "Blocked";
+       case IOMMU_DOMAIN_IDENTITY:
+               return "Passthrough";
+       case IOMMU_DOMAIN_UNMANAGED:
+               return "Unmanaged";
+       case IOMMU_DOMAIN_DMA:
+               return "Translated";
+       default:
+               return "Unknown";
+       }
+}
+
+static int __init iommu_subsys_init(void)
+{
+       bool cmd_line = iommu_cmd_line_dma_api();
+
+       if (!cmd_line) {
+               if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
+                       iommu_set_default_passthrough(false);
+               else
+                       iommu_set_default_translated(false);
+
+               if (iommu_default_passthrough() && mem_encrypt_active()) {
+                       pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
+                       iommu_set_default_translated(false);
+               }
+       }
+
+       pr_info("Default domain type: %s %s\n",
+               iommu_domain_type_str(iommu_def_domain_type),
+               cmd_line ? "(set via kernel command line)" : "");
+
+       return 0;
+}
+subsys_initcall(iommu_subsys_init);
+
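For reference, the pr_info() in iommu_subsys_init() above produces a single boot-log line (any pr_fmt() prefix aside), for example:

    Default domain type: Translated
    Default domain type: Passthrough (set via kernel command line)
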
 int iommu_device_register(struct iommu_device *iommu)
 {
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
-
        return 0;
 }
 
@@ -95,15 +149,43 @@ void iommu_device_unregister(struct iommu_device *iommu)
        spin_unlock(&iommu_device_lock);
 }
 
+static struct iommu_param *iommu_get_dev_param(struct device *dev)
+{
+       struct iommu_param *param = dev->iommu_param;
+
+       if (param)
+               return param;
+
+       param = kzalloc(sizeof(*param), GFP_KERNEL);
+       if (!param)
+               return NULL;
+
+       mutex_init(&param->lock);
+       dev->iommu_param = param;
+       return param;
+}
+
+static void iommu_free_dev_param(struct device *dev)
+{
+       kfree(dev->iommu_param);
+       dev->iommu_param = NULL;
+}
+
 int iommu_probe_device(struct device *dev)
 {
        const struct iommu_ops *ops = dev->bus->iommu_ops;
-       int ret = -EINVAL;
+       int ret;
 
        WARN_ON(dev->iommu_group);
+       if (!ops)
+               return -EINVAL;
 
-       if (ops)
-               ret = ops->add_device(dev);
+       if (!iommu_get_dev_param(dev))
+               return -ENOMEM;
+
+       ret = ops->add_device(dev);
+       if (ret)
+               iommu_free_dev_param(dev);
 
        return ret;
 }
@@ -114,6 +196,8 @@ void iommu_release_device(struct device *dev)
 
        if (dev->iommu_group)
                ops->remove_device(dev);
+
+       iommu_free_dev_param(dev);
 }
 
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -134,7 +218,11 @@ static int __init iommu_set_def_domain_type(char *str)
        if (ret)
                return ret;
 
-       iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+       if (pt)
+               iommu_set_default_passthrough(true);
+       else
+               iommu_set_default_translated(true);
+
        return 0;
 }
 early_param("iommu.passthrough", iommu_set_def_domain_type);
@@ -198,58 +286,58 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
  * @new: new region to insert
  * @regions: list of regions
  *
- * The new element is sorted by address with respect to the other
- * regions of the same type. In case it overlaps with another
- * region of the same type, regions are merged. In case it
- * overlaps with another region of different type, regions are
- * not merged.
+ * Elements are sorted by start address and overlapping segments
+ * of the same type are merged.
  */
-static int iommu_insert_resv_region(struct iommu_resv_region *new,
-                                   struct list_head *regions)
+int iommu_insert_resv_region(struct iommu_resv_region *new,
+                            struct list_head *regions)
 {
-       struct iommu_resv_region *region;
-       phys_addr_t start = new->start;
-       phys_addr_t end = new->start + new->length - 1;
-       struct list_head *pos = regions->next;
-
-       while (pos != regions) {
-               struct iommu_resv_region *entry =
-                       list_entry(pos, struct iommu_resv_region, list);
-               phys_addr_t a = entry->start;
-               phys_addr_t b = entry->start + entry->length - 1;
-               int type = entry->type;
-
-               if (end < a) {
-                       goto insert;
-               } else if (start > b) {
-                       pos = pos->next;
-               } else if ((start >= a) && (end <= b)) {
-                       if (new->type == type)
-                               goto done;
-                       else
-                               pos = pos->next;
+       struct iommu_resv_region *iter, *tmp, *nr, *top;
+       LIST_HEAD(stack);
+
+       nr = iommu_alloc_resv_region(new->start, new->length,
+                                    new->prot, new->type);
+       if (!nr)
+               return -ENOMEM;
+
+       /* First add the new element based on start address sorting */
+       list_for_each_entry(iter, regions, list) {
+               if (nr->start < iter->start ||
+                   (nr->start == iter->start && nr->type <= iter->type))
+                       break;
+       }
+       list_add_tail(&nr->list, &iter->list);
+
+       /* Merge overlapping segments of type nr->type in @regions, if any */
+       list_for_each_entry_safe(iter, tmp, regions, list) {
+               phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
+
+               /* no merge needed on elements of different types than @nr */
+               if (iter->type != nr->type) {
+                       list_move_tail(&iter->list, &stack);
+                       continue;
+               }
+
+               /* look for the last stack element of same type as @iter */
+               list_for_each_entry_reverse(top, &stack, list)
+                       if (top->type == iter->type)
+                               goto check_overlap;
+
+               list_move_tail(&iter->list, &stack);
+               continue;
+
+check_overlap:
+               top_end = top->start + top->length - 1;
+
+               if (iter->start > top_end + 1) {
+                       list_move_tail(&iter->list, &stack);
                } else {
-                       if (new->type == type) {
-                               phys_addr_t new_start = min(a, start);
-                               phys_addr_t new_end = max(b, end);
-
-                               list_del(&entry->list);
-                               entry->start = new_start;
-                               entry->length = new_end - new_start + 1;
-                               iommu_insert_resv_region(entry, regions);
-                       } else {
-                               pos = pos->next;
-                       }
+                       top->length = max(top_end, iter_end) - top->start + 1;
+                       list_del(&iter->list);
+                       kfree(iter);
                }
        }
-insert:
-       region = iommu_alloc_resv_region(new->start, new->length,
-                                        new->prot, new->type);
-       if (!region)
-               return -ENOMEM;
-
-       list_add_tail(&region->list, pos);
-done:
+       list_splice(&stack, regions);
        return 0;
 }
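
To make the merge semantics of the rewritten helper concrete, here is a minimal sketch with made-up addresses and lengths (real callers pass regions collected from the IOMMU driver's get_resv_regions callback; note the helper inserts its own copy of each template region):

LIST_HEAD(regions);
struct iommu_resv_region d1 = {
	.start = 0x1000, .length = 0x2000, .type = IOMMU_RESV_DIRECT,
};
struct iommu_resv_region d2 = {
	.start = 0x2000, .length = 0x2000, .type = IOMMU_RESV_DIRECT,
};
struct iommu_resv_region m1 = {
	.start = 0x2000, .length = 0x1000, .type = IOMMU_RESV_MSI,
};

iommu_insert_resv_region(&d1, &regions);	/* direct [0x1000, 0x2fff] */
iommu_insert_resv_region(&m1, &regions);	/* msi    [0x2000, 0x2fff] */
iommu_insert_resv_region(&d2, &regions);	/* direct, overlaps d1      */

/*
 * The list now holds two entries: a merged direct region [0x1000, 0x3fff]
 * and the untouched msi region [0x2000, 0x2fff] - overlapping regions of
 * different types are deliberately kept separate.
 */
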
 
@@ -561,7 +649,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);
 
-               if (entry->type != IOMMU_RESV_DIRECT)
+               if (entry->type != IOMMU_RESV_DIRECT &&
+                   entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;
 
                for (addr = start; addr < end; addr += pg_size) {
@@ -842,6 +931,206 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
 }
 EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
 
+/**
+ * iommu_register_device_fault_handler() - Register a device fault handler
+ * @dev: the device
+ * @handler: the fault handler
+ * @data: private data passed as argument to the handler
+ *
+ * When an IOMMU fault event is received, this handler gets called with the
+ * fault event and data as argument. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response codes:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ *   page faults if possible.
+ *
+ * Return 0 if the fault handler was installed successfully, or an error.
+ */
+int iommu_register_device_fault_handler(struct device *dev,
+                                       iommu_dev_fault_handler_t handler,
+                                       void *data)
+{
+       struct iommu_param *param = dev->iommu_param;
+       int ret = 0;
+
+       if (!param)
+               return -EINVAL;
+
+       mutex_lock(&param->lock);
+       /* Only allow one fault handler registered for each device */
+       if (param->fault_param) {
+               ret = -EBUSY;
+               goto done_unlock;
+       }
+
+       get_device(dev);
+       param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
+       if (!param->fault_param) {
+               put_device(dev);
+               ret = -ENOMEM;
+               goto done_unlock;
+       }
+       param->fault_param->handler = handler;
+       param->fault_param->data = data;
+       mutex_init(&param->fault_param->lock);
+       INIT_LIST_HEAD(&param->fault_param->faults);
+
+done_unlock:
+       mutex_unlock(&param->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
+
+/**
+ * iommu_unregister_device_fault_handler() - Unregister the device fault handler
+ * @dev: the device
+ *
+ * Remove the device fault handler installed with
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_unregister_device_fault_handler(struct device *dev)
+{
+       struct iommu_param *param = dev->iommu_param;
+       int ret = 0;
+
+       if (!param)
+               return -EINVAL;
+
+       mutex_lock(&param->lock);
+
+       if (!param->fault_param)
+               goto unlock;
+
+       /* we cannot unregister handler if there are pending faults */
+       if (!list_empty(&param->fault_param->faults)) {
+               ret = -EBUSY;
+               goto unlock;
+       }
+
+       kfree(param->fault_param);
+       param->fault_param = NULL;
+       put_device(dev);
+unlock:
+       mutex_unlock(&param->lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
+
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+       struct iommu_param *param = dev->iommu_param;
+       struct iommu_fault_event *evt_pending = NULL;
+       struct iommu_fault_param *fparam;
+       int ret = 0;
+
+       if (!param || !evt)
+               return -EINVAL;
+
+       /* we only report device fault if there is a handler registered */
+       mutex_lock(&param->lock);
+       fparam = param->fault_param;
+       if (!fparam || !fparam->handler) {
+               ret = -EINVAL;
+               goto done_unlock;
+       }
+
+       if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+           (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+               evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+                                     GFP_KERNEL);
+               if (!evt_pending) {
+                       ret = -ENOMEM;
+                       goto done_unlock;
+               }
+               mutex_lock(&fparam->lock);
+               list_add_tail(&evt_pending->list, &fparam->faults);
+               mutex_unlock(&fparam->lock);
+       }
+
+       ret = fparam->handler(&evt->fault, fparam->data);
+       if (ret && evt_pending) {
+               mutex_lock(&fparam->lock);
+               list_del(&evt_pending->list);
+               mutex_unlock(&fparam->lock);
+               kfree(evt_pending);
+       }
+done_unlock:
+       mutex_unlock(&param->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
+
+int iommu_page_response(struct device *dev,
+                       struct iommu_page_response *msg)
+{
+       bool pasid_valid;
+       int ret = -EINVAL;
+       struct iommu_fault_event *evt;
+       struct iommu_fault_page_request *prm;
+       struct iommu_param *param = dev->iommu_param;
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+       if (!domain || !domain->ops->page_response)
+               return -ENODEV;
+
+       if (!param || !param->fault_param)
+               return -EINVAL;
+
+       if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+           msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+               return -EINVAL;
+
+       /* Only send response if there is a fault report pending */
+       mutex_lock(&param->fault_param->lock);
+       if (list_empty(&param->fault_param->faults)) {
+               dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+               goto done_unlock;
+       }
+       /*
+        * Check if we have a matching page request pending to respond,
+        * otherwise return -EINVAL
+        */
+       list_for_each_entry(evt, &param->fault_param->faults, list) {
+               prm = &evt->fault.prm;
+               pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+
+               if ((pasid_valid && prm->pasid != msg->pasid) ||
+                   prm->grpid != msg->grpid)
+                       continue;
+
+               /* Sanitize the reply */
+               msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+
+               ret = domain->ops->page_response(dev, evt, msg);
+               list_del(&evt->list);
+               kfree(evt);
+               break;
+       }
+
+done_unlock:
+       mutex_unlock(&param->fault_param->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
+
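As a hedged illustration of the contract described in the comments above (the driver context and names are made up; real consumers typically defer the response to a workqueue, and only page requests carrying the LAST_PAGE flag are queued for a response by iommu_report_device_fault()):

static int my_iommu_fault_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.code    = IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;	/* unrecoverable events not handled here */

	resp.grpid = fault->prm.grpid;
	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = fault->prm.pasid;
	}

	/* ... try to service the request, pick INVALID/FAILURE on error ... */

	return iommu_page_response(dev, &resp);
}

/* at probe time, once dev->iommu_param exists (see iommu_probe_device()): */
ret = iommu_register_device_fault_handler(dev, my_iommu_fault_handler, dev);
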
 /**
  * iommu_group_id - Return ID for a group
  * @group: the group to ID
@@ -1628,7 +1917,7 @@ EXPORT_SYMBOL_GPL(iommu_map);
 
 static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
-                           bool sync)
+                           struct iommu_iotlb_gather *iotlb_gather)
 {
        const struct iommu_ops *ops = domain->ops;
        size_t unmapped_page, unmapped = 0;
@@ -1665,13 +1954,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-               unmapped_page = ops->unmap(domain, iova, pgsize);
+               unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
                if (!unmapped_page)
                        break;
 
-               if (sync && ops->iotlb_range_add)
-                       ops->iotlb_range_add(domain, iova, pgsize);
-
                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);
 
@@ -1679,9 +1965,6 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
                unmapped += unmapped_page;
        }
 
-       if (sync && ops->iotlb_sync)
-               ops->iotlb_sync(domain);
-
        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
 }
@@ -1689,14 +1972,22 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 size_t iommu_unmap(struct iommu_domain *domain,
                   unsigned long iova, size_t size)
 {
-       return __iommu_unmap(domain, iova, size, true);
+       struct iommu_iotlb_gather iotlb_gather;
+       size_t ret;
+
+       iommu_iotlb_gather_init(&iotlb_gather);
+       ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
+       iommu_tlb_sync(domain, &iotlb_gather);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
 size_t iommu_unmap_fast(struct iommu_domain *domain,
-                       unsigned long iova, size_t size)
+                       unsigned long iova, size_t size,
+                       struct iommu_iotlb_gather *iotlb_gather)
 {
-       return __iommu_unmap(domain, iova, size, false);
+       return __iommu_unmap(domain, iova, size, iotlb_gather);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);
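
With this split, iommu_unmap() remains a self-contained unmap-plus-flush, while iommu_unmap_fast() lets callers that tear down many ranges batch the invalidation themselves. A minimal sketch of the intended pattern (the range list is hypothetical):

struct iommu_iotlb_gather gather;
size_t unmapped = 0;

iommu_iotlb_gather_init(&gather);

list_for_each_entry(range, &ranges, list)	/* hypothetical range list */
	unmapped += iommu_unmap_fast(domain, range->iova, range->size,
				     &gather);

/* a single TLB flush for the whole batch, instead of one per iommu_unmap() */
iommu_tlb_sync(domain, &gather);
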
 
@@ -1895,24 +2186,22 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
        return region;
 }
 
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
+static int
+request_default_domain_for_dev(struct device *dev, unsigned long type)
 {
-       struct iommu_domain *dm_domain;
+       struct iommu_domain *domain;
        struct iommu_group *group;
        int ret;
 
        /* Device must already be in a group before calling this function */
-       group = iommu_group_get_for_dev(dev);
-       if (IS_ERR(group))
-               return PTR_ERR(group);
+       group = iommu_group_get(dev);
+       if (!group)
+               return -EINVAL;
 
        mutex_lock(&group->mutex);
 
-       /* Check if the default domain is already direct mapped */
        ret = 0;
-       if (group->default_domain &&
-           group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
+       if (group->default_domain && group->default_domain->type == type)
                goto out;
 
        /* Don't change mappings of existing devices */
@@ -1920,25 +2209,27 @@ int iommu_request_dm_for_dev(struct device *dev)
        if (iommu_group_device_count(group) != 1)
                goto out;
 
-       /* Allocate a direct mapped domain */
        ret = -ENOMEM;
-       dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
-       if (!dm_domain)
+       domain = __iommu_domain_alloc(dev->bus, type);
+       if (!domain)
                goto out;
 
        /* Attach the device to the domain */
-       ret = __iommu_attach_group(dm_domain, group);
+       ret = __iommu_attach_group(domain, group);
        if (ret) {
-               iommu_domain_free(dm_domain);
+               iommu_domain_free(domain);
                goto out;
        }
 
-       /* Make the direct mapped domain the default for this group */
+       iommu_group_create_direct_mappings(group, dev);
+
+       /* Make the domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
-       group->default_domain = dm_domain;
+       group->default_domain = domain;
 
-       dev_info(dev, "Using iommu direct mapping\n");
+       dev_info(dev, "Using iommu %s mapping\n",
+                type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
 
        ret = 0;
 out:
@@ -1948,6 +2239,40 @@ out:
        return ret;
 }
 
+/* Request that a device is direct mapped by the IOMMU */
+int iommu_request_dm_for_dev(struct device *dev)
+{
+       return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
+}
+
+/* Request that a device can't be direct mapped by the IOMMU */
+int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+       return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
+}
+
+void iommu_set_default_passthrough(bool cmd_line)
+{
+       if (cmd_line)
+               iommu_set_cmd_line_dma_api();
+
+       iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
+}
+
+void iommu_set_default_translated(bool cmd_line)
+{
+       if (cmd_line)
+               iommu_set_cmd_line_dma_api();
+
+       iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+}
+
+bool iommu_default_passthrough(void)
+{
+       return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
+}
+EXPORT_SYMBOL_GPL(iommu_default_passthrough);
+
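These helpers let architecture or IOMMU-driver code select the default domain type at runtime; passing cmd_line=true additionally records the choice as user-requested via IOMMU_CMD_LINE_DMA_API. A hedged sketch of a caller outside this file (option and helper names are made up):

/* e.g. while parsing an IOMMU driver's own boot option: */
if (driver_opt_passthrough)
	iommu_set_default_passthrough(true);	/* recorded as a user choice */
else if (driver_opt_translate)
	iommu_set_default_translated(true);

/* consumers only need the query helper: */
if (!iommu_default_passthrough())
	setup_dma_remapping();			/* hypothetical helper */
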
 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
        const struct iommu_ops *ops = NULL;