iommu/vt-d: Delegate the identity domain to upper layer
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a209199..dc7d376 100644
@@ -350,6 +350,7 @@ static void domain_context_clear(struct intel_iommu *iommu,
                                 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -357,6 +358,7 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_sm;
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
@@ -364,17 +366,12 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
-static int intel_iommu_sm;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
-#define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) &&                 \
-                                ecap_pasid((iommu)->ecap))
-
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
@@ -1911,9 +1908,7 @@ static void domain_exit(struct dmar_domain *domain)
        struct page *freelist;
 
        /* Remove associated devices and clear attached or cached domains */
-       rcu_read_lock();
        domain_remove_dev_info(domain);
-       rcu_read_unlock();
 
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
@@ -2814,7 +2809,9 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-       int nid, ret;
+       struct dmar_rmrr_unit *rmrr;
+       struct device *dev;
+       int i, nid, ret;
 
        si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
        if (!si_domain)
@@ -2825,8 +2822,6 @@ static int __init si_domain_init(int hw)
                return -EFAULT;
        }
 
-       pr_debug("Identity mapping domain allocated\n");
-
        if (hw)
                return 0;
 
@@ -2842,6 +2837,31 @@ static int __init si_domain_init(int hw)
                }
        }
 
+       /*
+        * Normally we use DMA domains for devices that have RMRRs. But we
+        * relax this requirement for graphics and USB devices. Identity map
+        * the RMRRs for graphics and USB devices so that they can use the
+        * si_domain.
+        */
+       for_each_rmrr_units(rmrr) {
+               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                         i, dev) {
+                       unsigned long long start = rmrr->base_address;
+                       unsigned long long end = rmrr->end_address;
+
+                       if (device_is_rmrr_locked(dev))
+                               continue;
+
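+                       /* Skip malformed or out-of-range RMRRs. */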
+                       if (WARN_ON(end < start ||
+                                   end >> agaw_to_width(si_domain->agaw)))
+                               continue;
+
+                       ret = iommu_domain_identity_map(si_domain, start, end);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        return 0;
 }
 
@@ -2849,9 +2869,6 @@ static int identity_mapping(struct device *dev)
 {
        struct device_domain_info *info;
 
-       if (likely(!iommu_identity_mapping))
-               return 0;
-
        info = dev->archdata.iommu;
        if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);
@@ -2936,29 +2953,37 @@ static bool device_is_rmrr_locked(struct device *dev)
        return true;
 }
 
-static int iommu_should_identity_map(struct device *dev, int startup)
+/*
+ * Return the required default domain type for a specific device.
+ *
+ * @dev: the device being queried
+ * @startup: true if called during early boot
+ *
+ * Returns:
+ *  - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
+ *  - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
+ *  - 0: both identity and dynamic domains work for this device
+ */
+static int device_def_domain_type(struct device *dev, int startup)
 {
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
                if (device_is_rmrr_locked(dev))
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
 
                /*
                 * Prevent any device marked as untrusted from getting
                 * placed into the statically identity mapping domain.
                 */
                if (pdev->untrusted)
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
 
                if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
-                       return 1;
+                       return IOMMU_DOMAIN_IDENTITY;
 
                if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
-                       return 1;
-
-               if (!(iommu_identity_mapping & IDENTMAP_ALL))
-                       return 0;
+                       return IOMMU_DOMAIN_IDENTITY;
 
                /*
                 * We want to start off with all devices in the 1:1 domain, and
@@ -2979,14 +3004,14 @@ static int iommu_should_identity_map(struct device *dev, int startup)
                 */
                if (!pci_is_pcie(pdev)) {
                        if (!pci_is_root_bus(pdev->bus))
-                               return 0;
+                               return IOMMU_DOMAIN_DMA;
                        if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
-                               return 0;
+                               return IOMMU_DOMAIN_DMA;
                } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
        } else {
                if (device_has_rmrr(dev))
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
        }
 
        /*
@@ -3008,7 +3033,13 @@ static int iommu_should_identity_map(struct device *dev, int startup)
                return dma_mask >= dma_get_required_mask(dev);
        }
 
-       return 1;
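+       /* With iommu=pt, IDENTMAP_ALL selects the identity domain by default. */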
+       return (iommu_identity_mapping & IDENTMAP_ALL) ?
+                       IOMMU_DOMAIN_IDENTITY : 0;
+}
+
+static inline int iommu_should_identity_map(struct device *dev, int startup)
+{
+       return device_def_domain_type(dev, startup) == IOMMU_DOMAIN_IDENTITY;
 }
 
 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
@@ -3423,11 +3454,9 @@ static int __init init_dmars(void)
 
        check_tylersburg_isoch();
 
-       if (iommu_identity_mapping) {
-               ret = si_domain_init(hw_pass_through);
-               if (ret)
-                       goto free_iommu;
-       }
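+       /* The static identity domain backs IOMMU_DOMAIN_IDENTITY requests. */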
+       ret = si_domain_init(hw_pass_through);
+       if (ret)
+               goto free_iommu;
 
 
        /*
@@ -3516,11 +3545,6 @@ domains_done:
                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto free_iommu;
-
-               if (!translation_pre_enabled(iommu))
-                       iommu_enable_translation(iommu);
-
-               iommu_disable_protect_mem_regions(iommu);
        }
 
        return 0;
@@ -3625,9 +3649,6 @@ static bool iommu_need_mapping(struct device *dev)
        if (iommu_dummy(dev))
                return false;
 
-       if (!iommu_identity_mapping)
-               return true;
-
        found = identity_mapping(dev);
        if (found) {
                if (iommu_should_identity_map(dev, 0))
@@ -4908,7 +4929,6 @@ int __init intel_iommu_init(void)
                goto out_free_reserved_range;
        }
        up_write(&dmar_global_lock);
-       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
        swiotlb = 0;
@@ -4931,6 +4951,16 @@ int __init intel_iommu_init(void)
                register_memory_notifier(&intel_iommu_memory_nb);
        cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
                          intel_iommu_cpu_dead);
+
+       /* Finally, we enable the DMA remapping hardware. */
+       for_each_iommu(iommu, drhd) {
+               if (!translation_pre_enabled(iommu))
+                       iommu_enable_translation(iommu);
+
+               iommu_disable_protect_mem_regions(iommu);
+       }
+       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+
        intel_iommu_enabled = 1;
        intel_iommu_debugfs_init();
 
@@ -5039,32 +5069,40 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
+       switch (type) {
+       case IOMMU_DOMAIN_UNMANAGED:
+               dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+               if (!dmar_domain) {
+                       pr_err("Can't allocate dmar_domain\n");
+                       return NULL;
+               }
+               if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+                       pr_err("Domain initialization failed\n");
+                       domain_exit(dmar_domain);
+                       return NULL;
+               }
+               domain_update_iommu_cap(dmar_domain);
 
-       dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-       if (!dmar_domain) {
-               pr_err("Can't allocate dmar_domain\n");
-               return NULL;
-       }
-       if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               pr_err("Domain initialization failed\n");
-               domain_exit(dmar_domain);
+               domain = &dmar_domain->domain;
+               domain->geometry.aperture_start = 0;
+               domain->geometry.aperture_end   =
+                               __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+               domain->geometry.force_aperture = true;
+
+               return domain;
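+       /* All identity-domain requests share the single si_domain. */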
+       case IOMMU_DOMAIN_IDENTITY:
+               return &si_domain->domain;
+       default:
                return NULL;
        }
-       domain_update_iommu_cap(dmar_domain);
 
-       domain = &dmar_domain->domain;
-       domain->geometry.aperture_start = 0;
-       domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-       domain->geometry.force_aperture = true;
-
-       return domain;
+       return NULL;
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-       domain_exit(to_dmar_domain(domain));
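+       /* Never free the statically allocated si_domain. */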
+       if (domain != &si_domain->domain)
+               domain_exit(to_dmar_domain(domain));
 }
 
 /*
@@ -5254,9 +5292,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
                old_domain = find_domain(dev);
                if (old_domain) {
-                       rcu_read_lock();
                        dmar_remove_one_dev_info(dev);
-                       rcu_read_unlock();
 
                        if (!domain_type_is_vm_or_si(old_domain) &&
                            list_empty(&old_domain->devices))
@@ -5487,6 +5523,19 @@ static void intel_iommu_get_resv_regions(struct device *device,
        }
        rcu_read_unlock();
 
+#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
+       if (dev_is_pci(device)) {
+               struct pci_dev *pdev = to_pci_dev(device);
+
+               if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
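+                       /* Direct map the first 16MB, the ISA DMA range. */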
+                       reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
+                                                     IOMMU_RESV_DIRECT);
+                       if (reg)
+                               list_add_tail(&reg->list, head);
+               }
+       }
+#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
+
        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
                                      0, IOMMU_RESV_MSI);
@@ -5557,6 +5606,19 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
        return ret;
 }
 
+static void intel_iommu_apply_resv_region(struct device *dev,
+                                         struct iommu_domain *domain,
+                                         struct iommu_resv_region *region)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long start, end;
+
+       start = IOVA_PFN(region->start);
+       end   = IOVA_PFN(region->start + region->length - 1);
+
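+       /* Keep the IOVA allocator from handing out PFNs in this range. */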
+       WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
+}
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 {
@@ -5722,6 +5784,7 @@ const struct iommu_ops intel_iommu_ops = {
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = intel_iommu_put_resv_regions,
+       .apply_resv_region      = intel_iommu_apply_resv_region,
        .device_group           = pci_device_group,
        .dev_has_feat           = intel_iommu_dev_has_feat,
        .dev_feat_enabled       = intel_iommu_dev_feat_enabled,