iommu/vt-d: Delegate the identity domain to upper layer
[linux-2.6-microblaze.git] drivers/iommu/intel-iommu.c
index 28cb713..dc7d376 100644
@@ -350,6 +350,7 @@ static void domain_context_clear(struct intel_iommu *iommu,
                                 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -357,6 +358,7 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_sm;
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
 
@@ -364,17 +366,12 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
-static int intel_iommu_sm;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
-#define sm_supported(iommu)    (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) &&                 \
-                                ecap_pasid((iommu)->ecap))
-
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
@@ -1391,7 +1388,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 
                /* pdev will be returned if device is not a vf */
                pf_pdev = pci_physfn(pdev);
-               info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+               info->pfsid = pci_dev_id(pf_pdev);
        }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -1911,9 +1908,7 @@ static void domain_exit(struct dmar_domain *domain)
        struct page *freelist;
 
        /* Remove associated devices and clear attached or cached domains */
-       rcu_read_lock();
        domain_remove_dev_info(domain);
-       rcu_read_unlock();
 
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
@@ -2341,32 +2336,33 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 }
 
 static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-                         struct scatterlist *sg, unsigned long phys_pfn,
-                         unsigned long nr_pages, int prot)
-{
-       int ret;
-       struct intel_iommu *iommu;
-
-       /* Do the real mapping first */
-       ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
-       if (ret)
-               return ret;
-
-       /* Notify about the new mapping */
-       if (domain_type_is_vm(domain)) {
-              /* VM typed domains can have more than one IOMMUs */
-              int iommu_id;
-              for_each_domain_iommu(iommu_id, domain) {
-                      iommu = g_iommus[iommu_id];
-                      __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-              }
-       } else {
-              /* General domains only have one IOMMU */
-              iommu = domain_get_iommu(domain);
-              __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-       }
+                         struct scatterlist *sg, unsigned long phys_pfn,
+                         unsigned long nr_pages, int prot)
+{
+       int ret;
+       struct intel_iommu *iommu;
 
-       return 0;
+       /* Do the real mapping first */
+       ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+       if (ret)
+               return ret;
+
+       /* Notify about the new mapping */
+       if (domain_type_is_vm(domain)) {
+               /* VM typed domains can have more than one IOMMU */
+               int iommu_id;
+
+               for_each_domain_iommu(iommu_id, domain) {
+                       iommu = g_iommus[iommu_id];
+                       __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+               }
+       } else {
+               /* General domains only have one IOMMU */
+               iommu = domain_get_iommu(domain);
+               __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+       }
+
+       return 0;
 }
 
 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
@@ -2485,6 +2481,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        info->domain = domain;
        info->iommu = iommu;
        info->pasid_table = NULL;
+       info->auxd_enabled = 0;
+       INIT_LIST_HEAD(&info->auxiliary_domains);
 
        if (dev && dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -2811,7 +2809,9 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-       int nid, ret;
+       struct dmar_rmrr_unit *rmrr;
+       struct device *dev;
+       int i, nid, ret;
 
        si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
        if (!si_domain)
@@ -2822,8 +2822,6 @@ static int __init si_domain_init(int hw)
                return -EFAULT;
        }
 
-       pr_debug("Identity mapping domain allocated\n");
-
        if (hw)
                return 0;
 
@@ -2839,6 +2837,31 @@ static int __init si_domain_init(int hw)
                }
        }
 
+       /*
+        * Normally we use DMA domains for devices which have RMRRs. But we
+        * lose this requirement for graphics and USB devices. Identity map
+        * the RMRRs for graphics and USB devices so that they can use the
+        * si_domain.
+        */
+       for_each_rmrr_units(rmrr) {
+               for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+                                         i, dev) {
+                       unsigned long long start = rmrr->base_address;
+                       unsigned long long end = rmrr->end_address;
+
+                       if (device_is_rmrr_locked(dev))
+                               continue;
+
+                       if (WARN_ON(end < start ||
+                                   end >> agaw_to_width(si_domain->agaw)))
+                               continue;
+
+                       ret = iommu_domain_identity_map(si_domain, start, end);
+                       if (ret)
+                               return ret;
+               }
+       }
+
        return 0;
 }
 
@@ -2846,9 +2869,6 @@ static int identity_mapping(struct device *dev)
 {
        struct device_domain_info *info;
 
-       if (likely(!iommu_identity_mapping))
-               return 0;
-
        info = dev->archdata.iommu;
        if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);
@@ -2933,29 +2953,37 @@ static bool device_is_rmrr_locked(struct device *dev)
        return true;
 }
 
-static int iommu_should_identity_map(struct device *dev, int startup)
+/*
+ * Return the required default domain type for a specific device.
+ *
+ * @dev: the device in question
+ * @startup: true if this is during early boot
+ *
+ * Returns:
+ *  - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
+ *  - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
+ *  - 0: both identity and dynamic domains work for this device
+ */
+static int device_def_domain_type(struct device *dev, int startup)
 {
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
                if (device_is_rmrr_locked(dev))
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
 
                /*
                 * Prevent any device marked as untrusted from getting
                 * placed into the statically identity mapping domain.
                 */
                if (pdev->untrusted)
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
 
                if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
-                       return 1;
+                       return IOMMU_DOMAIN_IDENTITY;
 
                if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
-                       return 1;
-
-               if (!(iommu_identity_mapping & IDENTMAP_ALL))
-                       return 0;
+                       return IOMMU_DOMAIN_IDENTITY;
 
                /*
                 * We want to start off with all devices in the 1:1 domain, and
@@ -2976,14 +3004,14 @@ static int iommu_should_identity_map(struct device *dev, int startup)
                 */
                if (!pci_is_pcie(pdev)) {
                        if (!pci_is_root_bus(pdev->bus))
-                               return 0;
+                               return IOMMU_DOMAIN_DMA;
                        if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
-                               return 0;
+                               return IOMMU_DOMAIN_DMA;
                } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
        } else {
                if (device_has_rmrr(dev))
-                       return 0;
+                       return IOMMU_DOMAIN_DMA;
        }
 
        /*
@@ -3005,7 +3033,13 @@ static int iommu_should_identity_map(struct device *dev, int startup)
                return dma_mask >= dma_get_required_mask(dev);
        }
 
-       return 1;
+       return (iommu_identity_mapping & IDENTMAP_ALL) ?
+                       IOMMU_DOMAIN_IDENTITY : 0;
+}
+
+static inline int iommu_should_identity_map(struct device *dev, int startup)
+{
+       return device_def_domain_type(dev, startup) == IOMMU_DOMAIN_IDENTITY;
 }
 
 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
@@ -3412,16 +3446,17 @@ static int __init init_dmars(void)
                iommu_identity_mapping |= IDENTMAP_ALL;
 
 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
-       iommu_identity_mapping |= IDENTMAP_GFX;
+       dmar_map_gfx = 0;
 #endif
 
+       if (!dmar_map_gfx)
+               iommu_identity_mapping |= IDENTMAP_GFX;
+
        check_tylersburg_isoch();
 
-       if (iommu_identity_mapping) {
-               ret = si_domain_init(hw_pass_through);
-               if (ret)
-                       goto free_iommu;
-       }
+       ret = si_domain_init(hw_pass_through);
+       if (ret)
+               goto free_iommu;
 
 
        /*
@@ -3496,7 +3531,13 @@ domains_done:
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
                if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+                       /*
+                        * Calling dmar_alloc_hwirq() with dmar_global_lock
+                        * held could cause a lock race condition.
+                        */
+                       up_write(&dmar_global_lock);
                        ret = intel_svm_enable_prq(iommu);
+                       down_write(&dmar_global_lock);
                        if (ret)
                                goto free_iommu;
                }
@@ -3504,11 +3545,6 @@ domains_done:
                ret = dmar_set_interrupt(iommu);
                if (ret)
                        goto free_iommu;
-
-               if (!translation_pre_enabled(iommu))
-                       iommu_enable_translation(iommu);
-
-               iommu_disable_protect_mem_regions(iommu);
        }
 
        return 0;
@@ -3606,45 +3642,37 @@ out:
 }
 
 /* Check if the dev needs to go through non-identity map and unmap process. */
-static int iommu_no_mapping(struct device *dev)
+static bool iommu_need_mapping(struct device *dev)
 {
        int found;
 
        if (iommu_dummy(dev))
-               return 1;
-
-       if (!iommu_identity_mapping)
-               return 0;
+               return false;
 
        found = identity_mapping(dev);
        if (found) {
                if (iommu_should_identity_map(dev, 0))
-                       return 1;
-               else {
-                       /*
-                        * 32 bit DMA is removed from si_domain and fall back
-                        * to non-identity mapping.
-                        */
-                       dmar_remove_one_dev_info(dev);
-                       dev_info(dev, "32bit DMA uses non-identity mapping\n");
-                       return 0;
-               }
+                       return false;
+
+               /*
+                * A 32 bit DMA device is removed from si_domain and falls
+                * back to non-identity mapping.
+                */
+               dmar_remove_one_dev_info(dev);
+               dev_info(dev, "32bit DMA uses non-identity mapping\n");
        } else {
                /*
                 * In case of a detached 64 bit DMA device from vm, the device
                 * is put into si_domain for identity mapping.
                 */
-               if (iommu_should_identity_map(dev, 0)) {
-                       int ret;
-                       ret = domain_add_dev_info(si_domain, dev);
-                       if (!ret) {
-                               dev_info(dev, "64bit DMA uses identity mapping\n");
-                               return 1;
-                       }
+               if (iommu_should_identity_map(dev, 0) &&
+                   !domain_add_dev_info(si_domain, dev)) {
+                       dev_info(dev, "64bit DMA uses identity mapping\n");
+                       return false;
                }
        }
 
-       return 0;
+       return true;
 }
 
 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
@@ -3660,9 +3688,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        BUG_ON(dir == DMA_NONE);
 
-       if (iommu_no_mapping(dev))
-               return paddr;
-
        domain = get_valid_domain_for_dev(dev);
        if (!domain)
                return DMA_MAPPING_ERROR;
@@ -3711,15 +3736,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       return __intel_map_single(dev, page_to_phys(page) + offset, size,
-                                 dir, *dev->dma_mask);
+       if (iommu_need_mapping(dev))
+               return __intel_map_single(dev, page_to_phys(page) + offset,
+                               size, dir, *dev->dma_mask);
+       return dma_direct_map_page(dev, page, offset, size, dir, attrs);
 }
 
 static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
 {
-       return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+       if (iommu_need_mapping(dev))
+               return __intel_map_single(dev, phys_addr, size, dir,
+                               *dev->dma_mask);
+       return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3730,9 +3760,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
        unsigned long iova_pfn;
        struct intel_iommu *iommu;
        struct page *freelist;
-
-       if (iommu_no_mapping(dev))
-               return;
+       struct pci_dev *pdev = NULL;
 
        domain = find_domain(dev);
        BUG_ON(!domain);
@@ -3745,11 +3773,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
        start_pfn = mm_to_dma_pfn(iova_pfn);
        last_pfn = start_pfn + nrpages - 1;
 
+       if (dev_is_pci(dev))
+               pdev = to_pci_dev(dev);
+
        dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
 
        freelist = domain_unmap(domain, start_pfn, last_pfn);
 
-       if (intel_iommu_strict) {
+       if (intel_iommu_strict || (pdev && pdev->untrusted)) {
                iommu_flush_iotlb_psi(iommu, domain, start_pfn,
                                      nrpages, !freelist, 0);
                /* free iova */
@@ -3769,7 +3800,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             unsigned long attrs)
 {
-       intel_unmap(dev, dev_addr, size);
+       if (iommu_need_mapping(dev))
+               intel_unmap(dev, dev_addr, size);
+       else
+               dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (iommu_need_mapping(dev))
+               intel_unmap(dev, dev_addr, size);
 }
 
 static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3779,28 +3820,17 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
        struct page *page = NULL;
        int order;
 
+       if (!iommu_need_mapping(dev))
+               return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
-       if (!iommu_no_mapping(dev))
-               flags &= ~(GFP_DMA | GFP_DMA32);
-       else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
-               if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
-                       flags |= GFP_DMA;
-               else
-                       flags |= GFP_DMA32;
-       }
-
        if (gfpflags_allow_blocking(flags)) {
                unsigned int count = size >> PAGE_SHIFT;
 
                page = dma_alloc_from_contiguous(dev, count, order,
                                                 flags & __GFP_NOWARN);
-               if (page && iommu_no_mapping(dev) &&
-                   page_to_phys(page) + size > dev->coherent_dma_mask) {
-                       dma_release_from_contiguous(dev, page, count);
-                       page = NULL;
-               }
        }
 
        if (!page)
@@ -3826,6 +3856,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
        int order;
        struct page *page = virt_to_page(vaddr);
 
+       if (!iommu_need_mapping(dev))
+               return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
        size = PAGE_ALIGN(size);
        order = get_order(size);
 
@@ -3843,6 +3876,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct scatterlist *sg;
        int i;
 
+       if (!iommu_need_mapping(dev))
+               return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
        for_each_sg(sglist, sg, nelems, i) {
                nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
        }
@@ -3850,20 +3886,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
        intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
 }
 
-static int intel_nontranslate_map_sg(struct device *hddev,
-       struct scatterlist *sglist, int nelems, int dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nelems, i) {
-               BUG_ON(!sg_page(sg));
-               sg->dma_address = sg_phys(sg);
-               sg->dma_length = sg->length;
-       }
-       return nelems;
-}
-
 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
                        enum dma_data_direction dir, unsigned long attrs)
 {
@@ -3878,8 +3900,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        struct intel_iommu *iommu;
 
        BUG_ON(dir == DMA_NONE);
-       if (iommu_no_mapping(dev))
-               return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+       if (!iommu_need_mapping(dev))
+               return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
 
        domain = get_valid_domain_for_dev(dev);
        if (!domain)
@@ -3929,7 +3951,7 @@ static const struct dma_map_ops intel_dma_ops = {
        .map_page = intel_map_page,
        .unmap_page = intel_unmap_page,
        .map_resource = intel_map_resource,
-       .unmap_resource = intel_unmap_page,
+       .unmap_resource = intel_unmap_resource,
        .dma_supported = dma_direct_supported,
 };
 
@@ -4055,9 +4077,7 @@ static void __init init_no_remapping_devices(void)
 
                /* This IOMMU has *only* gfx devices. Either bypass it or
                   set the gfx_mapped flag, as appropriate */
-               if (dmar_map_gfx) {
-                       intel_iommu_gfx_mapped = 1;
-               } else {
+               if (!dmar_map_gfx) {
                        drhd->ignored = 1;
                        for_each_active_dev_scope(drhd->devices,
                                                  drhd->devices_cnt, i, dev)
@@ -4086,7 +4106,7 @@ static int init_iommu_hw(void)
                                iommu_disable_protect_mem_regions(iommu);
                        continue;
                }
-       
+
                iommu_flush_write_buffer(iommu);
 
                iommu_set_root_entry(iommu);
@@ -4896,6 +4916,9 @@ int __init intel_iommu_init(void)
                goto out_free_reserved_range;
        }
 
+       if (dmar_map_gfx)
+               intel_iommu_gfx_mapped = 1;
+
        init_no_remapping_devices();
 
        ret = init_dmars();
@@ -4906,7 +4929,6 @@ int __init intel_iommu_init(void)
                goto out_free_reserved_range;
        }
        up_write(&dmar_global_lock);
-       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
 #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
        swiotlb = 0;
@@ -4929,6 +4951,16 @@ int __init intel_iommu_init(void)
                register_memory_notifier(&intel_iommu_memory_nb);
        cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
                          intel_iommu_cpu_dead);
+
+       /* Finally, we enable the DMA remapping hardware. */
+       for_each_iommu(iommu, drhd) {
+               if (!translation_pre_enabled(iommu))
+                       iommu_enable_translation(iommu);
+
+               iommu_disable_protect_mem_regions(iommu);
+       }
+       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+
        intel_iommu_enabled = 1;
        intel_iommu_debugfs_init();
 
@@ -5037,63 +5069,175 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
        struct dmar_domain *dmar_domain;
        struct iommu_domain *domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
+       switch (type) {
+       case IOMMU_DOMAIN_UNMANAGED:
+               dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
+               if (!dmar_domain) {
+                       pr_err("Can't allocate dmar_domain\n");
+                       return NULL;
+               }
+               if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+                       pr_err("Domain initialization failed\n");
+                       domain_exit(dmar_domain);
+                       return NULL;
+               }
+               domain_update_iommu_cap(dmar_domain);
 
-       dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
-       if (!dmar_domain) {
-               pr_err("Can't allocate dmar_domain\n");
-               return NULL;
-       }
-       if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               pr_err("Domain initialization failed\n");
-               domain_exit(dmar_domain);
+               domain = &dmar_domain->domain;
+               domain->geometry.aperture_start = 0;
+               domain->geometry.aperture_end   =
+                               __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+               domain->geometry.force_aperture = true;
+
+               return domain;
+       case IOMMU_DOMAIN_IDENTITY:
+               return &si_domain->domain;
+       default:
                return NULL;
        }
-       domain_update_iommu_cap(dmar_domain);
-
-       domain = &dmar_domain->domain;
-       domain->geometry.aperture_start = 0;
-       domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
-       domain->geometry.force_aperture = true;
 
-       return domain;
+       return NULL;
 }
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-       domain_exit(to_dmar_domain(domain));
+       if (domain != &si_domain->domain)
+               domain_exit(to_dmar_domain(domain));
 }
 
-static int intel_iommu_attach_device(struct iommu_domain *domain,
-                                    struct device *dev)
+/*
+ * Check whether a @domain can be attached to @dev through the
+ * aux-domain attach/detach APIs.
+ */
+static inline bool
+is_aux_domain(struct device *dev, struct iommu_domain *domain)
 {
-       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
-       struct intel_iommu *iommu;
-       int addr_width;
-       u8 bus, devfn;
+       struct device_domain_info *info = dev->archdata.iommu;
 
-       if (device_is_rmrr_locked(dev)) {
-               dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
-               return -EPERM;
-       }
+       return info && info->auxd_enabled &&
+                       domain->type == IOMMU_DOMAIN_UNMANAGED;
+}
 
-       /* normally dev is not mapped */
-       if (unlikely(domain_context_mapped(dev))) {
-               struct dmar_domain *old_domain;
+static void auxiliary_link_device(struct dmar_domain *domain,
+                                 struct device *dev)
+{
+       struct device_domain_info *info = dev->archdata.iommu;
 
-               old_domain = find_domain(dev);
-               if (old_domain) {
-                       rcu_read_lock();
-                       dmar_remove_one_dev_info(dev);
-                       rcu_read_unlock();
+       assert_spin_locked(&device_domain_lock);
+       if (WARN_ON(!info))
+               return;
 
-                       if (!domain_type_is_vm_or_si(old_domain) &&
-                            list_empty(&old_domain->devices))
-                               domain_exit(old_domain);
+       domain->auxd_refcnt++;
+       list_add(&domain->auxd, &info->auxiliary_domains);
+}
+
+static void auxiliary_unlink_device(struct dmar_domain *domain,
+                                   struct device *dev)
+{
+       struct device_domain_info *info = dev->archdata.iommu;
+
+       assert_spin_locked(&device_domain_lock);
+       if (WARN_ON(!info))
+               return;
+
+       list_del(&domain->auxd);
+       domain->auxd_refcnt--;
+
+       if (!domain->auxd_refcnt && domain->default_pasid > 0)
+               intel_pasid_free_id(domain->default_pasid);
+}
+
+static int aux_domain_add_dev(struct dmar_domain *domain,
+                             struct device *dev)
+{
+       int ret;
+       u8 bus, devfn;
+       unsigned long flags;
+       struct intel_iommu *iommu;
+
+       iommu = device_to_iommu(dev, &bus, &devfn);
+       if (!iommu)
+               return -ENODEV;
+
+       if (domain->default_pasid <= 0) {
+               int pasid;
+
+               pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+                                            pci_max_pasids(to_pci_dev(dev)),
+                                            GFP_KERNEL);
+               if (pasid <= 0) {
+                       pr_err("Can't allocate default pasid\n");
+                       return -ENODEV;
                }
+               domain->default_pasid = pasid;
        }
 
+       spin_lock_irqsave(&device_domain_lock, flags);
+       /*
+        * iommu->lock must be held to attach domain to iommu and set up the
+        * pasid entry for second level translation.
+        */
+       spin_lock(&iommu->lock);
+       ret = domain_attach_iommu(domain, iommu);
+       if (ret)
+               goto attach_failed;
+
+       /* Set up the PASID entry for mediated devices: */
+       ret = intel_pasid_setup_second_level(iommu, domain, dev,
+                                            domain->default_pasid);
+       if (ret)
+               goto table_failed;
+       spin_unlock(&iommu->lock);
+
+       auxiliary_link_device(domain, dev);
+
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+
+table_failed:
+       domain_detach_iommu(domain, iommu);
+attach_failed:
+       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+       if (!domain->auxd_refcnt && domain->default_pasid > 0)
+               intel_pasid_free_id(domain->default_pasid);
+
+       return ret;
+}
+
+static void aux_domain_remove_dev(struct dmar_domain *domain,
+                                 struct device *dev)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+
+       if (!is_aux_domain(dev, &domain->domain))
+               return;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info = dev->archdata.iommu;
+       iommu = info->iommu;
+
+       auxiliary_unlink_device(domain, dev);
+
+       spin_lock(&iommu->lock);
+       intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+       domain_detach_iommu(domain, iommu);
+       spin_unlock(&iommu->lock);
+
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static int prepare_domain_attach_device(struct iommu_domain *domain,
+                                       struct device *dev)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct intel_iommu *iommu;
+       int addr_width;
+       u8 bus, devfn;
+
        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;
@@ -5126,7 +5270,56 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                dmar_domain->agaw--;
        }
 
-       return domain_add_dev_info(dmar_domain, dev);
+       return 0;
+}
+
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                    struct device *dev)
+{
+       int ret;
+
+       if (device_is_rmrr_locked(dev)) {
+               dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
+               return -EPERM;
+       }
+
+       if (is_aux_domain(dev, domain))
+               return -EPERM;
+
+       /* normally dev is not mapped */
+       if (unlikely(domain_context_mapped(dev))) {
+               struct dmar_domain *old_domain;
+
+               old_domain = find_domain(dev);
+               if (old_domain) {
+                       dmar_remove_one_dev_info(dev);
+
+                       if (!domain_type_is_vm_or_si(old_domain) &&
+                           list_empty(&old_domain->devices))
+                               domain_exit(old_domain);
+               }
+       }
+
+       ret = prepare_domain_attach_device(domain, dev);
+       if (ret)
+               return ret;
+
+       return domain_add_dev_info(to_dmar_domain(domain), dev);
+}
+
+static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
+                                        struct device *dev)
+{
+       int ret;
+
+       if (!is_aux_domain(dev, domain))
+               return -EPERM;
+
+       ret = prepare_domain_attach_device(domain, dev);
+       if (ret)
+               return ret;
+
+       return aux_domain_add_dev(to_dmar_domain(domain), dev);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
@@ -5135,6 +5328,12 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
        dmar_remove_one_dev_info(dev);
 }
 
+static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
+                                         struct device *dev)
+{
+       aux_domain_remove_dev(to_dmar_domain(domain), dev);
+}
+
 static int intel_iommu_map(struct iommu_domain *domain,
                           unsigned long iova, phys_addr_t hpa,
                           size_t size, int iommu_prot)
@@ -5223,6 +5422,42 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        return phys;
 }
 
+static inline bool scalable_mode_support(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       bool ret = true;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!sm_supported(iommu)) {
+                       ret = false;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
+static inline bool iommu_pasid_support(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       bool ret = true;
+
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!pasid_supported(iommu)) {
+                       ret = false;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static bool intel_iommu_capable(enum iommu_cap cap)
 {
        if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5288,6 +5523,19 @@ static void intel_iommu_get_resv_regions(struct device *device,
        }
        rcu_read_unlock();
 
+#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
+       if (dev_is_pci(device)) {
+               struct pci_dev *pdev = to_pci_dev(device);
+
+               if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
+                       reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
+                                                     IOMMU_RESV_DIRECT);
+                       if (reg)
+                               list_add_tail(&reg->list, head);
+               }
+       }
+#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
+
        reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
                                      IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
                                      0, IOMMU_RESV_MSI);
@@ -5307,8 +5555,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
        }
 }
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
 {
        struct device_domain_info *info;
        struct context_entry *context;
@@ -5317,7 +5564,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
        u64 ctx_lo;
        int ret;
 
-       domain = get_valid_domain_for_dev(sdev->dev);
+       domain = get_valid_domain_for_dev(dev);
        if (!domain)
                return -EINVAL;
 
@@ -5325,7 +5572,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
        spin_lock(&iommu->lock);
 
        ret = -EINVAL;
-       info = sdev->dev->archdata.iommu;
+       info = dev->archdata.iommu;
        if (!info || !info->pasid_supported)
                goto out;
 
@@ -5335,14 +5582,13 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
        ctx_lo = context[0].lo;
 
-       sdev->did = FLPT_DEFAULT_DID;
-       sdev->sid = PCI_DEVID(info->bus, info->devfn);
-
        if (!(ctx_lo & CONTEXT_PASIDE)) {
                ctx_lo |= CONTEXT_PASIDE;
                context[0].lo = ctx_lo;
                wmb();
-               iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
+               iommu->flush.flush_context(iommu,
+                                          domain->iommu_did[iommu->seq_id],
+                                          PCI_DEVID(info->bus, info->devfn),
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
        }
@@ -5351,12 +5597,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
        if (!info->pasid_enabled)
                iommu_enable_dev_iotlb(info);
 
-       if (info->ats_enabled) {
-               sdev->dev_iotlb = 1;
-               sdev->qdep = info->ats_qdep;
-               if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
-                       sdev->qdep = 0;
-       }
        ret = 0;
 
  out:
@@ -5366,6 +5606,20 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
        return ret;
 }
 
+static void intel_iommu_apply_resv_region(struct device *dev,
+                                         struct iommu_domain *domain,
+                                         struct iommu_resv_region *region)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       unsigned long start, end;
+
+       start = IOVA_PFN(region->start);
+       end   = IOVA_PFN(region->start + region->length - 1);
+
+       WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
+}
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 {
        struct intel_iommu *iommu;
@@ -5387,12 +5641,142 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
+static int intel_iommu_enable_auxd(struct device *dev)
+{
+       struct device_domain_info *info;
+       struct intel_iommu *iommu;
+       unsigned long flags;
+       u8 bus, devfn;
+       int ret;
+
+       iommu = device_to_iommu(dev, &bus, &devfn);
+       if (!iommu || dmar_disabled)
+               return -EINVAL;
+
+       if (!sm_supported(iommu) || !pasid_supported(iommu))
+               return -EINVAL;
+
+       ret = intel_iommu_enable_pasid(iommu, dev);
+       if (ret)
+               return -ENODEV;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info = dev->archdata.iommu;
+       info->auxd_enabled = 1;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+}
+
+static int intel_iommu_disable_auxd(struct device *dev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
+
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info = dev->archdata.iommu;
+       if (!WARN_ON(!info))
+               info->auxd_enabled = 0;
+       spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       return 0;
+}
+
+/*
+ * A PCI Express designated vendor specific extended capability is defined
+ * in section 3.7 of the Intel scalable I/O virtualization technical spec
+ * for system software and tools to detect endpoint devices supporting the
+ * Intel scalable I/O virtualization without host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space, or 0 if the device does not support
+ * it.
+ */
+static int siov_find_pci_dvsec(struct pci_dev *pdev)
+{
+       int pos;
+       u16 vendor, id;
+
+       pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+       while (pos) {
+               pci_read_config_word(pdev, pos + 4, &vendor);
+               pci_read_config_word(pdev, pos + 8, &id);
+               if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+                       return pos;
+
+               pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+       }
+
+       return 0;
+}
+
+static bool
+intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+{
+       if (feat == IOMMU_DEV_FEAT_AUX) {
+               int ret;
+
+               if (!dev_is_pci(dev) || dmar_disabled ||
+                   !scalable_mode_support() || !iommu_pasid_support())
+                       return false;
+
+               ret = pci_pasid_features(to_pci_dev(dev));
+               if (ret < 0)
+                       return false;
+
+               return !!siov_find_pci_dvsec(to_pci_dev(dev));
+       }
+
+       return false;
+}
+
+static int
+intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+       if (feat == IOMMU_DEV_FEAT_AUX)
+               return intel_iommu_enable_auxd(dev);
+
+       return -ENODEV;
+}
+
+static int
+intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+       if (feat == IOMMU_DEV_FEAT_AUX)
+               return intel_iommu_disable_auxd(dev);
+
+       return -ENODEV;
+}
+
+static bool
+intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+       struct device_domain_info *info = dev->archdata.iommu;
+
+       if (feat == IOMMU_DEV_FEAT_AUX)
+               return scalable_mode_support() && info && info->auxd_enabled;
+
+       return false;
+}
+
+static int
+intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+       return dmar_domain->default_pasid > 0 ?
+                       dmar_domain->default_pasid : -EINVAL;
+}
+
 const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
        .attach_dev             = intel_iommu_attach_device,
        .detach_dev             = intel_iommu_detach_device,
+       .aux_attach_dev         = intel_iommu_aux_attach_device,
+       .aux_detach_dev         = intel_iommu_aux_detach_device,
+       .aux_get_pasid          = intel_iommu_aux_get_pasid,
        .map                    = intel_iommu_map,
        .unmap                  = intel_iommu_unmap,
        .iova_to_phys           = intel_iommu_iova_to_phys,
@@ -5400,7 +5784,12 @@ const struct iommu_ops intel_iommu_ops = {
        .remove_device          = intel_iommu_remove_device,
        .get_resv_regions       = intel_iommu_get_resv_regions,
        .put_resv_regions       = intel_iommu_put_resv_regions,
+       .apply_resv_region      = intel_iommu_apply_resv_region,
        .device_group           = pci_device_group,
+       .dev_has_feat           = intel_iommu_dev_has_feat,
+       .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
+       .dev_enable_feat        = intel_iommu_dev_enable_feat,
+       .dev_disable_feat       = intel_iommu_dev_disable_feat,
        .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
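
For context, a minimal consumer sketch (not part of this patch; the function
name is illustrative only): how an mdev-style parent driver might exercise
the aux-domain and device-feature callbacks registered above, going through
the generic IOMMU core wrappers iommu_dev_enable_feature(),
iommu_aux_attach_device() and iommu_aux_get_pasid(). Error handling is kept
deliberately simple.

#include <linux/iommu.h>
#include <linux/pci.h>

/* Hypothetical helper: enable an aux domain on @pdev and return its PASID. */
static int example_setup_aux_domain(struct pci_dev *pdev,
				    struct iommu_domain **out_domain)
{
	struct iommu_domain *domain;
	int pasid, ret;

	/* Opt the parent device in to auxiliary domains (scalable mode). */
	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	/* Allocate an unmanaged domain to back one mediated device. */
	domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		ret = -ENOMEM;
		goto err_disable;
	}

	/* Aux attach; the VT-d driver allocates a default PASID here. */
	ret = iommu_aux_attach_device(domain, &pdev->dev);
	if (ret)
		goto err_free;

	/* The PASID is what the parent device programs into its work queues. */
	pasid = iommu_aux_get_pasid(domain, &pdev->dev);
	if (pasid < 0) {
		ret = pasid;
		goto err_detach;
	}

	*out_domain = domain;
	return pasid;

err_detach:
	iommu_aux_detach_device(domain, &pdev->dev);
err_free:
	iommu_domain_free(domain);
err_disable:
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_AUX);
	return ret;
}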