Merge branches 'arm/exynos', 'arm/renesas', 'arm/rockchip', 'arm/omap', 'arm/mediatek...
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d5e8b86..2be8e23 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -949,20 +949,6 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
        return ret;
 }
 
-static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
-{
-       struct context_entry *context;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       context = iommu_context_addr(iommu, bus, devfn, 0);
-       if (context) {
-               context_clear_entry(context);
-               __iommu_flush_cache(iommu, context, sizeof(*context));
-       }
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
 static void free_context_table(struct intel_iommu *iommu)
 {
        int i;
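
Note that clear_context_table() is not dead code being deleted: its body is folded into domain_context_clear_one() in the hunk at -2351 below, so the caller can read the domain ID out of the context entry before clearing it and issue targeted invalidations instead of global ones.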
@@ -1112,8 +1098,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
-                              struct dma_pte *pte, unsigned long pfn,
-                              unsigned long start_pfn, unsigned long last_pfn)
+                              int retain_level, struct dma_pte *pte,
+                              unsigned long pfn, unsigned long start_pfn,
+                              unsigned long last_pfn)
 {
        pfn = max(start_pfn, pfn);
        pte = &pte[pfn_level_offset(pfn, level)];
@@ -1128,12 +1115,17 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
                level_pfn = pfn & level_mask(level);
                level_pte = phys_to_virt(dma_pte_addr(pte));
 
-               if (level > 2)
-                       dma_pte_free_level(domain, level - 1, level_pte,
-                                          level_pfn, start_pfn, last_pfn);
+               if (level > 2) {
+                       dma_pte_free_level(domain, level - 1, retain_level,
+                                          level_pte, level_pfn, start_pfn,
+                                          last_pfn);
+               }
 
-               /* If range covers entire pagetable, free it */
-               if (!(start_pfn > level_pfn ||
+               /*
+                * Free the page table if we're below the level we want to
+                * retain and the range covers the entire table.
+                */
+               if (level < retain_level && !(start_pfn > level_pfn ||
                      last_pfn < level_pfn + level_size(level) - 1)) {
                        dma_clear_pte(pte);
                        domain_flush_cache(domain, pte, sizeof(*pte));
@@ -1144,10 +1136,14 @@ next:
        } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
 }
 
-/* clear last level (leaf) ptes and free page table pages. */
+/*
+ * clear last level (leaf) ptes and free page table pages below the
+ * level we wish to keep intact.
+ */
 static void dma_pte_free_pagetable(struct dmar_domain *domain,
                                   unsigned long start_pfn,
-                                  unsigned long last_pfn)
+                                  unsigned long last_pfn,
+                                  int retain_level)
 {
        BUG_ON(!domain_pfn_supported(domain, start_pfn));
        BUG_ON(!domain_pfn_supported(domain, last_pfn));
@@ -1156,7 +1152,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
        dma_pte_clear_range(domain, start_pfn, last_pfn);
 
        /* We don't need lock here; nobody else touches the iova range */
-       dma_pte_free_level(domain, agaw_to_level(domain->agaw),
+       dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
                           domain->pgd, 0, start_pfn, last_pfn);
 
        /* free pgd */
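
To make the new retain_level parameter concrete, here is a sketch of the two extremes (illustrative values, not part of the patch), assuming a 4-level page table so that agaw_to_level(domain->agaw) == 4:

    /* Old behaviour: retain_level sits above the top level, so the
     * `level < retain_level` check in dma_pte_free_level() always
     * passes and every page-table page in the range is freed. */
    dma_pte_free_pagetable(domain, start_pfn, last_pfn,
                           agaw_to_level(domain->agaw) + 1);

    /* retain_level == 3: freeing only happens at level 2, i.e. only
     * level-1 (leaf) table pages are released; the level-2 tables and
     * everything above them stay intact. */
    dma_pte_free_pagetable(domain, start_pfn, last_pfn, 3);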
@@ -2277,8 +2273,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                                /*
                                 * Ensure that old small page tables are
                                 * removed to make room for superpage(s).
+                                * We're adding new large pages, so make sure
+                                * we don't remove their parent tables.
                                 */
-                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
+                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
+                                                      largepage_lvl + 1);
                        } else {
                                pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
                        }
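
For a 2MiB superpage, largepage_lvl == 2, so the call above amounts to (assumed example):

    /* Free the old 4KiB leaf tables, but keep their level-2 parent,
     * which is about to receive the new DMA_PTE_LARGE_PAGE entry. */
    dma_pte_free_pagetable(domain, iov_pfn, end_pfn, /* retain_level */ 3);

Freeing that parent table here would throw away the very table __domain_mapping() is about to write the large PTE into.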
@@ -2351,13 +2350,33 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i
 
 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
+       unsigned long flags;
+       struct context_entry *context;
+       u16 did_old;
+
        if (!iommu)
                return;
 
-       clear_context_table(iommu, bus, devfn);
-       iommu->flush.flush_context(iommu, 0, 0, 0,
-                                          DMA_CCMD_GLOBAL_INVL);
-       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+       spin_lock_irqsave(&iommu->lock, flags);
+       context = iommu_context_addr(iommu, bus, devfn, 0);
+       if (!context) {
+               spin_unlock_irqrestore(&iommu->lock, flags);
+               return;
+       }
+       did_old = context_domain_id(context);
+       context_clear_entry(context);
+       __iommu_flush_cache(iommu, context, sizeof(*context));
+       spin_unlock_irqrestore(&iommu->lock, flags);
+       iommu->flush.flush_context(iommu,
+                                  did_old,
+                                  (((u16)bus) << 8) | devfn,
+                                  DMA_CCMD_MASK_NOBIT,
+                                  DMA_CCMD_DEVICE_INVL);
+       iommu->flush.flush_iotlb(iommu,
+                                did_old,
+                                0,
+                                0,
+                                DMA_TLB_DSI_FLUSH);
 }
 
 static inline void unlink_domain_info(struct device_domain_info *info)
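
The rewrite trades two global invalidations for ones scoped to the old domain ID and the device. A sketch of the source-id encoding, using the PCI_DEVID() helper this file already uses in the PASID hunk below (equivalent to the open-coded shift above):

    u16 sid = PCI_DEVID(bus, devfn);   /* (((u16)bus) << 8) | devfn */

    iommu->flush.flush_context(iommu, did_old, sid,
                               DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);

Note the context entry is cleared and flushed under iommu->lock, while the invalidations themselves are issued after the lock is dropped.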
@@ -3818,7 +3837,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
        if (unlikely(ret)) {
                dma_pte_free_pagetable(domain, start_vpfn,
-                                      start_vpfn + size - 1);
+                                      start_vpfn + size - 1,
+                                      agaw_to_level(domain->agaw) + 1);
                free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
                return 0;
        }
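
This error path deliberately keeps the pre-patch semantics: agaw_to_level(domain->agaw) + 1 is one above the top level of the table, so the `level < retain_level` test always passes and every page-table page in the failed range is freed.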
@@ -4615,7 +4635,9 @@ static void intel_disable_iommus(void)
 
 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
 {
-       return container_of(dev, struct intel_iommu, iommu.dev);
+       struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
+
+       return container_of(iommu_dev, struct intel_iommu, iommu);
 }
 
 static ssize_t intel_iommu_show_version(struct device *dev,
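
The old container_of() is only valid if `dev` is literally the device embedded in struct intel_iommu at iommu.dev; the device handed to these sysfs show functions is instead the one registered by the iommu core's sysfs code, so the subtraction produced a pointer into unrelated memory. A sketch of the corrected lookup (dev_to_iommu_device() is, in this tree, a drvdata accessor the core pairs with that sysfs device):

    struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
    struct intel_iommu *iommu = container_of(iommu_dev,
                                             struct intel_iommu, iommu);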
@@ -5220,7 +5242,8 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
        if (!(ctx_lo & CONTEXT_PASIDE)) {
-               context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
+               if (iommu->pasid_state_table)
+                       context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
                context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
                        intel_iommu_get_pts(iommu);
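
The new check matters on setups where no PASID state table is allocated: iommu->pasid_state_table is NULL there, and virt_to_phys(NULL) yields a meaningless physical address that the old code programmed straight into the extended context entry. Leaving context[1].hi untouched in that case avoids handing the hardware a bogus pointer.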