iommu/vt-d: Use cache helpers in arch_invalidate_secondary_tlbs
author Lu Baolu <baolu.lu@linux.intel.com>
Wed, 24 Apr 2024 07:16:41 +0000 (15:16 +0800)
committer Joerg Roedel <jroedel@suse.de>
Fri, 26 Apr 2024 09:57:47 +0000 (11:57 +0200)
The arch_invalidate_secondary_tlbs callback is called in the SVA mm
notification path. It invalidates all or a range of caches after the
CPU page table is modified. Use the cache tag helpers in this path.

mm_types defines vm_end as the first byte after the end address, which
differs from the iommu gather API's inclusive end address. Hence,
convert the end parameter from the mm_types convention to the iommu
gather scheme before calling the cache_tag helper.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240416080656.60968-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel/iommu.h
drivers/iommu/intel/svm.c

diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 1d705a9..fc0b4b0 100644
@@ -1166,6 +1166,7 @@ struct intel_svm {
        struct mm_struct *mm;
        u32 pasid;
        struct list_head devs;
+       struct dmar_domain *domain;
 };
 #else
 static inline void intel_svm_check(struct intel_iommu *iommu) {}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 2e627fb..5ba0a7b 100644
@@ -168,88 +168,25 @@ void intel_svm_check(struct intel_iommu *iommu)
        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
 }
 
-static void __flush_svm_range_dev(struct intel_svm *svm,
-                                 struct intel_svm_dev *sdev,
-                                 unsigned long address,
-                                 unsigned long pages, int ih)
-{
-       struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
-
-       if (WARN_ON(!pages))
-               return;
-
-       qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
-       if (info->ats_enabled) {
-               qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-                                        svm->pasid, sdev->qdep, address,
-                                        order_base_2(pages));
-               quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
-                                         svm->pasid, sdev->qdep);
-       }
-}
-
-static void intel_flush_svm_range_dev(struct intel_svm *svm,
-                                     struct intel_svm_dev *sdev,
-                                     unsigned long address,
-                                     unsigned long pages, int ih)
-{
-       unsigned long shift = ilog2(__roundup_pow_of_two(pages));
-       unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
-       unsigned long start = ALIGN_DOWN(address, align);
-       unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
-
-       while (start < end) {
-               __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
-               start += align;
-       }
-}
-
-static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-                               unsigned long pages, int ih)
-{
-       struct intel_svm_dev *sdev;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdev, &svm->devs, list)
-               intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
-       rcu_read_unlock();
-}
-
-static void intel_flush_svm_all(struct intel_svm *svm)
-{
-       struct device_domain_info *info;
-       struct intel_svm_dev *sdev;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdev, &svm->devs, list) {
-               info = dev_iommu_priv_get(sdev->dev);
-
-               qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
-               if (info->ats_enabled) {
-                       qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-                                                svm->pasid, sdev->qdep,
-                                                0, 64 - VTD_PAGE_SHIFT);
-                       quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
-                                                 svm->pasid, sdev->qdep);
-               }
-       }
-       rcu_read_unlock();
-}
-
 /* Pages have been freed at this point */
 static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
 {
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+       struct dmar_domain *domain = svm->domain;
 
        if (start == 0 && end == -1UL) {
-               intel_flush_svm_all(svm);
+               cache_tag_flush_all(domain);
                return;
        }
 
-       intel_flush_svm_range(svm, start,
-                             (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+       /*
+        * mm_types defines vm_end as the first byte after the end address,
+        * while the IOMMU subsystem uses the last address of an address
+        * range.
+        */
+       cache_tag_flush_range(domain, start, end - 1, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
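
For context on what the helpers now hide: the removed
intel_flush_svm_range_dev() split a request into power-of-two-aligned
chunks, because the VT-d invalidation descriptors encode the range as an
order-based address mask. A standalone sketch of that splitting, with
ilog2()/__roundup_pow_of_two() reimplemented here since this is not
kernel code:

#include <stdio.h>

#define VTD_PAGE_SHIFT 12

/* smallest power of two >= n, as __roundup_pow_of_two() computes */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* floor(log2(n)) for n > 0, as ilog2() computes */
static unsigned long ilog2_ul(unsigned long n)
{
	unsigned long l = 0;

	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned long address = 0x5000, pages = 3;	/* example request */
	unsigned long shift = ilog2_ul(roundup_pow_of_two(pages));
	unsigned long align = 1UL << (VTD_PAGE_SHIFT + shift);
	unsigned long start = address & ~(align - 1);	/* ALIGN_DOWN */
	unsigned long end = (address + (pages << VTD_PAGE_SHIFT) + align - 1) &
			    ~(align - 1);		/* ALIGN */

	/* one aligned flush per chunk, as the removed loop did */
	while (start < end) {
		printf("flush %lu pages at %#lx\n",
		       align >> VTD_PAGE_SHIFT, start);
		start += align;
	}
	return 0;
}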
@@ -336,6 +273,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
                INIT_LIST_HEAD_RCU(&svm->devs);
 
                svm->notifier.ops = &intel_mmuops;
+               svm->domain = to_dmar_domain(domain);
                ret = mmu_notifier_register(&svm->notifier, mm);
                if (ret) {
                        kfree(svm);
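
Note that svm->domain is populated before mmu_notifier_register(): once
registration succeeds, the invalidation callback above may fire at any
time, so it must already see a valid domain pointer.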
@@ -747,6 +685,7 @@ struct iommu_domain *intel_svm_domain_alloc(void)
        if (!domain)
                return NULL;
        domain->domain.ops = &intel_svm_domain_ops;
+       domain->use_first_level = true;
        INIT_LIST_HEAD(&domain->cache_tags);
        spin_lock_init(&domain->cache_lock);
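
Finally, use_first_level marks the SVA domain as using first-stage
translation, presumably so the cache-tag helpers issue PASID-based
(first-stage) invalidations for it. The helpers themselves live in
drivers/iommu/intel/cache.c; as a rough sketch of the pattern they
follow, a flush walks the domain's tag list and invalidates per tag.
All names below are illustrative stand-ins, not the real API:

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the real cache-tag types */
struct demo_tag {
	const char *what;		/* e.g. "IOTLB" or "device TLB" */
	struct demo_tag *next;
};

struct demo_domain {
	struct demo_tag *cache_tags;	/* tags assigned to this domain */
};

/* Walk every tag and "invalidate" the (inclusive) range it covers */
static void demo_flush_range(struct demo_domain *domain,
			     unsigned long start, unsigned long end)
{
	struct demo_tag *tag;

	for (tag = domain->cache_tags; tag; tag = tag->next)
		printf("flush %s for [%#lx, %#lx]\n", tag->what, start, end);
}

int main(void)
{
	struct demo_tag devtlb = { "device TLB", NULL };
	struct demo_tag iotlb = { "IOTLB", &devtlb };
	struct demo_domain domain = { &iotlb };

	/* inclusive end, matching the convention discussed above */
	demo_flush_range(&domain, 0x1000, 0x2fff);
	return 0;
}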