iommu/amd: Modify logic for checking GT and PPR features
authorSuravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Thu, 21 Sep 2023 09:21:42 +0000 (09:21 +0000)
committerJoerg Roedel <jroedel@suse.de>
Mon, 25 Sep 2023 10:39:04 +0000 (12:39 +0200)
In order to support the v2 page table, the IOMMU driver needs to check if the
hardware can support Guest Translation (GT) and Peripheral Page Request
(PPR) features. Currently, IOMMU driver uses global (amd_iommu_v2_present)
and per-iommu (struct amd_iommu.is_iommu_v2) variables to track the
features. These variables are redundant since we could simply just check
the global EFR mask.

Therefore, replace it with a helper function with appropriate name.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20230921092147.5930-10-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c

index e1b0eee..5395a21 100644 (file)
@@ -102,6 +102,12 @@ static inline int check_feature_gpt_level(void)
        return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
 }
 
+static inline bool amd_iommu_gt_ppr_supported(void)
+{
+       return (check_feature(FEATURE_GT) &&
+               check_feature(FEATURE_PPR));
+}
+
 static inline u64 iommu_virt_to_phys(void *vaddr)
 {
        return (u64)__sme_set(virt_to_phys(vaddr));
index 22bdfb0..29e76c1 100644 (file)
@@ -679,9 +679,6 @@ struct amd_iommu {
        /* Extended features 2 */
        u64 features2;
 
-       /* IOMMUv2 */
-       bool is_iommu_v2;
-
        /* PCI device id of the IOMMU device */
        u16 devid;
 
@@ -890,8 +887,6 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
 /* Smallest max PASID supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasid;
 
-extern bool amd_iommu_v2_present;
-
 extern bool amd_iommu_force_isolation;
 
 /* Max levels of glxval supported */
index d0d506a..06b3190 100644 (file)
@@ -187,7 +187,6 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
 
 u32 amd_iommu_max_pasid __read_mostly = ~0;
 
-bool amd_iommu_v2_present __read_mostly;
 static bool amd_iommu_pc_present __read_mostly;
 bool amdr_ivrs_remap_support __read_mostly;
 
@@ -2101,12 +2100,6 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }
 
-       if (check_feature(FEATURE_GT) &&
-           check_feature(FEATURE_PPR)) {
-               iommu->is_iommu_v2   = true;
-               amd_iommu_v2_present = true;
-       }
-
        if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;
 
@@ -3676,7 +3669,7 @@ bool amd_iommu_v2_supported(void)
         * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
         * setting up IOMMUv1 page table.
         */
-       return amd_iommu_v2_present && !amd_iommu_snp_en;
+       return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
 }
 EXPORT_SYMBOL(amd_iommu_v2_supported);
 
index 6b9c4a9..126f587 100644 (file)
@@ -397,7 +397,7 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
         */
        if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
            dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
-               dev_data->iommu_v2 = iommu->is_iommu_v2;
+               dev_data->iommu_v2 = amd_iommu_gt_ppr_supported();
        }
 
        dev_iommu_priv_set(dev, dev_data);