iommu/vt-d: Setup context and enable RID2PASID support
author Lu Baolu <baolu.lu@linux.intel.com>
Mon, 10 Dec 2018 01:59:03 +0000 (09:59 +0800)
committer Joerg Roedel <jroedel@suse.de>
Tue, 11 Dec 2018 09:45:59 +0000 (10:45 +0100)
This patch enables translation for requests without PASID in scalable
mode by setting up the root and context entries.

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Sanjay Kumar <sanjay.k.kumar@intel.com>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.h
include/linux/intel-iommu.h

index 13c3c2d..21a6853 100644
@@ -1197,6 +1197,8 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
        unsigned long flag;
 
        addr = virt_to_phys(iommu->root_entry);
+       if (sm_supported(iommu))
+               addr |= DMA_RTADDR_SMT;
 
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
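
[Illustration, not part of the patch] The hunk above ORs DMA_RTADDR_SMT
(bit 10, defined in include/linux/intel-iommu.h below) into the root
table address before it is written to DMAR_RTADDR_REG, so the hardware
treats the root table as being in scalable-mode format. A minimal
standalone C sketch of that composition, with a hypothetical helper
name and plain uint64_t in place of the kernel types:

	#include <stdint.h>

	#define SMT_BIT (((uint64_t)1) << 10)	/* mirrors DMA_RTADDR_SMT */

	/* Hypothetical helper: value written to the root table address
	 * register, given a 4KB-aligned root table physical address. */
	static uint64_t compose_rtaddr(uint64_t root_table_phys, int scalable_mode)
	{
		uint64_t val = root_table_phys;

		if (scalable_mode)
			val |= SMT_BIT;	/* select scalable-mode root table format */

		return val;
	}
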
@@ -1918,6 +1920,56 @@ static void domain_exit(struct dmar_domain *domain)
        free_domain_mem(domain);
 }
 
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+       int pds, max_pde;
+
+       max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+       pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
+       if (pds < 7)
+               return 0;
+
+       return pds - 7;
+}
+
+/*
+ * Set the RID_PASID field of a scalable mode context entry. The
+ * IOMMU hardware will use the PASID value set in this field for
+ * DMA translations of DMA requests without PASID.
+ */
+static inline void
+context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
+{
+       context->hi |= pasid & ((1 << 20) - 1);
+       context->hi |= (1 << 20);
+}
+
+/*
+ * Set the DTE(Device-TLB Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_dte(struct context_entry *context)
+{
+       context->lo |= (1 << 2);
+}
+
+/*
+ * Set the PRE(Page Request Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_pre(struct context_entry *context)
+{
+       context->lo |= (1 << 4);
+}
+
+/* Convert value to context PASID directory size field coding. */
+#define context_pdts(pds)      (((pds) & 0x7) << 9)
+
 static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      struct pasid_table *table,
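
[Illustration, not part of the patch] A worked example for
context_get_sm_pds() above, assuming a hypothetical max_pasid of
1 << 20 and PASID_PDE_SHIFT of 6 (see the intel-pasid.h hunk below):
max_pde is then 1 << 14, the lowest set bit is bit 14, and the helper
returns 14 - 7 = 7; a PDTS of 7 encodes a PASID directory of
2^(7 + 7) = 16384 entries, matching max_pde. A standalone sketch of the
same arithmetic, using __builtin_ctz in place of find_first_bit:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t max_pasid = 1u << 20;		/* hypothetical value */
		uint32_t max_pde = max_pasid >> 6;	/* PASID_PDE_SHIFT */
		int pds = __builtin_ctz(max_pde);	/* lowest set bit: 14 */

		pds = (pds < 7) ? 0 : pds - 7;		/* PDTS field value: 7 */

		printf("pds=%d, directory entries=%u\n", pds, 1u << (pds + 7));
		return 0;
	}
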
@@ -1928,8 +1980,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        struct device_domain_info *info = NULL;
        struct context_entry *context;
        unsigned long flags;
-       struct dma_pte *pgd;
-       int ret, agaw;
+       int ret;
 
        WARN_ON(did == 0);
 
@@ -1975,41 +2026,67 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                }
        }
 
-       pgd = domain->pgd;
-
        context_clear_entry(context);
-       context_set_domain_id(context, did);
 
-       /*
-        * Skip top levels of page tables for iommu which has less agaw
-        * than default.  Unnecessary for PT mode.
-        */
-       if (translation != CONTEXT_TT_PASS_THROUGH) {
-               for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
-                       ret = -ENOMEM;
-                       pgd = phys_to_virt(dma_pte_addr(pgd));
-                       if (!dma_pte_present(pgd))
-                               goto out_unlock;
-               }
+       if (sm_supported(iommu)) {
+               unsigned long pds;
 
-               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
-               if (info && info->ats_supported)
-                       translation = CONTEXT_TT_DEV_IOTLB;
-               else
-                       translation = CONTEXT_TT_MULTI_LEVEL;
+               WARN_ON(!table);
+
+               /* Setup the PASID DIR pointer: */
+               pds = context_get_sm_pds(table);
+               context->lo = (u64)virt_to_phys(table->table) |
+                               context_pdts(pds);
+
+               /* Setup the RID_PASID field: */
+               context_set_sm_rid2pasid(context, PASID_RID2PASID);
 
-               context_set_address_root(context, virt_to_phys(pgd));
-               context_set_address_width(context, agaw);
-       } else {
                /*
-                * In pass through mode, AW must be programmed to
-                * indicate the largest AGAW value supported by
-                * hardware. And ASR is ignored by hardware.
+                * Setup the Device-TLB enable bit and Page request
+                * Enable bit:
                 */
-               context_set_address_width(context, iommu->msagaw);
+               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+               if (info && info->ats_supported)
+                       context_set_sm_dte(context);
+               if (info && info->pri_supported)
+                       context_set_sm_pre(context);
+       } else {
+               struct dma_pte *pgd = domain->pgd;
+               int agaw;
+
+               context_set_domain_id(context, did);
+               context_set_translation_type(context, translation);
+
+               if (translation != CONTEXT_TT_PASS_THROUGH) {
+                       /*
+                        * Skip top levels of page tables for iommu which has
+                        * less agaw than default. Unnecessary for PT mode.
+                        */
+                       for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+                               ret = -ENOMEM;
+                               pgd = phys_to_virt(dma_pte_addr(pgd));
+                               if (!dma_pte_present(pgd))
+                                       goto out_unlock;
+                       }
+
+                       info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+                       if (info && info->ats_supported)
+                               translation = CONTEXT_TT_DEV_IOTLB;
+                       else
+                               translation = CONTEXT_TT_MULTI_LEVEL;
+
+                       context_set_address_root(context, virt_to_phys(pgd));
+                       context_set_address_width(context, agaw);
+               } else {
+                       /*
+                        * In pass through mode, AW must be programmed to
+                        * indicate the largest AGAW value supported by
+                        * hardware. And ASR is ignored by hardware.
+                        */
+                       context_set_address_width(context, iommu->msagaw);
+               }
        }
 
-       context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));
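
[Illustration, not part of the patch] For readability, the scalable-mode
fields programmed by the hunk above, collected into one standalone
sketch. The struct and function names are hypothetical; the bit
positions are the ones used by the new helpers (PDTS at lo[11:9], DTE at
lo[2], PRE at lo[4], RID_PASID in hi[19:0]). The pre-existing
context_set_fault_enable()/context_set_present() helpers, which are not
part of this diff, still finish off the entry:

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical stand-in for struct context_entry: two 64-bit words. */
	struct sm_context_entry {
		uint64_t lo;
		uint64_t hi;
	};

	static void compose_sm_context(struct sm_context_entry *ce,
				       uint64_t pasid_dir_phys, unsigned long pds,
				       unsigned long rid_pasid, bool ats, bool pri)
	{
		ce->lo = pasid_dir_phys | ((pds & 0x7) << 9);	/* PASID dir pointer + PDTS */
		if (ats)
			ce->lo |= 1 << 2;	/* DTE: Device-TLB enable */
		if (pri)
			ce->lo |= 1 << 4;	/* PRE: Page Request enable */

		ce->hi = rid_pasid & ((1 << 20) - 1);	/* PASID used for requests without PASID */
		ce->hi |= 1 << 20;			/* also set by context_set_sm_rid2pasid() */
	}
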
@@ -5180,7 +5257,6 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-#define MAX_NR_PASID_BITS (20)
 static inline unsigned long intel_iommu_get_pts(struct device *dev)
 {
        int pts, max_pasid;
index d6f4fea..55bb871 100644
@@ -17,6 +17,7 @@
 #define PASID_PTE_PRESENT              1
 #define PDE_PFN_MASK                   PAGE_MASK
 #define PASID_PDE_SHIFT                        6
+#define MAX_NR_PASID_BITS              20
 
 /*
  * Domain ID reserved for pasid entries programmed for first-level
index cb3ebda..5fdd33e 100644
 
 /* DMA_RTADDR_REG */
 #define DMA_RTADDR_RTT (((u64)1) << 11)
+#define DMA_RTADDR_SMT (((u64)1) << 10)
 
 /* CCMD_REG */
 #define DMA_CCMD_ICC (((u64)1) << 63)