iommu: Add support for the map_pages() callback
Author:     Isaac J. Manjarres <isaacm@codeaurora.org>
AuthorDate: Wed, 16 Jun 2021 13:38:49 +0000 (06:38 -0700)
Committer:  Joerg Roedel <jroedel@suse.de>
CommitDate: Mon, 26 Jul 2021 10:37:07 +0000 (12:37 +0200)
Since iommu_pgsize() can calculate how many pages of the
same size can be mapped/unmapped before the next largest
page size boundary, add support for invoking an IOMMU
driver's map_pages() callback, if the driver provides one.

Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Suggested-by: Will Deacon <will@kernel.org>
Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/1623850736-389584-9-git-send-email-quic_c_gdjako@quicinc.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
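
For reference, the map_pages() callback consumed here was added to
struct iommu_ops earlier in this series. Judging from the call site
below, it has roughly the following shape (a sketch; the parameter
names are inferred from the call, not copied from the header):

struct iommu_ops {
	...
	/*
	 * Map 'pgcount' pages of size 'pgsize' starting at (iova, paddr).
	 * On return, '*mapped' holds the number of bytes actually mapped,
	 * even if an error is returned, so the caller can unmap them.
	 */
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	...
};
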
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 725622c..70a729c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2429,6 +2429,30 @@ out_set_count:
        return pgsize;
 }
 
+static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+                            phys_addr_t paddr, size_t size, int prot,
+                            gfp_t gfp, size_t *mapped)
+{
+       const struct iommu_ops *ops = domain->ops;
+       size_t pgsize, count;
+       int ret;
+
+       pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
+
+       pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+                iova, &paddr, pgsize, count);
+
+       if (ops->map_pages) {
+               ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+                                    gfp, mapped);
+       } else {
+               ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+               *mapped = ret ? 0 : pgsize;
+       }
+
+       return ret;
+}
+
 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
@@ -2439,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        phys_addr_t orig_paddr = paddr;
        int ret = 0;
 
-       if (unlikely(ops->map == NULL ||
+       if (unlikely(!(ops->map || ops->map_pages) ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;
 
@@ -2463,18 +2487,21 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL);
+               size_t mapped = 0;
 
-               pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
-                        iova, &paddr, pgsize);
-               ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+               ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
+                                       &mapped);
+               /*
+                * Some pages may have been mapped, even if an error occurred,
+                * so we should account for those so they can be unmapped.
+                */
+               size -= mapped;
 
                if (ret)
                        break;
 
-               iova += pgsize;
-               paddr += pgsize;
-               size -= pgsize;
+               iova += mapped;
+               paddr += mapped;
        }
 
        /* unroll mapping in case something went wrong */
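
To illustrate the new path from the driver side, here is a minimal,
hypothetical map_pages() implementation that simply loops over a
driver's existing single-page mapper (my_iommu_map() and the other
my_iommu_* names are made up for this sketch and are not part of the
patch):

static int my_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	size_t done = 0;
	int ret = 0;

	while (pgcount--) {
		/* Map one page of 'pgsize' bytes at the current offset. */
		ret = my_iommu_map(domain, iova + done, paddr + done,
				   pgsize, prot, gfp);
		if (ret)
			break;
		done += pgsize;
	}

	/* Report partial progress so __iommu_map() can unroll it on failure. */
	*mapped = done;
	return ret;
}

A driver only needs to implement one of the two callbacks: as the change
to __iommu_map() above shows, the core prefers map_pages() and falls back
to map() when it is absent.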