iommu: Do physical merging in iommu_map_sg()
author Robin Murphy <robin.murphy@arm.com>
Thu, 11 Oct 2018 15:56:42 +0000 (16:56 +0100)
committer Joerg Roedel <jroedel@suse.de>
Tue, 6 Nov 2018 15:30:39 +0000 (16:30 +0100)
The original motivation for iommu_map_sg() was to give IOMMU drivers the
chance to map an IOVA-contiguous scatterlist as efficiently as they
could. It turns out that there isn't really much driver-specific
business involved there, so now that the default implementation is
mandatory let's just improve that - the main thing we're after is to use
larger pages wherever possible, and as long as domain->pgsize_bitmap
reflects reality, iommu_map() can already do that in a generic way. All
we need to do is detect physically-contiguous segments and batch them
into a single map operation, since whatever we do here is transparent to
our caller and not bound by any segment-length restrictions on the list
itself.
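
As a concrete illustration (the addresses here are made up), suppose the
scatterlist holds two segments that happen to be physically adjacent:

	seg0: phys 0x80000000, length 0x100000 (1MB)
	seg1: phys 0x80100000, length 0x100000 (1MB)

The old loop issued two separate 1MB iommu_map() calls; with merging,
the pair becomes a single call covering 0x200000 bytes, which a domain
whose pgsize_bitmap includes 2MB can satisfy with one block entry
instead of hundreds of 4KB entries, provided the target IOVA is
suitably aligned.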

Speaking of efficiency, there's really very little point in duplicating
the checks that iommu_map() is going to do anyway, so those get cleaned
up in the process.
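
For reference, the per-segment validation being dropped duplicates
checks of roughly this shape, which iommu_map() already performs on
every call (a simplified sketch, not the exact upstream code):

	if (unlikely(domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/* reject anything not aligned to the smallest supported page */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz))
		return -EINVAL;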

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/iommu.c

index edbdf5d..f8ec49e 100644
@@ -1712,33 +1712,32 @@ EXPORT_SYMBOL_GPL(iommu_unmap_fast);
 size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                    struct scatterlist *sg, unsigned int nents, int prot)
 {
-       struct scatterlist *s;
-       size_t mapped = 0;
-       unsigned int i, min_pagesz;
+       size_t len = 0, mapped = 0;
+       phys_addr_t start;
+       unsigned int i = 0;
        int ret;
 
-       if (unlikely(domain->pgsize_bitmap == 0UL))
-               return 0;
-
-       min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
+       while (i <= nents) {
+               phys_addr_t s_phys = sg_phys(sg);
 
-       for_each_sg(sg, s, nents, i) {
-               phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+               if (len && s_phys != start + len) {
+                       ret = iommu_map(domain, iova + mapped, start, len, prot);
+                       if (ret)
+                               goto out_err;
 
-               /*
-                * We are mapping on IOMMU page boundaries, so offset within
-                * the page must be 0. However, the IOMMU may support pages
-                * smaller than PAGE_SIZE, so s->offset may still represent
-                * an offset of that boundary within the CPU page.
-                */
-               if (!IS_ALIGNED(s->offset, min_pagesz))
-                       goto out_err;
+                       mapped += len;
+                       len = 0;
+               }
 
-               ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
-               if (ret)
-                       goto out_err;
+               if (len) {
+                       len += sg->length;
+               } else {
+                       len = sg->length;
+                       start = s_phys;
+               }
 
-               mapped += s->length;
+               if (++i < nents)
+                       sg = sg_next(sg);
        }
 
        return mapped;
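
Note that the loop intentionally runs while i <= nents: the extra
trailing pass revisits the final segment, so s_phys can no longer equal
start + len and the last accumulated run is flushed through iommu_map()
before returning. On failure, the out_err path just below this hunk
unmaps whatever was already mapped and returns 0.

A minimal caller sketch (the sg_table, IOVA and length variables are
hypothetical):

	size_t mapped;

	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
			      IOMMU_READ | IOMMU_WRITE);
	if (mapped < buf_len) {
		/* any partial mappings were already torn down internally */
		return -ENOMEM;
	}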