iommu/iova: Move fast alloc size roundup into alloc_iova_fast()
author John Garry via iommu <iommu@lists.linux-foundation.org>
Tue, 7 Dec 2021 11:17:26 +0000 (19:17 +0800)
committer Joerg Roedel <jroedel@suse.de>
Fri, 17 Dec 2021 08:10:40 +0000 (09:10 +0100)
It really is a property of the IOVA rcache code that we need to
allocate a power-of-2 size, so relocate the rounding-up into
alloc_iova_fast() itself, rather than leaving it to the callsites.
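
The rule being moved is simple: any size small enough to be served by
the rcaches is rounded up to the next power of two, while anything
larger is left alone. A minimal user-space sketch of that rule,
assuming the kernel's IOVA_RANGE_CACHE_MAX_SIZE value of 6 and a
simplified stand-in for roundup_pow_of_two():

#include <stdio.h>

#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */

/* Simplified stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two_demo(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned long sizes[] = { 1, 3, 5, 17, 31, 33 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                unsigned long size = sizes[i];

                /* Only cacheable sizes (below 32 pages here) are rounded;
                 * larger requests bypass the rcaches anyway. */
                if (size < (1UL << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                        size = roundup_pow_of_two_demo(size);

                printf("requested %2lu pages -> allocated %2lu pages\n",
                       sizes[i], size);
        }
        return 0;
}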

Signed-off-by: John Garry <john.garry@huawei.com>
Acked-by: Will Deacon <will@kernel.org>
Reviewed-by: Xie Yongji <xieyongji@bytedance.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/1638875846-23993-1-git-send-email-john.garry@huawei.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c
drivers/iommu/iova.c
drivers/vdpa/vdpa_user/iova_domain.c

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b42e38a..84dee53 100644
@@ -442,14 +442,6 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 
        shift = iova_shift(iovad);
        iova_len = size >> shift;
-       /*
-        * Freeing non-power-of-two-sized allocations back into the IOVA caches
-        * will come back to bite us badly, so we have to waste a bit of space
-        * rounding up anything cacheable to make sure that can't happen. The
-        * order of the unadjusted size will still match upon freeing.
-        */
-       if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
-               iova_len = roundup_pow_of_two(iova_len);
 
        dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
 
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 9e8bc80..ff567cb 100644
@@ -497,6 +497,15 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
        unsigned long iova_pfn;
        struct iova *new_iova;
 
+       /*
+        * Freeing non-power-of-two-sized allocations back into the IOVA caches
+        * will come back to bite us badly, so we have to waste a bit of space
+        * rounding up anything cacheable to make sure that can't happen. The
+        * order of the unadjusted size will still match upon freeing.
+        */
+       if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+               size = roundup_pow_of_two(size);
+
        iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
        if (iova_pfn)
                return iova_pfn;
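
The "order of the unadjusted size will still match upon freeing" remark
holds because the rcache buckets are indexed by order_base_2(), i.e. the
ceiling of log2, so a length of 5 pages freed by the caller hits the
same bucket as the 8 pages that were actually allocated. A small
stand-alone sketch of that equivalence, with a stand-in for the kernel's
order_base_2():

#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): ceiling of log2(n). */
static unsigned int order_base_2_demo(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long requested = 5;    /* length the caller asked for */
        unsigned long allocated = 8;    /* after roundup_pow_of_two() */

        /* Both map to bucket 3, so a free of the unadjusted length
         * still finds the cache that the allocation came from. */
        printf("bucket(%lu) = %u, bucket(%lu) = %u\n",
               requested, order_base_2_demo(requested),
               allocated, order_base_2_demo(allocated));
        return 0;
}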
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 1daae26..2b1143f 100644
@@ -292,14 +292,6 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
        unsigned long iova_len = iova_align(iovad, size) >> shift;
        unsigned long iova_pfn;
 
-       /*
-        * Freeing non-power-of-two-sized allocations back into the IOVA caches
-        * will come back to bite us badly, so we have to waste a bit of space
-        * rounding up anything cacheable to make sure that can't happen. The
-        * order of the unadjusted size will still match upon freeing.
-        */
-       if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
-               iova_len = roundup_pow_of_two(iova_len);
        iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
 
        return iova_pfn << shift;
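
With the rounding centralized, a caller only has to convert bytes to
IOVA pages. A hypothetical caller sketch (example_alloc() is
illustrative and not part of the patch; iova_shift(), iova_align() and
alloc_iova_fast() are the real helpers from the IOVA code):

#include <linux/iova.h>

/* Hypothetical caller: no roundup_pow_of_two() needed any more, since
 * alloc_iova_fast() now rounds cacheable sizes itself. */
static dma_addr_t example_alloc(struct iova_domain *iovad, size_t size,
                                dma_addr_t limit)
{
        unsigned long shift = iova_shift(iovad);
        unsigned long iova_len = iova_align(iovad, size) >> shift;
        unsigned long iova_pfn;

        iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);

        return (dma_addr_t)iova_pfn << shift;
}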