Merge branch 'stable/for-linus-5.15' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8dca4f9..4c6c5e0 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,15 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static void __dma_direct_free_pages(struct device *dev, struct page *page,
+                                   size_t size)
+{
+       if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+           swiotlb_free(dev, page, size))
+               return;
+       dma_free_contiguous(dev, page, size);
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp)
 {
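Reviewer note: the new __dma_direct_free_pages() helper gives every free site below a single place to hand pages back to a restricted pool first. swiotlb_free() returns true only when the page actually came from the device's pool; otherwise the helper falls through to dma_free_contiguous() as before. The matching swiotlb side is added elsewhere in this series and is not part of this diff; it looks roughly like:

	bool swiotlb_free(struct device *dev, struct page *page, size_t size)
	{
		phys_addr_t tlb_addr = page_to_phys(page);

		/* Page is not from this device's restricted pool; tell the
		 * caller to fall back to dma_free_contiguous(). */
		if (!is_swiotlb_buffer(dev, tlb_addr))
			return false;

		swiotlb_release_slots(dev, tlb_addr);
		return true;
	}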
@@ -86,6 +95,16 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_limit);
+       if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
+           is_swiotlb_for_alloc(dev)) {
+               page = swiotlb_alloc(dev, size);
+               if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+                       __dma_direct_free_pages(dev, page, size);
+                       return NULL;
+               }
+               return page;
+       }
+
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
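With CONFIG_DMA_RESTRICTED_POOL enabled, a device for which is_swiotlb_for_alloc() is true takes the early return above unconditionally: its coherent memory must come out of the pre-reserved pool, so there is deliberately no fallback to dma_alloc_contiguous() when swiotlb_alloc() fails or the resulting page does not satisfy dma_coherent_ok(). Callers are unaffected; a driver-side sketch (size and error handling illustrative only):

	dma_addr_t dma_handle;
	void *buf;

	/* On a device whose firmware node references a "restricted-dma-pool"
	 * reserved-memory region, this now returns memory carved out of that
	 * pool, or NULL when the pool is exhausted (no fallback). */
	buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;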
@@ -142,7 +161,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                gfp |= __GFP_NOWARN;
 
        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-           !force_dma_unencrypted(dev)) {
+           !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
                if (!page)
                        return NULL;
@@ -157,7 +176,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-           !dev_is_dma_coherent(dev))
+           !dev_is_dma_coherent(dev) &&
+           !is_swiotlb_for_alloc(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
@@ -167,11 +187,16 @@ void *dma_direct_alloc(struct device *dev, size_t size,
        /*
         * Remapping or decrypting memory may block. If either is required and
         * we can't block, allocate the memory from the atomic pools.
+        * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one
+        * must set up another device coherent pool via the shared-dma-pool
+        * binding and use dma_alloc_from_dev_coherent instead.
         */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            !gfpflags_allow_blocking(gfp) &&
            (force_dma_unencrypted(dev) ||
-            (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
+            (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+             !dev_is_dma_coherent(dev))) &&
+           !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        /* we always manually zero the memory once we are done */
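Three early-return paths in dma_direct_alloc() gain a !is_swiotlb_for_alloc(dev) guard in the hunks above: the DMA_ATTR_NO_KERNEL_MAPPING shortcut, the arch_dma_alloc() fallback, and the atomic-pool path. None of those allocate from the device's restricted pool, so a restricted device must keep falling through to __dma_direct_alloc_pages(). The predicate comes from the companion swiotlb.h change in this series and is roughly:

	#ifdef CONFIG_DMA_RESTRICTED_POOL
	static inline bool is_swiotlb_for_alloc(struct device *dev)
	{
		return dev->dma_io_tlb_mem->for_alloc;
	}
	#else
	static inline bool is_swiotlb_for_alloc(struct device *dev)
	{
		return false;
	}
	#endif /* CONFIG_DMA_RESTRICTED_POOL */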
@@ -242,7 +267,7 @@ out_encrypt_pages:
                        return NULL;
        }
 out_free_pages:
-       dma_free_contiguous(dev, page, size);
+       __dma_direct_free_pages(dev, page, size);
        return NULL;
 }
 
@@ -252,7 +277,7 @@ void dma_direct_free(struct device *dev, size_t size,
        unsigned int page_order = get_order(size);
 
        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-           !force_dma_unencrypted(dev)) {
+           !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
@@ -261,7 +286,8 @@ void dma_direct_free(struct device *dev, size_t size,
        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-           !dev_is_dma_coherent(dev)) {
+           !dev_is_dma_coherent(dev) &&
+           !is_swiotlb_for_alloc(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }
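The free path mirrors the allocation-side gating line by line: a buffer that a restricted device obtained from its pool was never a NO_KERNEL_MAPPING page cookie and never came from arch_dma_alloc(), so those early returns must not claim it here either. A round trip on such a device now stays entirely inside the pool; an illustrative sketch:

	dma_addr_t dma;
	void *vaddr;

	/* Both calls operate on the device's restricted pool rather than
	 * on ordinary system memory. */
	vaddr = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
	if (vaddr)
		dma_free_coherent(dev, SZ_4K, vaddr, dma);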
@@ -286,7 +312,7 @@ void dma_direct_free(struct device *dev, size_t size,
        else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                arch_dma_clear_uncached(cpu_addr, size);
 
-       dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
+       __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -296,7 +322,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
        void *ret;
 
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-           force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
+           force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+           !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
        page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -323,7 +350,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
 out_free_pages:
-       dma_free_contiguous(dev, page, size);
+       __dma_direct_free_pages(dev, page, size);
        return NULL;
 }
 
@@ -342,7 +369,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
 
-       dma_free_contiguous(dev, page, size);
+       __dma_direct_free_pages(dev, page, size);
 }
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
@@ -356,7 +383,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
-               if (unlikely(is_swiotlb_buffer(paddr)))
+               if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_device(dev, paddr, sg->length,
                                                       dir);
 
@@ -382,7 +409,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
-               if (unlikely(is_swiotlb_buffer(paddr)))
+               if (unlikely(is_swiotlb_buffer(dev, paddr)))
                        swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
                                                    dir);
 
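is_swiotlb_buffer() now takes the device because the bounce buffer an address may belong to is no longer necessarily the one global io_tlb_mem: with a restricted pool it is per device. The reworked helper, from the companion swiotlb.h patch, is roughly:

	static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
	{
		struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

		return mem && paddr >= mem->start && paddr < mem->end;
	}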
@@ -510,8 +537,8 @@ int dma_direct_supported(struct device *dev, u64 mask)
 size_t dma_direct_max_mapping_size(struct device *dev)
 {
        /* If SWIOTLB is active, use its maximum mapping size */
-       if (is_swiotlb_active() &&
-           (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
+       if (is_swiotlb_active(dev) &&
+           (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
 }
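Both helpers used here moved from global to per-device state in this series: is_swiotlb_active() now asks whether this particular device has an io_tlb_mem attached at all, and is_swiotlb_force_bounce() replaces the old swiotlb_force == SWIOTLB_FORCE global with a per-pool flag. Sketches of the reworked helpers:

	static inline bool is_swiotlb_active(struct device *dev)
	{
		return dev->dma_io_tlb_mem != NULL;
	}

	static inline bool is_swiotlb_force_bounce(struct device *dev)
	{
		struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

		return mem && mem->force_bounce;
	}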
@@ -519,7 +546,7 @@ size_t dma_direct_max_mapping_size(struct device *dev)
 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
        return !dev_is_dma_coherent(dev) ||
-               is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+              is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
 }
 
 /**