Merge tag 'docs-5.9-3' of git://git.lwn.net/linux
[linux-2.6-microblaze.git] / kernel / dma / pool.c
index 6bc74a2..1281c0f 100644
@@ -3,7 +3,9 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2020 Google LLC
  */
+#include <linux/cma.h>
 #include <linux/debugfs.h>
+#include <linux/dma-contiguous.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/init.h>
@@ -55,11 +57,34 @@ static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
                pool_size_kernel += size;
 }
 
+static bool cma_in_zone(gfp_t gfp)
+{
+       unsigned long size;
+       phys_addr_t end;
+       struct cma *cma;
+
+       cma = dev_get_cma_area(NULL);
+       if (!cma)
+               return false;
+
+       size = cma_get_size(cma);
+       if (!size)
+               return false;
+
+       /* CMA can't cross zone boundaries, see cma_activate_area() */
+       end = cma_get_base(cma) + size - 1;
+       if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+               return end <= DMA_BIT_MASK(zone_dma_bits);
+       if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+               return end <= DMA_BIT_MASK(32);
+       return true;
+}
+
 static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
                              gfp_t gfp)
 {
        unsigned int order;
-       struct page *page;
+       struct page *page = NULL;
        void *addr;
        int ret = -ENOMEM;
 
@@ -68,7 +93,11 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
        do {
                pool_size = 1 << (PAGE_SHIFT + order);
-               page = alloc_pages(gfp, order);
+               if (cma_in_zone(gfp))
+                       page = dma_alloc_from_contiguous(NULL, 1 << order,
+                                                        order, false);
+               if (!page)
+                       page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);
        if (!page)
                goto out;
@@ -196,93 +225,75 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
-       u64 phys_mask;
-       gfp_t gfp;
-
-       gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-                                         &phys_mask);
-       if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+       if (prev == NULL) {
+               if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+                       return atomic_pool_dma32;
+               if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+                       return atomic_pool_dma;
+               return atomic_pool_kernel;
+       }
+       if (prev == atomic_pool_kernel)
+               return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+       if (prev == atomic_pool_dma32)
                return atomic_pool_dma;
-       if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
-               return atomic_pool_dma32;
-       return atomic_pool_kernel;
+       return NULL;
 }
 
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
+               struct gen_pool *pool, void **cpu_addr,
+               bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
-       if (bad_pool == atomic_pool_kernel)
-               return atomic_pool_dma32 ? : atomic_pool_dma;
+       unsigned long addr;
+       phys_addr_t phys;
 
-       if (bad_pool == atomic_pool_dma32)
-               return atomic_pool_dma;
+       addr = gen_pool_alloc(pool, size);
+       if (!addr)
+               return NULL;
 
-       return NULL;
-}
+       phys = gen_pool_virt_to_phys(pool, addr);
+       if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
+               gen_pool_free(pool, addr, size);
+               return NULL;
+       }
 
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
-                                             struct gen_pool *bad_pool)
-{
-       if (bad_pool)
-               return dma_get_safer_pool(bad_pool);
+       if (gen_pool_avail(pool) < atomic_pool_size)
+               schedule_work(&atomic_pool_work);
 
-       return dma_guess_pool_from_device(dev);
+       *cpu_addr = (void *)addr;
+       memset(*cpu_addr, 0, size);
+       return pfn_to_page(__phys_to_pfn(phys));
 }
 
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-                         struct page **ret_page, gfp_t flags)
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+               void **cpu_addr, gfp_t gfp,
+               bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
        struct gen_pool *pool = NULL;
-       unsigned long val = 0;
-       void *ptr = NULL;
-       phys_addr_t phys;
-
-       while (1) {
-               pool = dma_guess_pool(dev, pool);
-               if (!pool) {
-                       WARN(1, "Failed to get suitable pool for %s\n",
-                            dev_name(dev));
-                       break;
-               }
-
-               val = gen_pool_alloc(pool, size);
-               if (!val)
-                       continue;
-
-               phys = gen_pool_virt_to_phys(pool, val);
-               if (dma_coherent_ok(dev, phys, size))
-                       break;
-
-               gen_pool_free(pool, val, size);
-               val = 0;
-       }
-
-
-       if (val) {
-               *ret_page = pfn_to_page(__phys_to_pfn(phys));
-               ptr = (void *)val;
-               memset(ptr, 0, size);
+       struct page *page;
 
-               if (gen_pool_avail(pool) < atomic_pool_size)
-                       schedule_work(&atomic_pool_work);
+       while ((pool = dma_guess_pool(pool, gfp))) {
+               page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
+                                            phys_addr_ok);
+               if (page)
+                       return page;
        }
 
-       return ptr;
+       WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+       return NULL;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
        struct gen_pool *pool = NULL;
 
-       while (1) {
-               pool = dma_guess_pool(dev, pool);
-               if (!pool)
-                       return false;
-
-               if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
-                       gen_pool_free(pool, (unsigned long)start, size);
-                       return true;
-               }
+       while ((pool = dma_guess_pool(pool, 0))) {
+               if (!gen_pool_has_addr(pool, (unsigned long)start, size))
+                       continue;
+               gen_pool_free(pool, (unsigned long)start, size);
+               return true;
        }
+
+       return false;
 }