mm/vmalloc: fallback to a single page allocator
author Uladzislau Rezki <urezki@gmail.com>
Tue, 29 Jun 2021 02:40:23 +0000 (19:40 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Jun 2021 17:53:52 +0000 (10:53 -0700)
Currently, for order-0 pages we use the bulk-page allocator to get a set of
pages.  However, the bulk allocator does not guarantee that all requested
pages are allocated.  In that case we should fall back to the single-page
allocator to get the missing pages, because it is more permissive (direct
reclaim, etc.).

Introduce a vm_area_alloc_pages() function that implements the described
logic.
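
For illustration, below is a minimal userspace sketch of the same
fast-path/fallback shape.  It is not kernel code: try_bulk() is a
hypothetical stand-in for alloc_pages_bulk_array_node(), and plain
malloc() stands in for alloc_pages_node().

#include <stdlib.h>

#define OBJ_SIZE 4096

/* Hypothetical "bulk" allocator: may fill only part of the array. */
static unsigned int try_bulk(unsigned long nr, void **slots)
{
        unsigned int n = 0;

        while (n < nr && (slots[n] = malloc(OBJ_SIZE)))
                n++;

        return n;
}

/* Fill the array, falling back to single allocations for the rest. */
static unsigned int alloc_all(unsigned long nr, void **slots)
{
        unsigned int got = try_bulk(nr, slots);

        /* Fallback path: grab the missing objects one by one. */
        while (got < nr) {
                void *p = malloc(OBJ_SIZE);

                if (!p)
                        break;

                slots[got++] = p;
        }

        return got;
}

The point is only the control flow: whatever the bulk call leaves
unfilled is retried one object at a time before giving up.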

Link: https://lkml.kernel.org/r/20210521130718.GA17882@pc638.lan
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmalloc.c

index ed0a32e..0c80caa 100644
@@ -2758,6 +2758,54 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
 EXPORT_SYMBOL_GPL(vmap_pfn);
 #endif /* CONFIG_VMAP_PFN */
 
+static inline unsigned int
+vm_area_alloc_pages(gfp_t gfp, int nid,
+               unsigned int order, unsigned long nr_pages, struct page **pages)
+{
+       unsigned int nr_allocated = 0;
+
+       /*
+        * For order-0 pages we make use of the bulk allocator. If
+        * the page array is partly or not at all populated due to
+        * allocation failures, fall back to a single-page allocator,
+        * which is more permissive.
+        */
+       if (!order)
+               nr_allocated = alloc_pages_bulk_array_node(
+                       gfp, nid, nr_pages, pages);
+       else
+               /*
+                * Compound pages required for remap_vmalloc_page if
+                * high-order pages.
+                */
+               gfp |= __GFP_COMP;
+
+       /* High-order pages or fallback path if "bulk" fails. */
+       while (nr_allocated < nr_pages) {
+               struct page *page;
+               int i;
+
+               page = alloc_pages_node(nid, gfp, order);
+               if (unlikely(!page))
+                       break;
+
+               /*
+                * Careful, we allocate and map page-order pages, but
+                * tracking is done per PAGE_SIZE page so as to keep the
+                * vm_struct APIs independent of the physical/mapped size.
+                */
+               for (i = 0; i < (1U << order); i++)
+                       pages[nr_allocated + i] = page + i;
+
+               if (gfpflags_allow_blocking(gfp))
+                       cond_resched();
+
+               nr_allocated += 1U << order;
+       }
+
+       return nr_allocated;
+}
+
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 pgprot_t prot, unsigned int page_shift,
                                 int node)
@@ -2790,37 +2838,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                return NULL;
        }
 
-       area->nr_pages = 0;
        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
        page_order = vm_area_page_order(area);
 
-       if (!page_order) {
-               area->nr_pages = alloc_pages_bulk_array_node(
-                       gfp_mask, node, nr_small_pages, area->pages);
-       } else {
-               /*
-                * Careful, we allocate and map page_order pages, but tracking is done
-                * per PAGE_SIZE page so as to keep the vm_struct APIs independent of
-                * the physical/mapped size.
-                */
-               while (area->nr_pages < nr_small_pages) {
-                       struct page *page;
-                       int i;
-
-                       /* Compound pages required for remap_vmalloc_page */
-                       page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
-                       if (unlikely(!page))
-                               break;
-
-                       for (i = 0; i < (1U << page_order); i++)
-                               area->pages[area->nr_pages + i] = page + i;
-
-                       if (gfpflags_allow_blocking(gfp_mask))
-                               cond_resched();
-
-                       area->nr_pages += 1U << page_order;
-               }
-       }
+       area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
+               page_order, nr_small_pages, area->pages);
 
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
 
@@ -2835,7 +2857,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                goto fail;
        }
 
-       if (vmap_pages_range(addr, addr + size, prot, area->pages, page_shift) < 0) {
+       if (vmap_pages_range(addr, addr + size, prot, area->pages,
+                       page_shift) < 0) {
                warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, failed to map pages",
                        area->nr_pages * PAGE_SIZE);