mm/sparse-vmemmap: refactor core of vmemmap_populate_basepages() to helper
author Joao Martins <joao.m.martins@oracle.com>
Fri, 29 Apr 2022 06:16:15 +0000 (23:16 -0700)
committer akpm <akpm@linux-foundation.org>
Fri, 29 Apr 2022 06:16:15 +0000 (23:16 -0700)
In preparation for describing a memmap with compound pages, move the
actual pte population logic into a separate function
vmemmap_populate_address() and have a new helper vmemmap_populate_range()
walk through all base pages it needs to populate.

While doing that, change vmemmap_populate_address() to return a pte_t *,
rather than a hardcoded errno of 0 or -ENOMEM.
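
Returning the pte_t * lets a later caller inspect or reuse the page table
entry it just populated. As a hypothetical sketch only (the function name
populate_compound_range() and the reuse logic are illustrative, not part of
this patch), a compound-memmap caller could populate the head vmemmap page
once and then remember the page backing it for the tail-page addresses:

	/*
	 * Hypothetical follow-up (not in this patch): populate the first
	 * vmemmap page of a compound region, then keep a reference to the
	 * page backing it so the remaining addresses can map it again.
	 */
	static int __meminit populate_compound_range(unsigned long start,
						     unsigned long end,
						     int node)
	{
		pte_t *pte = vmemmap_populate_address(start, node, NULL);
		struct page *reuse;

		if (!pte)
			return -ENOMEM;
		/* page now backing the head of the memmap range */
		reuse = pte_page(*pte);

		/* ... subsequent addresses could be pointed at 'reuse' ... */
		return 0;
	}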

Link: https://lkml.kernel.org/r/20220420155310.9712-3-joao.m.martins@oracle.com
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/sparse-vmemmap.c

index fb68e77..ef15664 100644
@@ -608,38 +608,57 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
        return pgd;
 }
 
-int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
-                                        int node, struct vmem_altmap *altmap)
+static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
+                                             struct vmem_altmap *altmap)
 {
-       unsigned long addr = start;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
+       pgd = vmemmap_pgd_populate(addr, node);
+       if (!pgd)
+               return NULL;
+       p4d = vmemmap_p4d_populate(pgd, addr, node);
+       if (!p4d)
+               return NULL;
+       pud = vmemmap_pud_populate(p4d, addr, node);
+       if (!pud)
+               return NULL;
+       pmd = vmemmap_pmd_populate(pud, addr, node);
+       if (!pmd)
+               return NULL;
+       pte = vmemmap_pte_populate(pmd, addr, node, altmap);
+       if (!pte)
+               return NULL;
+       vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
+
+       return pte;
+}
+
+static int __meminit vmemmap_populate_range(unsigned long start,
+                                           unsigned long end, int node,
+                                           struct vmem_altmap *altmap)
+{
+       unsigned long addr = start;
+       pte_t *pte;
+
        for (; addr < end; addr += PAGE_SIZE) {
-               pgd = vmemmap_pgd_populate(addr, node);
-               if (!pgd)
-                       return -ENOMEM;
-               p4d = vmemmap_p4d_populate(pgd, addr, node);
-               if (!p4d)
-                       return -ENOMEM;
-               pud = vmemmap_pud_populate(p4d, addr, node);
-               if (!pud)
-                       return -ENOMEM;
-               pmd = vmemmap_pmd_populate(pud, addr, node);
-               if (!pmd)
-                       return -ENOMEM;
-               pte = vmemmap_pte_populate(pmd, addr, node, altmap);
+               pte = vmemmap_populate_address(addr, node, altmap);
                if (!pte)
                        return -ENOMEM;
-               vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }
 
        return 0;
 }
 
+int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
+                                        int node, struct vmem_altmap *altmap)
+{
+       return vmemmap_populate_range(start, end, node, altmap);
+}
+
 struct page * __meminit __populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
                struct dev_pagemap *pgmap)