Linux 6.9-rc1
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 6c7117c..da177e4 100644
 #define pr_fmt(fmt)    "HugeTLB: " fmt
 
 #include <linux/pgtable.h>
+#include <linux/moduleparam.h>
 #include <linux/bootmem_info.h>
+#include <linux/mmdebug.h>
+#include <linux/pagewalk.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include "hugetlb_vmemmap.h"
@@ -25,6 +28,8 @@
  * @reuse_addr:                the virtual address of the @reuse_page page.
  * @vmemmap_pages:     the list head of the vmemmap pages that can be freed
  *                     or is mapped from.
+ * @flags:             used to modify behavior in vmemmap page table walking
+ *                     operations.
  */
 struct vmemmap_remap_walk {
        void                    (*remap_pte)(pte_t *pte, unsigned long addr,
@@ -33,36 +38,33 @@ struct vmemmap_remap_walk {
        struct page             *reuse_page;
        unsigned long           reuse_addr;
        struct list_head        *vmemmap_pages;
-};
 
-/*
- * There are a lot of struct page structures associated with each HugeTLB page.
- * For tail pages, the value of compound_head is the same. So we can reuse first
- * page of head page structures. We map the virtual addresses of all the pages
- * of tail page structures to the head page struct, and then free these page
- * frames. Therefore, we need to reserve one pages as vmemmap areas.
- */
-#define RESERVE_VMEMMAP_NR             1U
-#define RESERVE_VMEMMAP_SIZE           (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
+/* Skip the TLB flush when we split the PMD */
+#define VMEMMAP_SPLIT_NO_TLB_FLUSH     BIT(0)
+/* Skip the TLB flush when we remap the PTE */
+#define VMEMMAP_REMAP_NO_TLB_FLUSH     BIT(1)
+       unsigned long           flags;
+};
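 
 /*
  * Intended composition of the flags above (see
  * hugetlb_vmemmap_optimize_folios() below): when operating on a batch of
  * folios, the split-only walks run with VMEMMAP_SPLIT_NO_TLB_FLUSH, the
  * remap walks run with VMEMMAP_REMAP_NO_TLB_FLUSH, and the caller issues
  * a single flush_tlb_all() per phase instead of one flush per folio.
  */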
 
-static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
+                            struct vmemmap_remap_walk *walk)
 {
        pmd_t __pmd;
        int i;
        unsigned long addr = start;
-       struct page *page = pmd_page(*pmd);
-       pte_t *pgtable = pte_alloc_one_kernel(&init_mm);
+       pte_t *pgtable;
 
+       pgtable = pte_alloc_one_kernel(&init_mm);
        if (!pgtable)
                return -ENOMEM;
 
        pmd_populate_kernel(&init_mm, &__pmd, pgtable);
 
-       for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
+       for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pte_t entry, *pte;
                pgprot_t pgprot = PAGE_KERNEL;
 
-               entry = mk_pte(page + i, pgprot);
+               entry = mk_pte(head + i, pgprot);
                pte = pte_offset_kernel(&__pmd, addr);
                set_pte_at(&init_mm, addr, pte, entry);
        }
@@ -74,13 +76,14 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
                 * be treated as independent small pages (as they can be freed
                 * individually).
                 */
-               if (!PageReserved(page))
-                       split_page(page, get_order(PMD_SIZE));
+               if (!PageReserved(head))
+                       split_page(head, get_order(PMD_SIZE));
 
                /* Make pte visible before pmd. See comment in pmd_install(). */
                smp_wmb();
                pmd_populate_kernel(&init_mm, pmd, pgtable);
-               flush_tlb_kernel_range(start, start + PMD_SIZE);
+               if (!(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH))
+                       flush_tlb_kernel_range(start, start + PMD_SIZE);
        } else {
                pte_free_kernel(&init_mm, pgtable);
        }
@@ -89,135 +92,86 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
        return 0;
 }
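 
 /*
  * Note on the loop bound above: a PMD maps PTRS_PER_PTE base pages by
  * definition (PMD_SIZE / PAGE_SIZE == PTRS_PER_PTE; e.g. a 2 MiB PMD on
  * x86-64 covers 512 4 KiB pages), so iterating PTRS_PER_PTE times fills
  * the newly allocated page table exactly.
  */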
 
-static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
+static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr,
+                            unsigned long next, struct mm_walk *walk)
 {
-       int leaf;
-
-       spin_lock(&init_mm.page_table_lock);
-       leaf = pmd_leaf(*pmd);
-       spin_unlock(&init_mm.page_table_lock);
-
-       if (!leaf)
-               return 0;
+       int ret = 0;
+       struct page *head;
+       struct vmemmap_remap_walk *vmemmap_walk = walk->private;
 
-       return __split_vmemmap_huge_pmd(pmd, start);
-}
-
-static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
-                             unsigned long end,
-                             struct vmemmap_remap_walk *walk)
-{
-       pte_t *pte = pte_offset_kernel(pmd, addr);
+       /* Only splitting, not remapping the vmemmap pages. */
+       if (!vmemmap_walk->remap_pte)
+               walk->action = ACTION_CONTINUE;
 
+       spin_lock(&init_mm.page_table_lock);
+       head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
        /*
-        * The reuse_page is found 'first' in table walk before we start
-        * remapping (which is calling @walk->remap_pte).
+        * Due to HugeTLB alignment requirements and the vmemmap
+        * pages being at the start of the hotplugged memory region
+        * in the memory_hotplug.memmap_on_memory case, checking
+        * whether the page backing the first vmemmap page is
+        * self-hosted is sufficient.
+        *
+        * [                  hotplugged memory                  ]
+        * [        section        ][...][        section        ]
+        * [ vmemmap ][              usable memory               ]
+        *   ^  | ^                        |
+        *   +--+ |                        |
+        *        +------------------------+
         */
-       if (!walk->reuse_page) {
-               walk->reuse_page = pte_page(*pte);
-               /*
-                * Because the reuse address is part of the range that we are
-                * walking, skip the reuse address range.
-                */
-               addr += PAGE_SIZE;
-               pte++;
-               walk->nr_walked++;
-       }
+       if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && unlikely(!vmemmap_walk->nr_walked)) {
+               struct page *page = head ? head + pte_index(addr) :
+                                   pte_page(ptep_get(pte_offset_kernel(pmd, addr)));
 
-       for (; addr != end; addr += PAGE_SIZE, pte++) {
-               walk->remap_pte(pte, addr, walk);
-               walk->nr_walked++;
+               if (PageVmemmapSelfHosted(page))
+                       ret = -ENOTSUPP;
        }
-}
-
-static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
-                            unsigned long end,
-                            struct vmemmap_remap_walk *walk)
-{
-       pmd_t *pmd;
-       unsigned long next;
-
-       pmd = pmd_offset(pud, addr);
-       do {
-               int ret;
-
-               ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
-               if (ret)
-                       return ret;
-
-               next = pmd_addr_end(addr, end);
-               vmemmap_pte_range(pmd, addr, next, walk);
-       } while (pmd++, addr = next, addr != end);
+       spin_unlock(&init_mm.page_table_lock);
+       if (!head || ret)
+               return ret;
 
-       return 0;
+       return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk);
 }
 
-static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
-                            unsigned long end,
-                            struct vmemmap_remap_walk *walk)
+static int vmemmap_pte_entry(pte_t *pte, unsigned long addr,
+                            unsigned long next, struct mm_walk *walk)
 {
-       pud_t *pud;
-       unsigned long next;
+       struct vmemmap_remap_walk *vmemmap_walk = walk->private;
 
-       pud = pud_offset(p4d, addr);
-       do {
-               int ret;
-
-               next = pud_addr_end(addr, end);
-               ret = vmemmap_pmd_range(pud, addr, next, walk);
-               if (ret)
-                       return ret;
-       } while (pud++, addr = next, addr != end);
+       /*
+        * The reuse_page is found 'first' during the page table walk,
+        * before any remapping starts.
+        */
+       if (!vmemmap_walk->reuse_page)
+               vmemmap_walk->reuse_page = pte_page(ptep_get(pte));
+       else
+               vmemmap_walk->remap_pte(pte, addr, vmemmap_walk);
+       vmemmap_walk->nr_walked++;
 
        return 0;
 }
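 
 /*
  * The 'reuse first' assumption above holds because every caller walks
  * [@reuse, @end) with @reuse == @start - PAGE_SIZE (see the BUG_ON()s
  * below), so the first PTE visited is the one mapping the reuse page.
  */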
 
-static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
-                            unsigned long end,
-                            struct vmemmap_remap_walk *walk)
-{
-       p4d_t *p4d;
-       unsigned long next;
-
-       p4d = p4d_offset(pgd, addr);
-       do {
-               int ret;
-
-               next = p4d_addr_end(addr, end);
-               ret = vmemmap_pud_range(p4d, addr, next, walk);
-               if (ret)
-                       return ret;
-       } while (p4d++, addr = next, addr != end);
-
-       return 0;
-}
+static const struct mm_walk_ops vmemmap_remap_ops = {
+       .pmd_entry      = vmemmap_pmd_entry,
+       .pte_entry      = vmemmap_pte_entry,
+};
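 
 /*
  * With these ops, walk_page_range_novma() calls vmemmap_pmd_entry() for
  * each PMD in the range; unless that handler sets ACTION_CONTINUE (the
  * split-only case) or returns an error, the walker then descends and
  * calls vmemmap_pte_entry() on every PTE.
  */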
 
 static int vmemmap_remap_range(unsigned long start, unsigned long end,
                               struct vmemmap_remap_walk *walk)
 {
-       unsigned long addr = start;
-       unsigned long next;
-       pgd_t *pgd;
-
-       VM_BUG_ON(!PAGE_ALIGNED(start));
-       VM_BUG_ON(!PAGE_ALIGNED(end));
+       int ret;
 
-       pgd = pgd_offset_k(addr);
-       do {
-               int ret;
+       VM_BUG_ON(!PAGE_ALIGNED(start | end));
 
-               next = pgd_addr_end(addr, end);
-               ret = vmemmap_p4d_range(pgd, addr, next, walk);
-               if (ret)
-                       return ret;
-       } while (pgd++, addr = next, addr != end);
+       mmap_read_lock(&init_mm);
+       ret = walk_page_range_novma(&init_mm, start, end, &vmemmap_remap_ops,
+                                   NULL, walk);
+       mmap_read_unlock(&init_mm);
+       if (ret)
+               return ret;
 
-       /*
-        * We only change the mapping of the vmemmap virtual address range
-        * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
-        * belongs to the range.
-        */
-       flush_tlb_kernel_range(start + PAGE_SIZE, end);
+       if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
+               flush_tlb_kernel_range(start, end);
 
        return 0;
 }
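 
 /*
  * Note that the flush above starts at @start (the reuse address), not
  * @start + PAGE_SIZE: vmemmap_remap_pte() may now rewrite the PTE at
  * @reuse_addr as well (the r/w head-page remap), so its stale mapping
  * must be flushed too.
  */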
@@ -241,10 +195,8 @@ static void free_vmemmap_page_list(struct list_head *list)
 {
        struct page *page, *next;
 
-       list_for_each_entry_safe(page, next, list, lru) {
-               list_del(&page->lru);
+       list_for_each_entry_safe(page, next, list, lru)
                free_vmemmap_page(page);
-       }
 }
 
 static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
@@ -255,10 +207,24 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
         * to the tail pages.
         */
        pgprot_t pgprot = PAGE_KERNEL_RO;
-       pte_t entry = mk_pte(walk->reuse_page, pgprot);
-       struct page *page = pte_page(*pte);
+       struct page *page = pte_page(ptep_get(pte));
+       pte_t entry;
+
+       /* Remapping the head page requires r/w */
+       if (unlikely(addr == walk->reuse_addr)) {
+               pgprot = PAGE_KERNEL;
+               list_del(&walk->reuse_page->lru);
+
+               /*
+                * Makes sure that preceding stores to the page contents from
+                * vmemmap_remap_free() become visible before the set_pte_at()
+                * write.
+                */
+               smp_wmb();
+       }
 
-       list_add_tail(&page->lru, walk->vmemmap_pages);
+       entry = mk_pte(walk->reuse_page, pgprot);
+       list_add(&page->lru, walk->vmemmap_pages);
        set_pte_at(&init_mm, addr, pte, entry);
 }
 
@@ -266,7 +232,7 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
  * How many struct page structs need to be reset. When we reuse the head
  * struct page, the special metadata (e.g. page->flags or page->mapping)
  * cannot be copied to the tail struct page structs. The invalid value
- * will be checked in free_tail_pages_check(). To avoid the "corrupted
+ * will be checked in free_tail_page_prepare(). To avoid the "corrupted
  * mapping in tail page" message, we need to reset at least 3 struct page
  * structs (one head struct page struct and two tail struct page structs).
@@ -275,11 +241,10 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 
 static inline void reset_struct_pages(struct page *start)
 {
-       int i;
        struct page *from = start + NR_RESET_STRUCT_PAGE;
 
-       for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
-               memcpy(start + i, from, sizeof(*from));
+       BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
+       memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
 }
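 
 /*
  * Layout assumed by the memcpy() above: the first NR_RESET_STRUCT_PAGE
  * struct pages are rewritten from the NR_RESET_STRUCT_PAGE known-clean
  * tail struct pages that immediately follow them; the BUILD_BUG_ON()
  * guarantees that both halves fit within the single page being restored.
  */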
 
 static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
@@ -289,7 +254,7 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
        struct page *page;
        void *to;
 
-       BUG_ON(pte_page(*pte) != walk->reuse_page);
+       BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);
 
        page = list_first_entry(walk->vmemmap_pages, struct page, lru);
        list_del(&page->lru);
@@ -297,9 +262,39 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
        copy_page(to, (void *)walk->reuse_addr);
        reset_struct_pages(to);
 
+       /*
+        * Makes sure that preceding stores to the page contents become visible
+        * before the set_pte_at() write.
+        */
+       smp_wmb();
        set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
 }
 
+/**
+ * vmemmap_remap_split - split the PMDs backing the vmemmap virtual address
+ *                      range [@start, @end) into PTE-level mappings
+ * @start:     start address of the vmemmap virtual address range that we want
+ *             to remap.
+ * @end:       end address of the vmemmap virtual address range that we want to
+ *             remap.
+ * @reuse:     reuse address.
+ *
+ * Return: %0 on success, negative error code otherwise.
+ */
+static int vmemmap_remap_split(unsigned long start, unsigned long end,
+                              unsigned long reuse)
+{
+       struct vmemmap_remap_walk walk = {
+               .remap_pte      = NULL,
+               .flags          = VMEMMAP_SPLIT_NO_TLB_FLUSH,
+       };
+
+       /* See the comment in vmemmap_remap_free(). */
+       BUG_ON(start - reuse != PAGE_SIZE);
+
+       return vmemmap_remap_range(reuse, end, &walk);
+}
+
 /**
  * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
  *                     to the page which @reuse is mapped to, then free vmemmap
@@ -309,19 +304,41 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
  * @end:       end address of the vmemmap virtual address range that we want to
  *             remap.
  * @reuse:     reuse address.
+ * @vmemmap_pages: list on which to deposit the vmemmap pages to be freed.
+ *             It is the caller's responsibility to free these pages.
+ * @flags:     modifications to vmemmap_remap_walk flags
  *
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_free(unsigned long start, unsigned long end,
-                             unsigned long reuse)
+                             unsigned long reuse,
+                             struct list_head *vmemmap_pages,
+                             unsigned long flags)
 {
        int ret;
-       LIST_HEAD(vmemmap_pages);
        struct vmemmap_remap_walk walk = {
                .remap_pte      = vmemmap_remap_pte,
                .reuse_addr     = reuse,
-               .vmemmap_pages  = &vmemmap_pages,
+               .vmemmap_pages  = vmemmap_pages,
+               .flags          = flags,
        };
+       int nid = page_to_nid((struct page *)reuse);
+       gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+
+       /*
+        * Allocate a new head vmemmap page to avoid breaking a contiguous
+        * block of struct page memory when freeing it back to the page
+        * allocator in free_vmemmap_page_list(). This keeps the likely
+        * contiguous struct page backing memory contiguous, allowing for
+        * more hugepage allocations. Fall back to the currently mapped
+        * head page should the allocation fail.
+        */
+       walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
+       if (walk.reuse_page) {
+               copy_page(page_to_virt(walk.reuse_page),
+                         (void *)walk.reuse_addr);
+               list_add(&walk.reuse_page->lru, vmemmap_pages);
+       }
 
        /*
         * In order to make remapping routine most efficient for the huge pages,
@@ -338,7 +355,6 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
         */
        BUG_ON(start - reuse != PAGE_SIZE);
 
-       mmap_read_lock(&init_mm);
        ret = vmemmap_remap_range(reuse, end, &walk);
        if (ret && walk.nr_walked) {
                end = reuse + walk.nr_walked * PAGE_SIZE;
@@ -351,21 +367,20 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
                walk = (struct vmemmap_remap_walk) {
                        .remap_pte      = vmemmap_restore_pte,
                        .reuse_addr     = reuse,
-                       .vmemmap_pages  = &vmemmap_pages,
+                       .vmemmap_pages  = vmemmap_pages,
+                       .flags          = 0,
                };
 
                vmemmap_remap_range(reuse, end, &walk);
        }
-       mmap_read_unlock(&init_mm);
-
-       free_vmemmap_page_list(&vmemmap_pages);
 
        return ret;
 }
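 
 /*
  * A sketch of the caller contract, with a hypothetical helper that is not
  * part of this file (hugetlb_vmemmap_optimize_folio() below is the real
  * caller): @start/@end page-aligned, @start - @reuse == PAGE_SIZE, and the
  * caller owns and frees @vmemmap_pages.
  */
 #if 0
 static int example_remap_and_free(unsigned long start, unsigned long end,
                                   unsigned long reuse)
 {
        LIST_HEAD(vmemmap_pages);       /* caller-owned list */
        int ret;
 
        /* flags == 0: vmemmap_remap_range() performs the TLB flush itself */
        ret = vmemmap_remap_free(start, end, reuse, &vmemmap_pages, 0);
        /* whatever was deposited (or left) on the list is freed here */
        free_vmemmap_page_list(&vmemmap_pages);
        return ret;
 }
 #endif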
 
 static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
-                                  gfp_t gfp_mask, struct list_head *list)
+                                  struct list_head *list)
 {
+       gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
        unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
        int nid = page_to_nid((struct page *)start);
        struct page *page, *next;
@@ -374,13 +389,13 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
                page = alloc_pages_node(nid, gfp_mask, 0);
                if (!page)
                        goto out;
-               list_add_tail(&page->lru, list);
+               list_add(&page->lru, list);
        }
 
        return 0;
 out:
        list_for_each_entry_safe(page, next, list, lru)
-               __free_pages(page, 0);
+               __free_page(page);
        return -ENOMEM;
 }
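 
 /*
  * __GFP_RETRY_MAYFAIL asks the allocator to try hard for each page but to
  * return NULL rather than invoke the OOM killer, so a failed restore
  * degrades into the -ENOMEM path above instead of killing tasks.
  */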
 
@@ -393,31 +408,28 @@ out:
  * @end:       end address of the vmemmap virtual address range that we want to
  *             remap.
  * @reuse:     reuse address.
- * @gfp_mask:  GFP flag for allocating vmemmap pages.
+ * @flags:     modifications to vmemmap_remap_walk flags
  *
  * Return: %0 on success, negative error code otherwise.
  */
 static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
-                              unsigned long reuse, gfp_t gfp_mask)
+                              unsigned long reuse, unsigned long flags)
 {
        LIST_HEAD(vmemmap_pages);
        struct vmemmap_remap_walk walk = {
                .remap_pte      = vmemmap_restore_pte,
                .reuse_addr     = reuse,
                .vmemmap_pages  = &vmemmap_pages,
+               .flags          = flags,
        };
 
        /* See the comment in vmemmap_remap_free(). */
        BUG_ON(start - reuse != PAGE_SIZE);
 
-       if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
+       if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
                return -ENOMEM;
 
-       mmap_read_lock(&init_mm);
-       vmemmap_remap_range(reuse, end, &walk);
-       mmap_read_unlock(&init_mm);
-
-       return 0;
+       return vmemmap_remap_range(reuse, end, &walk);
 }
 
 DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
@@ -426,176 +438,263 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
 
-/*
- * Previously discarded vmemmap pages will be allocated and remapping
- * after this function returns zero.
- */
-int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
+static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
+                                          struct folio *folio, unsigned long flags)
 {
        int ret;
-       unsigned long vmemmap_addr = (unsigned long)head;
-       unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
+       unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
+       unsigned long vmemmap_reuse;
 
-       if (!HPageVmemmapOptimized(head))
+       VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+       if (!folio_test_hugetlb_vmemmap_optimized(folio))
                return 0;
 
-       vmemmap_addr    += RESERVE_VMEMMAP_SIZE;
-       vmemmap_pages   = hugetlb_optimize_vmemmap_pages(h);
-       vmemmap_end     = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
-       vmemmap_reuse   = vmemmap_addr - PAGE_SIZE;
+       vmemmap_end     = vmemmap_start + hugetlb_vmemmap_size(h);
+       vmemmap_reuse   = vmemmap_start;
+       vmemmap_start   += HUGETLB_VMEMMAP_RESERVE_SIZE;
 
        /*
-        * The pages which the vmemmap virtual address range [@vmemmap_addr,
+        * The pages which the vmemmap virtual address range [@vmemmap_start,
         * @vmemmap_end) are mapped to are freed to the buddy allocator, and
         * the range is mapped to the page which @vmemmap_reuse is mapped to.
         * When a HugeTLB page is freed to the buddy allocator, previously
         * discarded vmemmap pages must be allocated and remapped.
         */
-       ret = vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
-                                 GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
+       ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags);
        if (!ret) {
-               ClearHPageVmemmapOptimized(head);
+               folio_clear_hugetlb_vmemmap_optimized(folio);
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
        }
 
        return ret;
 }
 
-static unsigned int vmemmap_optimizable_pages(struct hstate *h,
-                                             struct page *head)
+/**
+ * hugetlb_vmemmap_restore_folio - restore previously optimized (by
+ *                             hugetlb_vmemmap_optimize_folio()) vmemmap pages which
+ *                             will be reallocated and remapped.
+ * @h:         struct hstate.
+ * @folio:     the folio whose vmemmap pages will be restored.
+ *
+ * Return: %0 if @folio's vmemmap pages have been reallocated and remapped,
+ * negative error code otherwise.
+ */
+int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
 {
-       if (!READ_ONCE(vmemmap_optimize_enabled))
-               return 0;
-
-       if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
-               pmd_t *pmdp, pmd;
-               struct page *vmemmap_page;
-               unsigned long vaddr = (unsigned long)head;
+       return __hugetlb_vmemmap_restore_folio(h, folio, 0);
+}
 
-               /*
-                * Only the vmemmap page's vmemmap page can be self-hosted.
-                * Walking the page tables to find the backing page of the
-                * vmemmap page.
-                */
-               pmdp = pmd_off_k(vaddr);
-               /*
-                * The READ_ONCE() is used to stabilize *pmdp in a register or
-                * on the stack so that it will stop changing under the code.
-                * The only concurrent operation where it can be changed is
-                * split_vmemmap_huge_pmd() (*pmdp will be stable after this
-                * operation).
-                */
-               pmd = READ_ONCE(*pmdp);
-               if (pmd_leaf(pmd))
-                       vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
-               else
-                       vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
-               /*
-                * Due to HugeTLB alignment requirements and the vmemmap pages
-                * being at the start of the hotplugged memory region in
-                * memory_hotplug.memmap_on_memory case. Checking any vmemmap
-                * page's vmemmap page if it is marked as VmemmapSelfHosted is
-                * sufficient.
-                *
-                * [                  hotplugged memory                  ]
-                * [        section        ][...][        section        ]
-                * [ vmemmap ][              usable memory               ]
-                *   ^   |     |                                        |
-                *   +---+     |                                        |
-                *     ^       |                                        |
-                *     +-------+                                        |
-                *          ^                                           |
-                *          +-------------------------------------------+
-                */
-               if (PageVmemmapSelfHosted(vmemmap_page))
-                       return 0;
+/**
+ * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio on the list.
+ * @h:                 hstate.
+ * @folio_list:                list of folios.
+ * @non_hvo_folios:    Output list of folios for which vmemmap exists.
+ *
+ * Return: number of folios for which vmemmap was restored, or an error code
+ *             if an error was encountered restoring vmemmap for a folio.
+ *             Folios that have vmemmap are moved to the non_hvo_folios
+ *             list.  Processing of entries stops when the first error is
+ *             encountered. The folio that experienced the error and all
+ *             non-processed folios will remain on folio_list.
+ */
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+                                       struct list_head *folio_list,
+                                       struct list_head *non_hvo_folios)
+{
+       struct folio *folio, *t_folio;
+       long restored = 0;
+       long ret = 0;
+
+       list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
+               if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+                       ret = __hugetlb_vmemmap_restore_folio(h, folio,
+                                                             VMEMMAP_REMAP_NO_TLB_FLUSH);
+                       if (ret)
+                               break;
+                       restored++;
+               }
+
+               /* Add non-optimized folios to output list */
+               list_move(&folio->lru, non_hvo_folios);
        }
 
-       return hugetlb_optimize_vmemmap_pages(h);
+       if (restored)
+               flush_tlb_all();
+       if (!ret)
+               ret = restored;
+       return ret;
+}
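 
 /*
  * As on the optimize side, the restore side batches its TLB flush: each
  * __hugetlb_vmemmap_restore_folio() call above runs with
  * VMEMMAP_REMAP_NO_TLB_FLUSH, and a single flush_tlb_all() covers every
  * folio restored.
  */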
+
+/* Return true iff the vmemmap of a HugeTLB folio should and can be optimized. */
+static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
+{
+       if (folio_test_hugetlb_vmemmap_optimized(folio))
+               return false;
+
+       if (!READ_ONCE(vmemmap_optimize_enabled))
+               return false;
+
+       if (!hugetlb_vmemmap_optimizable(h))
+               return false;
+
+       return true;
 }
 
-void hugetlb_vmemmap_free(struct hstate *h, struct page *head)
+static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
+                                           struct folio *folio,
+                                           struct list_head *vmemmap_pages,
+                                           unsigned long flags)
 {
-       unsigned long vmemmap_addr = (unsigned long)head;
-       unsigned long vmemmap_end, vmemmap_reuse, vmemmap_pages;
+       int ret = 0;
+       unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
+       unsigned long vmemmap_reuse;
 
-       vmemmap_pages = vmemmap_optimizable_pages(h, head);
-       if (!vmemmap_pages)
-               return;
+       VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+       if (!vmemmap_should_optimize_folio(h, folio))
+               return ret;
 
        static_branch_inc(&hugetlb_optimize_vmemmap_key);
+       /*
+        * Very Subtle
+        * If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed
+        * immediately after remapping.  As a result, subsequent accesses
+        * and modifications to struct pages associated with the hugetlb
+        * page could be to the OLD struct pages.  Set the vmemmap optimized
+        * flag here so that it is copied to the new head page.  This keeps
+        * the old and new struct pages in sync.
+        * If there is an error during optimization, we will immediately FLUSH
+        * the TLB and clear the flag below.
+        */
+       folio_set_hugetlb_vmemmap_optimized(folio);
 
-       vmemmap_addr    += RESERVE_VMEMMAP_SIZE;
-       vmemmap_end     = vmemmap_addr + (vmemmap_pages << PAGE_SHIFT);
-       vmemmap_reuse   = vmemmap_addr - PAGE_SIZE;
+       vmemmap_end     = vmemmap_start + hugetlb_vmemmap_size(h);
+       vmemmap_reuse   = vmemmap_start;
+       vmemmap_start   += HUGETLB_VMEMMAP_RESERVE_SIZE;
 
        /*
-        * Remap the vmemmap virtual address range [@vmemmap_addr, @vmemmap_end)
-        * to the page which @vmemmap_reuse is mapped to, then free the pages
-        * which the range [@vmemmap_addr, @vmemmap_end] is mapped to.
+        * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
+        * to the page which @vmemmap_reuse is mapped to.  Add pages previously
+        * mapping the range to vmemmap_pages list so that they can be freed by
+        * the caller.
         */
-       if (vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse))
+       ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse,
+                                vmemmap_pages, flags);
+       if (ret) {
                static_branch_dec(&hugetlb_optimize_vmemmap_key);
-       else
-               SetHPageVmemmapOptimized(head);
+               folio_clear_hugetlb_vmemmap_optimized(folio);
+       }
+
+       return ret;
+}
+
+/**
+ * hugetlb_vmemmap_optimize_folio - optimize @folio's vmemmap pages.
+ * @h:         struct hstate.
+ * @folio:     the folio whose vmemmap pages will be optimized.
+ *
+ * This function only tries to optimize @folio's vmemmap pages and does not
+ * guarantee that the optimization will succeed after it returns. The caller
+ * can use folio_test_hugetlb_vmemmap_optimized(@folio) to detect if @folio's
+ * vmemmap pages have been optimized.
+ */
+void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
+{
+       LIST_HEAD(vmemmap_pages);
+
+       __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, 0);
+       free_vmemmap_page_list(&vmemmap_pages);
 }
 
-void __init hugetlb_vmemmap_init(struct hstate *h)
+static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
 {
-       unsigned int nr_pages = pages_per_huge_page(h);
-       unsigned int vmemmap_pages;
+       unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
+       unsigned long vmemmap_reuse;
+
+       if (!vmemmap_should_optimize_folio(h, folio))
+               return 0;
+
+       vmemmap_end     = vmemmap_start + hugetlb_vmemmap_size(h);
+       vmemmap_reuse   = vmemmap_start;
+       vmemmap_start   += HUGETLB_VMEMMAP_RESERVE_SIZE;
 
        /*
-        * There are only (RESERVE_VMEMMAP_SIZE / sizeof(struct page)) struct
-        * page structs that can be used when HVO is enabled, add a BUILD_BUG_ON
-        * to catch invalid usage of the tail page structs.
+        * Split PMDs on the vmemmap virtual address range [@vmemmap_start,
+        * @vmemmap_end).
         */
-       BUILD_BUG_ON(__NR_USED_SUBPAGE >=
-                    RESERVE_VMEMMAP_SIZE / sizeof(struct page));
+       return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse);
+}
 
-       if (!is_power_of_2(sizeof(struct page))) {
-               pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
-               return;
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+       struct folio *folio;
+       LIST_HEAD(vmemmap_pages);
+
+       list_for_each_entry(folio, folio_list, lru) {
+               int ret = hugetlb_vmemmap_split_folio(h, folio);
+
+               /*
+                * Splitting the PMD requires allocating a page, so let's
+                * fail early once we encounter the first OOM. There is no
+                * point in retrying, as the split can be done dynamically
+                * on remap with the memory we get back from the vmemmap
+                * deduplication.
+                */
+               if (ret == -ENOMEM)
+                       break;
        }
 
-       vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
-       /*
-        * The head page is not to be freed to buddy allocator, the other tail
-        * pages will map to the head page, so they can be freed.
-        *
-        * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
-        * on some architectures (e.g. aarch64). See Documentation/arm64/
-        * hugetlbpage.rst for more details.
-        */
-       if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
-               h->optimize_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
+       flush_tlb_all();
+
+       list_for_each_entry(folio, folio_list, lru) {
+               int ret;
+
+               ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+                                                      VMEMMAP_REMAP_NO_TLB_FLUSH);
 
-       pr_info("can optimize %d vmemmap pages for %s\n",
-               h->optimize_vmemmap_pages, h->name);
+               /*
+                * Pages to be freed may have accumulated.  If we
+                * encounter an ENOMEM, free what we have and try again.
+                * This can occur when splitting failed partway through
+                * and the head page allocation also failed; freeing the
+                * accumulated pages returns memory to the allocator, so
+                * the retried remap has a chance to succeed.
+                */
+               if (ret == -ENOMEM && !list_empty(&vmemmap_pages)) {
+                       flush_tlb_all();
+                       free_vmemmap_page_list(&vmemmap_pages);
+                       INIT_LIST_HEAD(&vmemmap_pages);
+                       __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
+                                                        VMEMMAP_REMAP_NO_TLB_FLUSH);
+               }
+       }
+
+       flush_tlb_all();
+       free_vmemmap_page_list(&vmemmap_pages);
 }
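 
 /*
  * Net effect of the loop structure above: an arbitrarily long folio list
  * costs one global TLB flush after the split pass and one after the remap
  * pass (plus one more only on the rare ENOMEM retry), rather than one
  * flush per folio.
  */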
 
-#ifdef CONFIG_PROC_SYSCTL
 static struct ctl_table hugetlb_vmemmap_sysctls[] = {
        {
                .procname       = "hugetlb_optimize_vmemmap",
                .data           = &vmemmap_optimize_enabled,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(vmemmap_optimize_enabled),
                .mode           = 0644,
                .proc_handler   = proc_dobool,
        },
        { }
 };
 
-static __init int hugetlb_vmemmap_sysctls_init(void)
+static int __init hugetlb_vmemmap_init(void)
 {
-       /*
-        * If "struct page" crosses page boundaries, the vmemmap pages cannot
-        * be optimized.
-        */
-       if (is_power_of_2(sizeof(struct page)))
-               register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
+       const struct hstate *h;
 
+       /* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
+       BUILD_BUG_ON(__NR_USED_SUBPAGE > HUGETLB_VMEMMAP_RESERVE_PAGES);
+
+       for_each_hstate(h) {
+               if (hugetlb_vmemmap_optimizable(h)) {
+                       register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
+                       break;
+               }
+       }
        return 0;
 }
-late_initcall(hugetlb_vmemmap_sysctls_init);
-#endif /* CONFIG_PROC_SYSCTL */
+late_initcall(hugetlb_vmemmap_init);