// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <linux/mmdebug.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of walked PTEs.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or remapped.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	unsigned long nr_walked;
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};
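
/*
 * The same walk drives both directions of HVO: vmemmap_remap_free() sets
 * @remap_pte to vmemmap_remap_pte() to fold the tail vmemmap pages onto
 * @reuse_page, while vmemmap_remap_alloc() sets it to vmemmap_restore_pte()
 * to remap each address back to a freshly allocated page.
 */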

static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int i;
	pmd_t __pmd;
	pte_t *pgtable;
	struct page *head;
	unsigned long addr = start;

	spin_lock(&init_mm.page_table_lock);
	head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL;
	spin_unlock(&init_mm.page_table_lock);
	if (!head)
		return 0;

	pgtable = pte_alloc_one_kernel(&init_mm);
	if (!pgtable)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(head + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher-order allocations from the buddy allocator must be
		 * treatable as independent small pages (as they can be freed
		 * individually).
		 */
		if (!PageReserved(head))
			split_page(head, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
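
/*
 * For scale (assuming x86-64 with 4 KiB base pages and a 64-byte struct
 * page): one vmemmap PMD covers 2 MiB of struct page memory, i.e. 32768
 * struct pages describing 128 MiB of physical memory, and splitting it
 * installs a single 512-entry PTE table in its place.
 */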

static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end, struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(ptep_get(pte));
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end, struct vmemmap_remap_walk *walk)
{
	unsigned long next;
	pmd_t *pmd = pmd_offset(pud, addr);

	do {
		int ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);

		if (ret)
			return ret;
		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end, struct vmemmap_remap_walk *walk)
{
	unsigned long next;
	pud_t *pud = pud_offset(p4d, addr);

	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end, struct vmemmap_remap_walk *walk)
{
	unsigned long next;
	p4d_t *p4d = p4d_offset(pgd, addr);

	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	flush_tlb_kernel_range(start, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru)
		free_vmemmap_page(page);
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	struct page *page = pte_page(ptep_get(pte));
	pte_t entry;

	/* Remapping the head page requires r/w */
	if (unlikely(addr == walk->reuse_addr)) {
		pgprot = PAGE_KERNEL;
		list_del(&walk->reuse_page->lru);
		/*
		 * Makes sure that preceding stores to the page contents from
		 * vmemmap_remap_free() become visible before the set_pte_at()
		 * write.
		 */
		smp_wmb();
	}

	entry = mk_pte(walk->reuse_page, pgprot);
	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid value will
 * be checked in free_tail_page_prepare(). In order to avoid the message of
 * "corrupted mapping in tail page", we need to reset at least 3 (one head
 * struct page struct and two tail struct page structs) struct page structs.
 */
#define NR_RESET_STRUCT_PAGE	3

static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}
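
/*
 * Note on why the copy above is safe: after copy_page() in
 * vmemmap_restore_pte(), every struct page in the new vmemmap page beyond
 * the first few is a plain tail-page template, so copying
 * NR_RESET_STRUCT_PAGE of them from offset NR_RESET_STRUCT_PAGE back to the
 * start wipes the head-specific metadata. The BUILD_BUG_ON() guarantees the
 * source range still lies within the same page.
 */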

static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become
	 * visible before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};
	int nid = page_to_nid((struct page *)start);
	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY |
			 __GFP_NOWARN;

	/*
	 * Allocate a new head vmemmap page to avoid breaking a contiguous
	 * block of struct page memory when freeing it back to the page
	 * allocator in free_vmemmap_page_list(). This will allow the likely
	 * contiguous struct page backing memory to be kept contiguous,
	 * allowing for more allocations of hugepages. Fall back to the
	 * currently mapped head page should the allocation fail.
	 */
	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
	if (walk.reuse_page) {
		copy_page(page_to_virt(walk.reuse_page),
			  (void *)walk.reuse_addr);
		list_add(&walk.reuse_page->lru, &vmemmap_pages);
	}

	/*
	 * In order to make the remapping routine most efficient for huge
	 * pages, the vmemmap page table walking routine has the following
	 * rules (see more details in vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking, which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);
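
	/*
	 * A concrete illustration (assuming x86-64, 4 KiB base pages and a
	 * 64-byte struct page): a 2 MiB HugeTLB page needs 512 * 64 bytes =
	 * 32 KiB of struct pages, i.e. 8 vmemmap pages. Here @reuse is the
	 * head vmemmap page, @start = @reuse + PAGE_SIZE and
	 * @end = @reuse + 8 * PAGE_SIZE, so the walk below remaps the 7 tail
	 * vmemmap pages onto the head one and frees them.
	 */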

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed. These are pages
		 * which were removed from the vmemmap. They will be
		 * restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   struct list_head *list)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_page(page);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 to pages which are taken from @vmemmap_pages,
 *			 respectively.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
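
/*
 * vmemmap_optimize_enabled can also be set on the kernel command line via
 * the "hugetlb_free_vmemmap=" parameter declared above with core_param()
 * (e.g. "hugetlb_free_vmemmap=on"), and at run time through the
 * vm.hugetlb_optimize_vmemmap sysctl registered at the bottom of this file.
 */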

/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	VM_WARN_ON_ONCE(!PageHuge(head));
	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse = vmemmap_start;
	vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to were freed to the buddy allocator, and
	 * the range is now mapped to the page which @vmemmap_reuse is mapped
	 * to. When a HugeTLB page is freed back to the buddy allocator, the
	 * previously discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}

/* Return true iff a HugeTLB page's vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only the vmemmap page's vmemmap page can be self-hosted.
		 * Walk the page tables to find the backing page of the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements and the vmemmap pages
		 * being at the start of the hotplugged memory region in the
		 * memory_hotplug.memmap_on_memory case, checking whether any
		 * vmemmap page's vmemmap page is marked VmemmapSelfHosted is
		 * sufficient. The vmemmap region at the start of the
		 * hotplugged range hosts the struct pages for the whole
		 * range, including its own:
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}

/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	VM_WARN_ON_ONCE(!PageHuge(head));
	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse = vmemmap_start;
	vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}
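
/*
 * For scale (assuming 4 KiB base pages and a 64-byte struct page): HVO keeps
 * one vmemmap page per HugeTLB page and frees the rest, i.e. roughly 7 pages
 * (28 KiB) for each 2 MiB HugeTLB page and 4095 pages (~16 MiB) for each
 * 1 GiB HugeTLB page.
 */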

static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(vmemmap_optimize_enabled),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};
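
/*
 * When registered below, this table exposes the knob as
 * /proc/sys/vm/hugetlb_optimize_vmemmap. Flipping it at run time only
 * affects HugeTLB pages allocated after the change; already optimized pages
 * stay optimized until they are freed.
 */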

static int __init hugetlb_vmemmap_init(void)
{
	const struct hstate *h;

	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);

	for_each_hstate(h) {
		if (hugetlb_vmemmap_optimizable(h)) {
			register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
			break;
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);