// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
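/*
 * For example, with a virtually contiguous vmemmap the conversions reduce
 * to pointer arithmetic against a fixed base. An illustrative sketch (the
 * real definitions are per-arch; x86_64's look like this):
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */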
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};
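/*
 * Two remap_pte implementations are provided below: vmemmap_remap_pte(),
 * which redirects a PTE to the shared @reuse_page and queues the old page
 * on @vmemmap_pages for freeing, and vmemmap_restore_pte(), which gives
 * each PTE its own page again from that list.
 */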
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the page table walk before we
	 * start remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++)
		walk->remap_pte(pte, addr, walk);
}
static void vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		BUG_ON(pmd_leaf(*pmd));

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);
}
static void vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		vmemmap_pmd_range(pud, addr, next, walk);
	} while (pud++, addr = next, addr != end);
}
static void vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		vmemmap_pud_range(p4d, addr, next, walk);
	} while (p4d++, addr = next, addr != end);
}
static void vmemmap_remap_range(unsigned long start, unsigned long end,
				struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		vmemmap_p4d_range(pgd, addr, next, walk);
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to that range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);
}
/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it was
 * allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}
/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}
/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Note: This function depends on vmemmap being base page mapped. Please make
 * sure that we disable PMD mapping of vmemmap pages when calling this function.
 */
void vmemmap_remap_free(unsigned long start, unsigned long end,
			unsigned long reuse)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for huge
	 * pages, the vmemmap page table walk obeys the following rules
	 * (see vmemmap_pte_range() for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be continuous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking, which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	vmemmap_remap_range(reuse, end, &walk);
	free_vmemmap_page_list(&vmemmap_pages);
}
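/*
 * A worked example (assuming 4 KiB base pages and a 64 byte struct page):
 * the 512 struct pages describing a 2 MiB huge page occupy 512 * 64 bytes,
 * i.e. 8 vmemmap pages. A hypothetical caller that wants to keep only the
 * first of those pages would do:
 *
 *	unsigned long reuse = vmemmap_addr;
 *	unsigned long start = reuse + PAGE_SIZE;
 *
 *	vmemmap_remap_free(start, reuse + 8 * PAGE_SIZE, reuse);
 *
 * remapping the 7 tail vmemmap pages read-only onto the first one and
 * freeing them; vmemmap_remap_alloc() below reverses the operation.
 */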
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	/* Copy the shared page's contents before re-pointing the PTE. */
	copy_page(to, (void *)walk->reuse_addr);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}
/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
 *			 to the page which is from the @vmemmap_pages
 *			 associated with it.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, %-ENOMEM if allocating the new vmemmap pages fails.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	vmemmap_remap_range(reuse, end, &walk);

	return 0;
}
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		/* Warn only once to avoid flooding the log on repeat failures. */
		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Need to make sure the size is the same across calls during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
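/*
 * A struct vmem_altmap (see include/linux/memremap.h) backs the memmap with
 * pfns carved out of the device range itself: @base_pfn is the start of
 * that range, @reserve pfns at its head are never handed out, and @alloc
 * and @align count the pfns already consumed and the padding spent on
 * alignment. Hence the next free pfn:
 */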
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
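	/*
	 * The two lines above pad the allocation so that it starts on a
	 * multiple of its own lowest set size bit. A worked example with
	 * illustrative values: a 512-pfn request (2 MiB of memmap) with the
	 * next free pfn at 0x1203 yields nr_align = 512, so the start is
	 * rounded up to 0x1400 and nr_align becomes 0x1fd pfns of padding.
	 */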
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}
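/*
 * For scale (assuming x86_64's 128 MiB sections, 4 KiB base pages and a
 * 64 byte struct page): a section spans 32768 pfns, so its memmap is
 * 32768 * 64 bytes = 2 MiB, and populating it with base pages instantiates
 * 512 PTEs.
 */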