// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
	struct seq_file *seq;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};

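/*
 * For illustration only (addresses and sizes are made up and vary per
 * build and boot): the state machine above emits one line per contiguous
 * run with identical protections, bracketed by the markers defined below:
 *
 *   ---[ High Kernel Mapping ]---
 *   0xffffffff81000000-0xffffffff82000000   16M  ro  PSE  GLB x  pmd
 *   0xffffffff82000000-0xffffffff82e00000   14M  RW  PSE  GLB NX pmd
 */
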
/* Address space markers hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	(swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)

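/*
 * Worked example (x86-64, 4K pages, 512 entries per table): one PTE slot
 * covers 4K, so PMD_LEVEL_MULT = 512 * 4K = 2M, PUD_LEVEL_MULT = 1G and
 * P4D_LEVEL_MULT = 512G. PGD_LEVEL_MULT is 512G with 4-level paging
 * (PTRS_PER_P4D == 1) and 256T with 5-level paging.
 */
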
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                          ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

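/*
 * Illustrative reading of the flag columns above: a typical 2M kernel
 * data mapping is not USER, is RW, uses PSE (a large page), and is GLB
 * and NX, so it prints as "RW PSE GLB NX pmd" (column widths elided).
 */
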
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;

	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}

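/*
 * Worked example: with 4-level paging __VIRTUAL_MASK_SHIFT is 47, so
 * shift is 16 and an index-derived address such as 0x0000800000000000
 * is sign-extended to the canonical 0xffff800000000000.
 */
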
static void note_wx(struct pg_state *st)
{
	unsigned long npages;

	npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
	/*
	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
	 * Inform about it, but avoid the warning.
	 */
	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
	    st->current_address <= PAGE_OFFSET + BIOS_END) {
		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
		return;
	}
#endif
	/* Account the WX pages */
	st->wx_pages += npages;
	WARN_ONCE(__supported_pte_mask & _PAGE_NX,
		  "x86/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct pg_state *st, pgprot_t new_prot,
		      pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";
	struct seq_file *m = st->seq;

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}

static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}

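/*
 * Worked example: USER and RW must be set at every level to take effect,
 * while NX at any single level is enough. So a leaf PTE with USR|RW
 * under a kernel-only (no USR) PMD is effectively RW without USR, and a
 * PTE without NX under an NX PMD is still effectively NX.
 */
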
static void walk_pte_level(struct pg_state *st, pmd_t addr, pgprotval_t eff_in,
			   unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}

#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct pg_state *st, void *pt)
{
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
		note_page(st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct pg_state *st, void *pt)
{
	return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(st, pmd_start)) {
				walk_pte_level(st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(s,a,e,p) walk_pte_level(s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct pg_state *st, p4d_t addr, pgprotval_t eff_in,
			   unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(st, pud_start)) {
				walk_pmd_level(st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(st, __pgprot(0), 0, 3);

		start++;
	}
}

#else
#define walk_pud_level(s,a,e,p) walk_pmd_level(s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct pg_state *st, pgd_t addr, pgprotval_t eff_in,
			   unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(st, p4d_start)) {
				walk_pud_level(st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(st, __pgprot(0), 0, 2);

		start++;
	}
}

#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * A hole in the beginning of kernel address space reserved
	 * for a hypervisor.
	 */
	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
#else
	return false;
#endif
}

static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	pgd_t *start = INIT_PGD;
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	st.seq = m;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(&st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(&st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(&st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(&st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm)
{
	ptdump_walk_pgd_level_core(m, mm->pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && boot_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

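/*
 * Note: this export is consumed by the debugfs front end (see
 * debug_pagetables.c), which typically exposes the dumps under
 * /sys/kernel/debug/page_tables/. The exact set of files depends on the
 * kernel configuration (e.g. PTI adds a user-space page table view).
 */
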
void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = INIT_PGD;

	if (!(__supported_pte_mask & _PAGE_NX) ||
	    !boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}

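/*
 * Note: with CONFIG_DEBUG_WX enabled, the checkwx walkers above are run
 * once late in boot (after mark_rodata_ro()) via debug_checkwx(), so any
 * W+X mapping that survives init triggers the warning in note_wx().
 */
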
static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
	return 0;
}
__initcall(pt_dump_init);