/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
        int level;
        pgprot_t current_prot;
        pgprotval_t effective_prot;
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
        unsigned long lines;
        bool to_dmesg;
        bool check_wx;
        unsigned long wx_pages;
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
        unsigned long max_lines;
};
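/*
 * Note: a non-zero max_lines caps how many dump lines are printed for the
 * region starting at a marker; the ESPfix marker below uses 16 so its
 * large, repetitive per-CPU area does not flood the output.
 */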
/* Address space marker hints */
#ifdef CONFIG_X86_64

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
        EFI_END_NR,
#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};
static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
        [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
#ifdef CONFIG_KASAN
        /*
         * These fields get initialized with the (dynamic)
         * KASAN_SHADOW_{START,END} values in pt_dump_init().
         */
        [KASAN_SHADOW_START_NR] = { 0UL,                "KASAN shadow" },
        [KASAN_SHADOW_END_NR]   = { 0UL,                "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
        [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
        [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
#endif
        [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
        [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
        [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
        [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};
#else /* CONFIG_X86_64 */
enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
#endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};
static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
#endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};
#endif /* !CONFIG_X86_64 */
/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
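/*
 * For illustration: with 4KiB pages and 512 entries per table on x86_64,
 * a PTE maps 4KiB, so a PMD entry covers 512 * 4KiB = 2MiB, a PUD entry
 * 1GiB and a P4D entry 512GiB; PGD_LEVEL_MULT then depends on whether the
 * p4d level is folded (PTRS_PER_P4D == 1) or 5-level paging is in use.
 */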
#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})
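/*
 * The to_dmesg path exists so the W+X check can report at boot, before
 * any debugfs reader is around; the (m) test lets callers pass a NULL
 * seq_file when they only want the check, not the dump.
 */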
/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 4 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 5 && pr & _PAGE_PAT) ||
                    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
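/*
 * Each present entry thus gets a fixed-width flag column, roughly
 * "USR RW PWT PCD PSE PAT GLB NX pte" with blanks for clear bits, so
 * the dump lines stay visually aligned. (Illustrative, not verbatim.)
 */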
/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
        int shift;

        if (!IS_ENABLED(CONFIG_X86_64))
                return u;

        shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        return (signed long)(u << shift) >> shift;
}
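/*
 * Example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT == 47): shift is
 * 16, so an index-derived address such as 0x0000800000000000 is shifted
 * up and arithmetically back down, yielding the canonical
 * 0xffff800000000000.
 */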
/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
        pgprotval_t prot, cur, eff;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state
         * that we have now. A "break" is either a change in permissions or
         * level, or crossing an address space marker.
         */
        prot = pgprot_val(new_prot);
        cur = pgprot_val(st->current_prot);
        eff = st->effective_prot;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || new_eff != eff || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
                        WARN_ONCE(1,
                                  "x86/mm: Found insecure W+X mapping at address %p/%pS\n",
                                  (void *)st->start_address,
                                  (void *)st->start_address);
                        st->wx_pages += (st->current_address -
                                         st->start_address) / PAGE_SIZE;
                }

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);

                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of address space,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ... \n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
}
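/*
 * A flushed series comes out roughly as (illustrative values):
 *
 *   ---[ High Kernel Mapping ]---
 *   0xffffffff81000000-0xffffffff81e00000   14M  ro  PSE  GLB  x  pmd
 *
 * i.e. address range, human-readable size, then the flag column and
 * level name produced by printk_prot().
 */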
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
        return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
               ((prot1 | prot2) & _PAGE_NX);
}
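/*
 * In other words: a mapping is only effectively user-accessible or
 * writable if every level on the walk grants the bit (AND), while NX at
 * any single level suffices to make it non-executable (OR). This is what
 * lets the W+X check in note_page() see through per-level flags.
 */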
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pte_t *start;
        pgprotval_t prot, eff;

        start = (pte_t *)pmd_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                prot = pte_flags(*start);
                eff = effective_prot(eff_in, prot);
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                note_page(m, st, __pgprot(prot), eff, 5);
                start++;
        }
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for the KASAN=y case. Since all kasan page
 * tables eventually point to the kasan_zero_page we can call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for a 5-level config) while checking for
 * W+X mappings or reading the kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                void *pt)
{
        if (__pa(pt) == __pa(kasan_zero_pmd) ||
            (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) ||
            __pa(pt) == __pa(kasan_zero_pud)) {
                pgprotval_t prot = pte_flags(kasan_zero_pte[0]);

                note_page(m, st, __pgprot(prot), 0, 5);
                return true;
        }
        return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                void *pt)
{
        return false;
}
#endif
#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pmd_t *start, *pmd_start;
        pgprotval_t prot, eff;

        pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        prot = pmd_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pmd_large(*start) || !pmd_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 4);
                        } else if (!kasan_page_table(m, st, pmd_start)) {
                                walk_pte_level(m, st, *start, eff,
                                               P + i * PMD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 4);
                start++;
        }
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
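/*
 * On configurations where a level is folded (PTRS_PER_PMD == 1, and
 * likewise for the pud/p4d variants below), the walker for that level
 * collapses into a macro that re-wraps the entry and recurses one level
 * down, so the same walk code serves 2-, 3-, 4- and 5-level layouts.
 */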
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pud_t *start, *pud_start;
        pgprotval_t prot, eff;
        pud_t *prev_pud = NULL;

        pud_start = start = (pud_t *)p4d_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        prot = pud_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pud_large(*start) || !pud_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 3);
                        } else if (!kasan_page_table(m, st, pud_start)) {
                                walk_pmd_level(m, st, *start, eff,
                                               P + i * PUD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 3);

                prev_pud = start;
                start++;
        }
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif
static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        p4d_t *start, *p4d_start;
        pgprotval_t prot, eff;

        if (PTRS_PER_P4D == 1)
                return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

        p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_P4D; i++) {
                st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
                if (!p4d_none(*start)) {
                        prot = p4d_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (p4d_large(*start) || !p4d_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 2);
                        } else if (!kasan_page_table(m, st, p4d_start)) {
                                walk_pud_level(m, st, *start, eff,
                                               P + i * P4D_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 2);

                start++;
        }
}

#define pgd_large(a) (pgtable_l5_enabled ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
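/*
 * With 4-level paging the hardware has no p4d level, so the pgd helpers
 * above defer to the p4d variants when pgtable_l5_enabled is false;
 * under real 5-level paging the native pgd_large()/pgd_none() apply.
 */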
static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
        /*
         * ffff800000000000 - ffff87ffffffffff is reserved for
         * the hypervisor.
         */
        return  (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
                (idx <  pgd_index(__PAGE_OFFSET));
#else
        return false;
#endif
}
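/*
 * That range is 8TiB: the 16 PGD slots (16 * 512GiB with 4-level paging)
 * immediately below the direct map at __PAGE_OFFSET, which is why the
 * index comparison above subtracts 16.
 */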
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
{
#ifdef CONFIG_X86_64
        pgd_t *start = (pgd_t *) &init_top_pgt;
#else
        pgd_t *start = swapper_pg_dir;
#endif
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = dmesg;
        }

        st.check_wx = checkwx;
        if (checkwx)
                st.wx_pages = 0;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start) && !is_hypervisor_range(i)) {
                        prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
                        eff = _PAGE_USER | _PAGE_RW;
#else
                        eff = prot;
#endif
                        if (pgd_large(*start) || !pgd_present(*start)) {
                                note_page(m, &st, __pgprot(prot), eff, 1);
                        } else {
                                walk_p4d_level(m, &st, *start, eff,
                                               i * PGD_LEVEL_MULT);
                        }
                } else
                        note_page(m, &st, __pgprot(0), 0, 1);

                cond_resched();
                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
                pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
                        st.wx_pages);
        else
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
        ptdump_walk_pgd_level_core(m, pgd, false, true);
}
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && static_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
#endif
        ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
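/*
 * This export is consumed by the debugfs front end (the companion
 * debug_pagetables.c driver, which creates the page_tables/ entries
 * under /sys/kernel/debug); with PTI it can request either the kernel
 * or the user copy of a process' page tables. The file names belong to
 * that driver, not to this file.
 */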
static void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pgd_t *pgd = (pgd_t *) &init_top_pgt;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
        ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}
void ptdump_walk_pgd_level_checkwx(void)
{
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
        ptdump_walk_user_pgd_level_checkwx();
}
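/*
 * Typical caller: the DEBUG_WX check run once the kernel has marked
 * rodata read-only late in boot. A NULL seq_file means nothing is
 * dumped; only the W+X accounting and the pr_info() summary happen.
 * (The exact call site lives outside this file.)
 */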
static int __init pt_dump_init(void)
{
        /*
         * Various markers are not compile-time constants, so assign them
         * here.
         */
#ifdef CONFIG_X86_64
        address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
        address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
        address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
#endif
        return 0;
}
__initcall(pt_dump_init);