// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>

enum address_markers_idx {
        PAGE_OFFSET_NR = 0,
        PAGE_END_NR,
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        KASAN_START_NR,
#endif
};

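/*
 * Boundary markers for the regions named in the dump. Entries must be in
 * ascending address order; the zeroed placeholders (PAGE_END and
 * KASAN_SHADOW_START) are only known at runtime and are patched in by
 * ptdump_init().
 */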
static struct addr_marker address_markers[] = {
        { PAGE_OFFSET,                  "Linear Mapping start" },
        { 0 /* PAGE_END */,             "Linear Mapping end" },
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        { 0 /* KASAN_SHADOW_START */,   "Kasan shadow start" },
        { KASAN_SHADOW_END,             "Kasan shadow end" },
#endif
        { MODULES_VADDR,                "Modules start" },
        { MODULES_END,                  "Modules end" },
        { VMALLOC_START,                "vmalloc() area" },
        { VMALLOC_END,                  "vmalloc() end" },
        { FIXADDR_TOT_START,            "Fixmap start" },
        { FIXADDR_TOP,                  "Fixmap end" },
        { PCI_IO_START,                 "PCI I/O start" },
        { PCI_IO_END,                   "PCI I/O end" },
        { VMEMMAP_START,                "vmemmap start" },
        { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
        { -1,                           NULL },
};

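/*
 * NULL-safe output helpers: the W+X scan reuses note_page() with a NULL
 * seq_file, in which case nothing is printed.
 */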
#define pt_dump_seq_printf(m, fmt, args...)     \
({                                              \
        if (m)                                  \
                seq_printf(m, fmt, ##args);     \
})

#define pt_dump_seq_puts(m, fmt)        \
({                                      \
        if (m)                          \
                seq_printf(m, fmt);     \
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
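/*
 * Each completed range becomes one output line, roughly of the form
 * (values are illustrative):
 *
 *   0xffff800008000000-0xffff800008010000    64K PTE   RW NX SHD AF NG   UXN MEM/NORMAL
 */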
struct pg_state {
        struct ptdump_state ptdump;
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        int level;
        u64 current_prot;
        bool check_wx;
        unsigned long wx_pages;
        unsigned long uxn_pages;
};

struct prot_bits {
        u64             mask;
        u64             val;
        const char      *set;
        const char      *clear;
};

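/*
 * Decode table for the attribute bits of an entry: for each element whose
 * masked value matches, dump_prot() prints .set, otherwise .clear. A NULL
 * .clear (as in the ATTRINDX entries below) prints nothing on a mismatch.
 */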
static const struct prot_bits pte_bits[] = {
        {
                .mask   = PTE_VALID,
                .val    = PTE_VALID,
                .set    = " ",
                .clear  = "F",
        }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
                .clear  = "   ",
        }, {
                .mask   = PTE_RDONLY,
                .val    = PTE_RDONLY,
                .set    = "ro",
                .clear  = "RW",
        }, {
                .mask   = PTE_PXN,
                .val    = PTE_PXN,
                .set    = "NX",
                .clear  = "x ",
        }, {
                .mask   = PTE_SHARED,
                .val    = PTE_SHARED,
                .set    = "SHD",
                .clear  = "   ",
        }, {
                .mask   = PTE_AF,
                .val    = PTE_AF,
                .set    = "AF",
                .clear  = "  ",
        }, {
                .mask   = PTE_NG,
                .val    = PTE_NG,
                .set    = "NG",
                .clear  = "  ",
        }, {
                .mask   = PTE_CONT,
                .val    = PTE_CONT,
                .set    = "CON",
                .clear  = "   ",
        }, {
                .mask   = PTE_TABLE_BIT,
                .val    = PTE_TABLE_BIT,
                .set    = "   ",
                .clear  = "BLK",
        }, {
                .mask   = PTE_UXN,
                .val    = PTE_UXN,
                .set    = "UXN",
                .clear  = "   ",
        }, {
                .mask   = PTE_GP,
                .val    = PTE_GP,
                .set    = "GP",
                .clear  = "  ",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
                .set    = "DEVICE/nGnRnE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRE),
                .set    = "DEVICE/nGnRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_NC),
                .set    = "MEM/NORMAL-NC",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL),
                .set    = "MEM/NORMAL",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_TAGGED),
                .set    = "MEM/NORMAL-TAGGED",
        }
};

struct pg_level {
        const struct prot_bits *bits;
        const char *name;
        size_t num;
        u64 mask;
};

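/*
 * Per-level names and decode tables. The .mask fields are left zero here and
 * are filled in by ptdump_initialize() as the OR of every prot_bits mask used
 * at that level.
 */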
static struct pg_level pg_level[] = {
        { /* pgd */
                .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* p4d */
                .name   = "P4D",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
                .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pmd */
                .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pte */
                .name   = "PTE",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        },
};

static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
                        size_t num)
{
        unsigned i;

        for (i = 0; i < num; i++, bits++) {
                const char *s;

                if ((st->current_prot & bits->mask) == bits->val)
                        s = bits->set;
                else
                        s = bits->clear;

                if (s)
                        pt_dump_seq_printf(st->seq, " %s", s);
        }
}

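/*
 * The two helpers below are only active during the W+X scan: they warn once
 * and count pages that are executable at EL0 (missing UXN) or both writable
 * and executable at EL1 (neither read-only nor PXN).
 */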
static void note_prot_uxn(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;

        if ((st->current_prot & PTE_UXN) == PTE_UXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;
        if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
                return;
        if ((st->current_prot & PTE_PXN) == PTE_PXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

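/*
 * ptdump_walk_pgd() calls back here for every entry it visits. A range stays
 * open while consecutive entries share the same level and protection bits and
 * no marker boundary is crossed; otherwise the accumulated range is printed
 * (and fed to the W+X/UXN checks) and a new range is started at @addr.
 */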
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
                      u64 val)
{
        struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
        static const char units[] = "KMGTPE";
        u64 prot = 0;

        if (level >= 0)
                prot = val & pg_level[level].mask;

        if (st->level == -1) {
                st->level = level;
                st->current_prot = prot;
                st->start_address = addr;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        } else if (prot != st->current_prot || level != st->level ||
                   addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;

                if (st->current_prot) {
                        note_prot_uxn(st, addr);
                        note_prot_wx(st, addr);
                }

                pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
                                   st->start_address, addr);

                delta = (addr - st->start_address) >> 10;
                while (!(delta & 1023) && unit[1]) {
                        delta >>= 10;
                        unit++;
                }
                pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
                                   pg_level[st->level].name);
                if (st->current_prot && pg_level[st->level].bits)
                        dump_prot(st, pg_level[st->level].bits,
                                  pg_level[st->level].num);
                pt_dump_seq_puts(st->seq, "\n");

                if (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }

                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        }

        if (addr >= st->marker[1].start_address) {
                st->marker++;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        }

}

void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
{
        unsigned long end = ~0UL;
        struct pg_state st;

        if (info->base_addr < TASK_SIZE_64)
                end = TASK_SIZE_64;

        st = (struct pg_state){
                .seq = s,
                .marker = info->markers,
                .level = -1,
                .ptdump = {
                        .note_page = note_page,
                        .range = (struct ptdump_range[]){
                                {info->base_addr, end},
                                {0, 0}
                        }
                }
        };

        ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
}
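/*
 * A minimal sketch of the seq_file show callback that drives this walk,
 * roughly what ptdump_debugfs_register() wires up in ptdump_debugfs.c
 * (details such as locking are omitted here):
 *
 *	static int ptdump_show(struct seq_file *m, void *v)
 *	{
 *		ptdump_walk(m, m->private);
 *		return 0;
 *	}
 */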

static void __init ptdump_initialize(void)
{
        unsigned i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].bits)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].bits[j].mask;
}

static struct ptdump_info kernel_ptdump_info = {
        .mm             = &init_mm,
        .markers        = address_markers,
        .base_addr      = PAGE_OFFSET,
};

void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = (struct addr_marker[]) {
                        { 0, NULL},
                        { -1, NULL},
                },
                .level = -1,
                .check_wx = true,
                .ptdump = {
                        .note_page = note_page,
                        .range = (struct ptdump_range[]) {
                                {PAGE_OFFSET, ~0UL},
                                {0, 0}
                        }
                }
        };

        ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
                        st.wx_pages, st.uxn_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
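/*
 * With CONFIG_DEBUG_WX enabled this scan typically runs once the kernel
 * mappings are final, via debug_checkwx() from mark_rodata_ro(), so any
 * writable+executable or EL0-executable kernel page is reported at boot.
 */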

static int __init ptdump_init(void)
{
        address_markers[PAGE_END_NR].start_address = PAGE_END;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif
        ptdump_initialize();
        ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
        return 0;
}
device_initcall(ptdump_init);
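/*
 * With debugfs mounted, the file registered above can be read directly,
 * e.g.:
 *
 *	# cat /sys/kernel/debug/kernel_page_tables
 *	---[ Linear Mapping start ]---
 *	0xffff000000000000-0xffff000000210000  2112K PTE  RW NX SHD AF NG  UXN MEM/NORMAL
 *	...
 *
 * Addresses and attributes above are illustrative and depend on the kernel
 * configuration and the machine.
 */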