arch/arm64/mm/ptdump.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>
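
/*
 * Index values for the address_markers[] entries below; the markers whose
 * start address is not a compile-time constant are filled in by
 * ptdump_init().
 */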
enum address_markers_idx {
        PAGE_OFFSET_NR = 0,
        PAGE_END_NR,
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        KASAN_START_NR,
#endif
};
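
/*
 * Start addresses of the kernel VA regions, printed as section headers in
 * the dump.  Entries initialised to 0 (PAGE_END, KASAN_SHADOW_START) are
 * not compile-time constants and are set up at boot by ptdump_init().
 */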
static struct addr_marker address_markers[] = {
        { PAGE_OFFSET,                  "Linear Mapping start" },
        { 0 /* PAGE_END */,             "Linear Mapping end" },
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        { 0 /* KASAN_SHADOW_START */,   "Kasan shadow start" },
        { KASAN_SHADOW_END,             "Kasan shadow end" },
#endif
        { BPF_JIT_REGION_START,         "BPF start" },
        { BPF_JIT_REGION_END,           "BPF end" },
        { MODULES_VADDR,                "Modules start" },
        { MODULES_END,                  "Modules end" },
        { VMALLOC_START,                "vmalloc() area" },
        { VMALLOC_END,                  "vmalloc() end" },
        { FIXADDR_START,                "Fixmap start" },
        { FIXADDR_TOP,                  "Fixmap end" },
        { PCI_IO_START,                 "PCI I/O start" },
        { PCI_IO_END,                   "PCI I/O end" },
        { VMEMMAP_START,                "vmemmap start" },
        { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
        { -1,                           NULL },
};
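
/*
 * Only emit output when a seq_file was supplied; ptdump_check_wx() reuses
 * the same walker with st->seq == NULL to run silently.
 */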
#define pt_dump_seq_printf(m, fmt, args...)     \
({                                              \
        if (m)                                  \
                seq_printf(m, fmt, ##args);     \
})

#define pt_dump_seq_puts(m, fmt)        \
({                                      \
        if (m)                          \
                seq_printf(m, fmt);     \
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
struct pg_state {
        struct ptdump_state ptdump;
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        int level;
        u64 current_prot;
        bool check_wx;
        unsigned long wx_pages;
        unsigned long uxn_pages;
};
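
/*
 * Describes one descriptor field to decode: if (entry & mask) == val the
 * "set" string is printed, otherwise "clear" (a NULL string prints nothing).
 */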
struct prot_bits {
        u64             mask;
        u64             val;
        const char      *set;
        const char      *clear;
};

static const struct prot_bits pte_bits[] = {
        {
                .mask   = PTE_VALID,
                .val    = PTE_VALID,
                .set    = " ",
                .clear  = "F",
        }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
                .clear  = "   ",
        }, {
                .mask   = PTE_RDONLY,
                .val    = PTE_RDONLY,
                .set    = "ro",
                .clear  = "RW",
        }, {
                .mask   = PTE_PXN,
                .val    = PTE_PXN,
                .set    = "NX",
                .clear  = "x ",
        }, {
                .mask   = PTE_SHARED,
                .val    = PTE_SHARED,
                .set    = "SHD",
                .clear  = "   ",
        }, {
                .mask   = PTE_AF,
                .val    = PTE_AF,
                .set    = "AF",
                .clear  = "  ",
        }, {
                .mask   = PTE_NG,
                .val    = PTE_NG,
                .set    = "NG",
                .clear  = "  ",
        }, {
                .mask   = PTE_CONT,
                .val    = PTE_CONT,
                .set    = "CON",
                .clear  = "   ",
        }, {
                .mask   = PTE_TABLE_BIT,
                .val    = PTE_TABLE_BIT,
                .set    = "   ",
                .clear  = "BLK",
        }, {
                .mask   = PTE_UXN,
                .val    = PTE_UXN,
                .set    = "UXN",
                .clear  = "   ",
        }, {
                .mask   = PTE_GP,
                .val    = PTE_GP,
                .set    = "GP",
                .clear  = "  ",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
                .set    = "DEVICE/nGnRnE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRE),
                .set    = "DEVICE/nGnRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_NC),
                .set    = "MEM/NORMAL-NC",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL),
                .set    = "MEM/NORMAL",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_TAGGED),
                .set    = "MEM/NORMAL-TAGGED",
        }
};
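
/*
 * Per-level decode information: the level name, the table of attribute
 * bits to decode and a mask covering all of them (built in
 * ptdump_initialize()).
 */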
struct pg_level {
        const struct prot_bits *bits;
        const char *name;
        size_t num;
        u64 mask;
};
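
/*
 * With fewer than four (or three) page table levels the PUD (or PMD) is
 * folded into the PGD, so the folded levels reuse the "PGD" name.
 */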
static struct pg_level pg_level[] = {
        { /* pgd */
                .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* p4d */
                .name   = "P4D",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
                .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pmd */
                .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pte */
                .name   = "PTE",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        },
};
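
/* Print the decoded attribute string for each field of the current range. */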
static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
                        size_t num)
{
        unsigned i;

        for (i = 0; i < num; i++, bits++) {
                const char *s;

                if ((st->current_prot & bits->mask) == bits->val)
                        s = bits->set;
                else
                        s = bits->clear;

                if (s)
                        pt_dump_seq_printf(st->seq, " %s", s);
        }
}
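
/*
 * During a W+X check, warn once about kernel mappings that are executable
 * at EL0 (UXN clear) and count the pages affected.
 */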
static void note_prot_uxn(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;

        if ((st->current_prot & PTE_UXN) == PTE_UXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}
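
/*
 * During a W+X check, warn once about mappings that are both writable and
 * executable (neither read-only nor PXN) and count the pages affected.
 */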
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;
        if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
                return;
        if ((st->current_prot & PTE_PXN) == PTE_PXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
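
/*
 * Callback invoked by ptdump_walk_pgd() for every entry.  A range is
 * accumulated while the level and attributes stay the same, and is printed
 * (and checked) as soon as the attributes change, the level changes or an
 * address marker boundary is crossed.
 */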
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
                      u64 val)
{
        struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
        static const char units[] = "KMGTPE";
        u64 prot = 0;

        if (level >= 0)
                prot = val & pg_level[level].mask;

        if (st->level == -1) {
                st->level = level;
                st->current_prot = prot;
                st->start_address = addr;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        } else if (prot != st->current_prot || level != st->level ||
                   addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;

                if (st->current_prot) {
                        note_prot_uxn(st, addr);
                        note_prot_wx(st, addr);
                }

                pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
                                   st->start_address, addr);

                delta = (addr - st->start_address) >> 10;
                while (!(delta & 1023) && unit[1]) {
                        delta >>= 10;
                        unit++;
                }
                pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
                                   pg_level[st->level].name);
                if (st->current_prot && pg_level[st->level].bits)
                        dump_prot(st, pg_level[st->level].bits,
                                  pg_level[st->level].num);
                pt_dump_seq_puts(st->seq, "\n");

                if (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }

                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        }

        if (addr >= st->marker[1].start_address) {
                st->marker++;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        }
}
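
/* Walk the page tables described by @info and dump them into the seq_file. */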
void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
{
        unsigned long end = ~0UL;
        struct pg_state st;

        if (info->base_addr < TASK_SIZE_64)
                end = TASK_SIZE_64;

        st = (struct pg_state){
                .seq = s,
                .marker = info->markers,
                .level = -1,
                .ptdump = {
                        .note_page = note_page,
                        .range = (struct ptdump_range[]){
                                {info->base_addr, end},
                                {0, 0}
                        }
                }
        };

        ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
}
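
/* Pre-compute each level's mask as the union of all decoded bit masks. */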
static void __init ptdump_initialize(void)
{
        unsigned i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].bits)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].bits[j].mask;
}
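
/* The debugfs dump of the kernel page tables starts at the linear map. */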
static struct ptdump_info kernel_ptdump_info = {
        .mm             = &init_mm,
        .markers        = address_markers,
        .base_addr      = PAGE_OFFSET,
};
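
/*
 * Walk the kernel page tables with output suppressed, warning about and
 * counting any W+X or non-UXN mappings.
 */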
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = (struct addr_marker[]) {
                        { 0, NULL},
                        { -1, NULL},
                },
                .level = -1,
                .check_wx = true,
                .ptdump = {
                        .note_page = note_page,
                        .range = (struct ptdump_range[]) {
                                {PAGE_OFFSET, ~0UL},
                                {0, 0}
                        }
                }
        };

        ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
                        st.wx_pages, st.uxn_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
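
/*
 * Fill in the marker addresses that are only known at runtime, build the
 * per-level masks and register the "kernel_page_tables" debugfs file.
 */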
static int __init ptdump_init(void)
{
        address_markers[PAGE_END_NR].start_address = PAGE_END;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif
        ptdump_initialize();
        ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
        return 0;
}
device_initcall(ptdump_init);