arch/arm64/mm/dump.c (linux-2.6-microblaze.git)
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>

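/*
 * Named boundaries of the kernel virtual address space. note_page() prints a
 * "---[ ... ]---" header each time the walk crosses one of these markers, so
 * the entries must be in ascending address order and end with a -1 sentinel.
 */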
static const struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
        { KASAN_SHADOW_START,           "Kasan shadow start" },
        { KASAN_SHADOW_END,             "Kasan shadow end" },
#endif
        { MODULES_VADDR,                "Modules start" },
        { MODULES_END,                  "Modules end" },
        { VMALLOC_START,                "vmalloc() area" },
        { VMALLOC_END,                  "vmalloc() end" },
        { FIXADDR_START,                "Fixmap start" },
        { FIXADDR_TOP,                  "Fixmap end" },
        { PCI_IO_START,                 "PCI I/O start" },
        { PCI_IO_END,                   "PCI I/O end" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        { VMEMMAP_START,                "vmemmap start" },
        { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
#endif
        { PAGE_OFFSET,                  "Linear mapping" },
        { -1,                           NULL },
};

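/*
 * Print helpers that only emit output when a seq_file is attached. The W+X
 * checker reuses the same walker with st->seq == NULL, so these become
 * no-ops there and the walk just accumulates statistics.
 */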
#define pt_dump_seq_printf(m, fmt, args...)     \
({                                              \
        if (m)                                  \
                seq_printf(m, fmt, ##args);     \
})

#define pt_dump_seq_puts(m, fmt)        \
({                                      \
        if (m)                          \
                seq_printf(m, fmt);     \
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        unsigned level;
        u64 current_prot;
        bool check_wx;
        unsigned long wx_pages;
        unsigned long uxn_pages;
};

struct prot_bits {
        u64             mask;
        u64             val;
        const char      *set;
        const char      *clear;
};

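/*
 * Decode table for the descriptor bits shown in each dump line. For every
 * entry, the descriptor is masked with ->mask and compared against ->val;
 * ->set is printed on a match, ->clear otherwise, and a NULL string prints
 * nothing (see dump_prot() below).
 */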
static const struct prot_bits pte_bits[] = {
        {
                .mask   = PTE_VALID,
                .val    = PTE_VALID,
                .set    = " ",
                .clear  = "F",
        }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
                .clear  = "   ",
        }, {
                .mask   = PTE_RDONLY,
                .val    = PTE_RDONLY,
                .set    = "ro",
                .clear  = "RW",
        }, {
                .mask   = PTE_PXN,
                .val    = PTE_PXN,
                .set    = "NX",
                .clear  = "x ",
        }, {
                .mask   = PTE_SHARED,
                .val    = PTE_SHARED,
                .set    = "SHD",
                .clear  = "   ",
        }, {
                .mask   = PTE_AF,
                .val    = PTE_AF,
                .set    = "AF",
                .clear  = "  ",
        }, {
                .mask   = PTE_NG,
                .val    = PTE_NG,
                .set    = "NG",
                .clear  = "  ",
        }, {
                .mask   = PTE_CONT,
                .val    = PTE_CONT,
                .set    = "CON",
                .clear  = "   ",
        }, {
                .mask   = PTE_TABLE_BIT,
                .val    = PTE_TABLE_BIT,
                .set    = "   ",
                .clear  = "BLK",
        }, {
                .mask   = PTE_UXN,
                .val    = PTE_UXN,
                .set    = "UXN",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
                .set    = "DEVICE/nGnRnE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_nGnRE),
                .set    = "DEVICE/nGnRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_DEVICE_GRE),
                .set    = "DEVICE/GRE",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL_NC),
                .set    = "MEM/NORMAL-NC",
        }, {
                .mask   = PTE_ATTRINDX_MASK,
                .val    = PTE_ATTRINDX(MT_NORMAL),
                .set    = "MEM/NORMAL",
        }
};

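/*
 * Per-level description used while walking: a printable name, the decode
 * table for that level and a mask covering every bit the table decodes.
 * The mask is filled in at init time by ptdump_initialize().
 */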
struct pg_level {
        const struct prot_bits *bits;
        const char *name;
        size_t num;
        u64 mask;
};

static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
                .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
                .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pmd */
                .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pte */
                .name   = "PTE",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        },
};

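/*
 * Print the decoded attribute strings for the protection bits accumulated in
 * st->current_prot, using the decode table of the current level.
 */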
static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
                        size_t num)
{
        unsigned i;

        for (i = 0; i < num; i++, bits++) {
                const char *s;

                if ((st->current_prot & bits->mask) == bits->val)
                        s = bits->set;
                else
                        s = bits->clear;

                if (s)
                        pt_dump_seq_printf(st->seq, " %s", s);
        }
}

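/*
 * The two checks below back ptdump_check_wx(): they warn once and count the
 * pages of any completed range that is executable at EL0 (missing UXN) or
 * both writable and executable at EL1 (neither read-only nor PXN).
 */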
static void note_prot_uxn(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;

        if ((st->current_prot & PTE_UXN) == PTE_UXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;
        if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
                return;
        if ((st->current_prot & PTE_PXN) == PTE_PXN)
                return;

        WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

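/*
 * Core of the dumper: called for every entry the walkers visit. A new output
 * line is started whenever the protection bits, the level or a marker
 * boundary changes; the completed range is then printed with its size scaled
 * through the K/M/G/T/P/E units in steps of 1024.
 */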
static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
                                u64 val)
{
        static const char units[] = "KMGTPE";
        u64 prot = val & pg_level[level].mask;

        if (!st->level) {
                st->level = level;
                st->current_prot = prot;
                st->start_address = addr;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        } else if (prot != st->current_prot || level != st->level ||
                   addr >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;

                if (st->current_prot) {
                        note_prot_uxn(st, addr);
                        note_prot_wx(st, addr);
                        pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
                                   st->start_address, addr);

                        delta = (addr - st->start_address) >> 10;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
                                   pg_level[st->level].name);
                        if (pg_level[st->level].bits)
                                dump_prot(st, pg_level[st->level].bits,
                                          pg_level[st->level].num);
                        pt_dump_seq_puts(st->seq, "\n");
                }

                if (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }

                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        }

        if (addr >= st->marker[1].start_address) {
                st->marker++;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        }

}

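/*
 * The walkers below descend the page tables level by level. Empty and
 * section (block) entries are reported as leaves at their level; table
 * entries are followed down to the next level.
 */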
static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
{
        pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
                addr = start + i * PAGE_SIZE;
                note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
        }
}

static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
{
        pmd_t *pmdp = pmd_offset(pudp, 0UL);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
                pmd_t pmd = READ_ONCE(*pmdp);

                addr = start + i * PMD_SIZE;
                if (pmd_none(pmd) || pmd_sect(pmd)) {
                        note_page(st, addr, 3, pmd_val(pmd));
                } else {
                        BUG_ON(pmd_bad(pmd));
                        walk_pte(st, pmdp, addr);
                }
        }
}

static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
{
        pud_t *pudp = pud_offset(pgdp, 0UL);
        unsigned long addr;
        unsigned i;

        for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
                pud_t pud = READ_ONCE(*pudp);

                addr = start + i * PUD_SIZE;
                if (pud_none(pud) || pud_sect(pud)) {
                        note_page(st, addr, 2, pud_val(pud));
                } else {
                        BUG_ON(pud_bad(pud));
                        walk_pmd(st, pudp, addr);
                }
        }
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
                     unsigned long start)
{
        pgd_t *pgdp = pgd_offset(mm, 0UL);
        unsigned i;
        unsigned long addr;

        for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
                pgd_t pgd = READ_ONCE(*pgdp);

                addr = start + i * PGDIR_SIZE;
                if (pgd_none(pgd)) {
                        note_page(st, addr, 1, pgd_val(pgd));
                } else {
                        BUG_ON(pgd_bad(pgd));
                        walk_pud(st, pgdp, addr);
                }
        }
}

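/*
 * Entry point used by the debugfs file: walk the given mm starting at
 * info->base_addr and finish with a dummy note_page() call so that the last
 * accumulated range is flushed to the seq_file.
 */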
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
        struct pg_state st = {
                .seq = m,
                .marker = info->markers,
        };

        walk_pgd(&st, info->mm, info->base_addr);

        note_page(&st, 0, 0, 0);
}

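/* Precompute each level's mask as the union of its decode-table masks. */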
static void ptdump_initialize(void)
{
        unsigned i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].bits)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].bits[j].mask;
}

static struct ptdump_info kernel_ptdump_info = {
        .mm             = &init_mm,
        .markers        = address_markers,
        .base_addr      = VA_START,
};

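/*
 * Walk the kernel page tables with no seq_file attached, purely to count W+X
 * and non-UXN mappings; typically invoked via debug_checkwx() once the kernel
 * text and rodata permissions have been finalized.
 */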
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = (struct addr_marker[]) {
                        { 0, NULL},
                        { -1, NULL},
                },
                .check_wx = true,
        };

        walk_pgd(&st, &init_mm, VA_START);
        note_page(&st, 0, 0, 0);
        if (st.wx_pages || st.uxn_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
                        st.wx_pages, st.uxn_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

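/*
 * Register the dump as a debugfs file; with debugfs mounted in the usual
 * place the output is readable from /sys/kernel/debug/kernel_page_tables.
 */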
static int ptdump_init(void)
{
        ptdump_initialize();
        return ptdump_debugfs_register(&kernel_ptdump_info,
                                        "kernel_page_tables");
}
device_initcall(ptdump_init);