[linux-2.6-microblaze.git] arch/riscv/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

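/* early_pg_dir is the boot-time page table, defined in arch/riscv/mm/init.c. */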
extern pgd_t early_pg_dir[PTRS_PER_PGD];
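
/*
 * Early shadow setup: point every PGD entry covering the KASAN shadow
 * region at the shared zero shadow tables
 * (kasan_early_shadow_{pmd,pte,page}), in both early_pg_dir and
 * swapper_pg_dir, so shadow accesses are valid before kasan_init() runs.
 */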
asmlinkage void __init kasan_early_init(void)
{
        uintptr_t i;
        pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(kasan_early_shadow_pte + i,
                        mk_pte(virt_to_page(kasan_early_shadow_page),
                               PAGE_KERNEL));

        for (i = 0; i < PTRS_PER_PMD; ++i)
                set_pmd(kasan_early_shadow_pmd + i,
                        pfn_pmd(PFN_DOWN
                                (__pa((uintptr_t) kasan_early_shadow_pte)),
                                __pgprot(_PAGE_TABLE)));

        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
                        pfn_pgd(PFN_DOWN
                                (__pa(((uintptr_t) kasan_early_shadow_pmd))),
                                __pgprot(_PAGE_TABLE)));

        /* init for swapper_pg_dir */
        pgd = pgd_offset_k(KASAN_SHADOW_START);

        for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
             i += PGDIR_SIZE, ++pgd)
                set_pgd(pgd,
                        pfn_pgd(PFN_DOWN
                                (__pa(((uintptr_t) kasan_early_shadow_pmd))),
                                __pgprot(_PAGE_TABLE)));

        local_flush_tlb_all();
}

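/*
 * Populate one PMD's worth of shadow: allocate a PTE table if the PMD is
 * still empty, back each missing PTE with a fresh page from memblock,
 * then install the table in the PMD.
 */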
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        pte_t *ptep, *base_pte;

        if (pmd_none(*pmd))
                base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
        else
                base_pte = (pte_t *)pmd_page_vaddr(*pmd);

        ptep = base_pte + pte_index(vaddr);

        do {
                if (pte_none(*ptep)) {
                        phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
                }
        } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

        set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

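/*
 * Populate one PGD's worth of shadow at PMD granularity. If the PGD still
 * points at the shared early shadow PMD, allocate a private PMD table
 * first so the early tables are never modified.
 */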
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        pmd_t *pmdp, *base_pmd;
        unsigned long next;

        base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
        if (base_pmd == lm_alias(kasan_early_shadow_pmd))
                base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

        pmdp = base_pmd + pmd_index(vaddr);

        do {
                next = pmd_addr_end(vaddr, end);

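                /*
                 * Try a PMD hugepage (2MB with Sv39) when the range is
                 * suitably aligned and large enough; fall back to 4K PTEs
                 * otherwise.
                 */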
                if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
                        phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
                        if (phys_addr) {
                                set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
                                continue;
                        }
                }

                kasan_populate_pte(pmdp, vaddr, next);
        } while (pmdp++, vaddr = next, vaddr != end);

        /*
         * Wait for the whole PGD to be populated before setting the PGD in
         * the page table, otherwise, if we set the PGD before populating it
         * entirely, memblock could allocate a page at a physical address
         * whose shadow is not populated yet, and then we'd take a page fault.
         */
        set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

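/*
 * Walk the shadow range at PGD granularity: map whole entries with
 * gigapages (1GB with Sv39) when possible, otherwise recurse into
 * kasan_populate_pmd().
 */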
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
        phys_addr_t phys_addr;
        pgd_t *pgdp = pgd_offset_k(vaddr);
        unsigned long next;

        do {
                next = pgd_addr_end(vaddr, end);

                /*
                 * pgdp can't be none since kasan_early_init initialized the
                 * whole KASAN shadow region with kasan_early_shadow_pmd: if
                 * this is still the case, we can try to allocate a hugepage
                 * as a replacement.
                 */
                if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
                    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
                        phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
                        if (phys_addr) {
                                set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
                                continue;
                        }
                }

                kasan_populate_pmd(pgdp, vaddr, next);
        } while (pgdp++, vaddr = next, vaddr != end);
}

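/*
 * Fully map the shadow for [start, end) and initialize it to
 * KASAN_SHADOW_INIT so the covered memory starts out unpoisoned.
 */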
static void __init kasan_populate(void *start, void *end)
{
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);

        kasan_populate_pgd(vaddr, vend);

        local_flush_tlb_all();
        memset(start, KASAN_SHADOW_INIT, end - start);
}

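/*
 * "Shallow" populate: only install page tables at the top (PGD) level,
 * leaving the lower levels to be filled lazily; used for the vmalloc
 * shadow when CONFIG_KASAN_VMALLOC is enabled.
 */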
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
        unsigned long next;
        void *p;
        pgd_t *pgd_k = pgd_offset_k(vaddr);

        do {
                next = pgd_addr_end(vaddr, end);
                if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
                        p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                        set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
                }
        } while (pgd_k++, vaddr = next, vaddr != end);
}

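/* Page-align the range, shallow-populate its shadow and flush the TLB. */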
static void __init kasan_shallow_populate(void *start, void *end)
{
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);

        kasan_shallow_populate_pgd(vaddr, vend);
        local_flush_tlb_all();
}

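/*
 * Main KASAN setup: populate the shadow for every region the kernel can
 * touch, then write-protect the early shadow page and enable reporting.
 */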
void __init kasan_init(void)
{
        phys_addr_t p_start, p_end;
        u64 i;

        /*
         * Populate all kernel virtual address space with kasan_early_shadow_page
         * except for the linear mapping and the modules/kernel/BPF mapping.
         */
        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
                                    (void *)kasan_mem_to_shadow((void *)
                                                                VMEMMAP_END));
        if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
                kasan_shallow_populate(
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
        else
                kasan_populate_early_shadow(
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));

        /* Populate the linear mapping */
        for_each_mem_range(i, &p_start, &p_end) {
                void *start = (void *)__va(p_start);
                void *end = (void *)__va(p_end);

                if (start >= end)
                        break;

                kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        }

        /* Populate kernel, BPF, modules mapping */
        kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
                       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

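        /*
         * The early shadow page was used as scratch shadow up to now: remap
         * it read-only and clear any stale poison values it accumulated.
         */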
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        mk_pte(virt_to_page(kasan_early_shadow_page),
                               __pgprot(_PAGE_PRESENT | _PAGE_READ |
                                        _PAGE_ACCESSED)));

        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
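        /* A nonzero kasan_depth suppresses reports; clearing it enables KASAN. */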
        init_task.kasan_depth = 0;
}