// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

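/*
 * Temporary pgd used while the real shadow is built: kasan_init_shadow()
 * copies swapper_pg_dir here and runs on the copy, so that the early shadow
 * stays mapped while swapper_pg_dir's shadow entries are torn down and
 * repopulated.
 */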
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't be
 * used directly on kernel symbols (the kasan_early_shadow_p*d tables). The
 * early functions all run too early to use lm_alias, so the __p*d_populate
 * variants must be used instead, with the physical address taken from
 * __pa_symbol.
 */

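/*
 * Shadow page allocators: memblock_alloc_try_nid() returns zeroed memory,
 * while the _raw variant skips the memset so the caller can fill the page
 * with KASAN_SHADOW_INIT instead. Allocation failure is fatal, as
 * instrumented code cannot run without backing shadow.
 */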
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                                              __pa(MAX_DMA_ADDRESS),
                                              MEMBLOCK_ALLOC_NOLEAKTRACE, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE, node,
                      __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

static phys_addr_t __init kasan_alloc_raw_page(int node)
{
        void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
                                                __pa(MAX_DMA_ADDRESS),
                                                MEMBLOCK_ALLOC_NOLEAKTRACE,
                                                node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE, node,
                      __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

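/*
 * kasan_p*d_offset(): descend one page-table level, installing a next-level
 * table first if the entry is empty. The early pass links in the shared
 * kasan_early_shadow_* tables via __pa_symbol() (see the comment above);
 * later passes allocate private zeroed tables per node.
 */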
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
                                      bool early)
{
        if (pmd_none(READ_ONCE(*pmdp))) {
                phys_addr_t pte_phys = early ?
                                __pa_symbol(kasan_early_shadow_pte)
                                        : kasan_alloc_zeroed_page(node);
                __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
        }

        return early ? pte_offset_kimg(pmdp, addr)
                     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
                                      bool early)
{
        if (pud_none(READ_ONCE(*pudp))) {
                phys_addr_t pmd_phys = early ?
                                __pa_symbol(kasan_early_shadow_pmd)
                                        : kasan_alloc_zeroed_page(node);
                __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
        }

        return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
                                      bool early)
{
        if (p4d_none(READ_ONCE(*p4dp))) {
                phys_addr_t pud_phys = early ?
                                __pa_symbol(kasan_early_shadow_pud)
                                        : kasan_alloc_zeroed_page(node);
                __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
        }

        return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

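/*
 * kasan_p*d_populate(): walk [addr, end) level by level, mapping shadow
 * pages at the pte level. Each loop terminates early if the next entry is
 * already populated (!p*d_none()), so previously mapped shadow is never
 * overwritten.
 */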
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

        do {
                phys_addr_t page_phys = early ?
                                __pa_symbol(kasan_early_shadow_page)
                                        : kasan_alloc_raw_page(node);
                if (!early)
                        memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
                next = addr + PAGE_SIZE;
                set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, node, early);
        } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pudp, addr, next, node, early);
        } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = p4d_offset(pgdp, addr);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}

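/*
 * For reference, kasan_mem_to_shadow() (include/linux/kasan.h) computes
 *
 *      shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * with KASAN_SHADOW_SCALE_SHIFT == 3 for generic KASAN and 4 for SW_TAGS.
 * The first BUILD_BUG_ON in kasan_early_init() checks exactly this relation
 * at the top of the address space. kasan_early_init() itself is called from
 * head.S before start_kernel(), which is why it is asmlinkage.
 */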
/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
                KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
        BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
                           true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
        pgd_t *pgdp, *pgdp_new, *pgdp_end;

        pgdp = pgd_offset_k(KASAN_SHADOW_START);
        pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
        pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
        do {
                set_pgd(pgdp_new, READ_ONCE(*pgdp));
        } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        /*
         * Remove references to the kasan page tables from
         * swapper_pg_dir. pgd_clear() can't be used here
         * because it's a no-op on 2- and 3-level page
         * table setups.
         */
        for (; start < end; start += PGDIR_SIZE)
                set_pgd(pgd_offset_k(start), __pgd(0));
}

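/*
 * Replace the early shadow with the real one: dedicated, writable shadow
 * pages for the kernel image and for all memblock memory; regions that need
 * no dedicated shadow are left pointing at the single read-only
 * kasan_early_shadow_page.
 */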
static void __init kasan_init_shadow(void)
{
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
        u64 vmalloc_shadow_end;
        phys_addr_t pa_start, pa_end;
        u64 i;

        kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

        vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

        /*
         * We are going to perform proper setup of shadow memory. First we
         * unmap the early shadow (the clear_pgds() call below). However,
         * instrumented code can't execute without shadow memory, so
         * tmp_pg_dir is used to keep the early shadow mapped until the
         * full shadow setup is finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
        cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
                           early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                   (void *)mod_shadow_start);

        if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                BUILD_BUG_ON(VMALLOC_START != MODULES_END);
                kasan_populate_early_shadow((void *)vmalloc_shadow_end,
                                            (void *)KASAN_SHADOW_END);
        } else {
                kasan_populate_early_shadow((void *)kimg_shadow_end,
                                            (void *)KASAN_SHADOW_END);
                if (kimg_shadow_start > mod_shadow_end)
                        kasan_populate_early_shadow((void *)mod_shadow_end,
                                                    (void *)kimg_shadow_start);
        }

        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)__phys_to_virt(pa_start);
                void *end = (void *)__phys_to_virt(pa_end);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                                   (unsigned long)kasan_mem_to_shadow(end),
                                   early_pfn_to_nid(virt_to_pfn(start)));
        }

        /*
         * KASAN may reuse the contents of kasan_early_shadow_pte directly,
         * so we should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        pfn_pte(sym_to_pfn(kasan_early_shadow_page),
                                PAGE_KERNEL_RO));

        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}

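/*
 * init_task.kasan_depth is statically initialized to 1 so that reports are
 * suppressed while the shadow is still being set up; clearing it here
 * switches KASAN reporting on.
 */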
static void __init kasan_init_depth(void)
{
        init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
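/*
 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is normally populated on
 * demand, but vm areas registered before that machinery is up (e.g. the
 * early per-cpu areas) need their shadow mapped up front here.
 */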
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
        unsigned long shadow_start, shadow_end;

        if (!is_vmalloc_or_module_addr(start))
                return;

        shadow_start = (unsigned long)kasan_mem_to_shadow(start);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
        shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);
        kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

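/* Main entry point, called from setup_arch() once memblock is initialized. */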
void __init kasan_init(void)
{
        kasan_init_shadow();
        kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
        /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
        pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */