// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

extern pgd_t early_pg_dir[PTRS_PER_PGD];
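/*
 * Called from assembly during early boot (hence the asmlinkage): map the
 * whole shadow region to the single zeroed kasan_early_shadow_page,
 * through the shared early shadow PTE and PMD tables, in both
 * early_pg_dir and swapper_pg_dir. This lets instrumented code run
 * before kasan_init() builds the real shadow.
 */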
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t) kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}

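/*
 * Populate the shadow range [vaddr, end) at PTE granularity: reuse the
 * PTE table the PMD already points to (or allocate one from memblock),
 * back every unmapped entry with a freshly allocated shadow page, then
 * install the table in the PMD.
 */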
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

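/*
 * One level up: map PMD-sized, PMD-aligned chunks of the shadow with
 * hugepages when memblock can provide them, falling back to
 * kasan_populate_pte() otherwise. A new PMD table is allocated only if
 * the PGD entry still points at the shared early shadow PMD.
 */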
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

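/*
 * Top of the populate walk: try a PGDIR-sized hugepage per entry while
 * the entry still points at the early shadow PMD and the range allows
 * it, otherwise recurse into kasan_populate_pmd().
 */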
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * pgdp can't be none since kasan_early_init initialized all
		 * KASAN shadow region with kasan_early_shadow_pmd: if this is
		 * still the case, that means we can try to allocate a
		 * hugepage as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

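/*
 * Build real shadow for [start, end): populate the page tables, flush
 * stale early-shadow translations, then initialize the new shadow with
 * KASAN_SHADOW_INIT.
 */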
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

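/*
 * Shallow populate: only allocate the top-level page tables so that the
 * lower levels can be filled on demand (by kasan_populate_vmalloc())
 * when the vmalloc area is actually used.
 */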
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);
		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
	} while (pgd_k++, vaddr = next, vaddr != end);
}

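/* Page-align the range, shallow-populate it, and flush the TLB. */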
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

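/*
 * Switch from the early shadow to the real one: keep the zero shadow
 * for regions that never get private shadow memory, build real shadow
 * for the linear map and the kernel/modules/BPF region, then make the
 * early shadow page read-only and let KASAN start reporting
 * (kasan_depth = 0).
 */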
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	/*
	 * Populate all kernel virtual address space with kasan_early_shadow_page
	 * except for the linear mapping and the modules/kernel/BPF mapping.
	 */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)
								VMEMMAP_END));
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	/* Make the early shadow page read-only now that it is only a fallback. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}