// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

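/*
 * Allocate zeroed early page-table memory from memblock, panicking on
 * failure: at this stage there is no way to recover from an allocation
 * error.
 */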
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size,
		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

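/*
 * Map the whole KASAN shadow region to the zero-filled early shadow page
 * through the shared early shadow PTE/PMD tables, in both early_pg_dir and
 * swapper_pg_dir: every shadow access is then valid (and reads zero) until
 * kasan_init() installs real shadow memory.
 */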
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}

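/*
 * Populate the shadow at PTE granularity: allocate a fresh PTE table if the
 * PMD entry is empty, back each missing PTE with a newly allocated page,
 * then install the table into the PMD entry.
 */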
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

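/*
 * Populate the shadow at PMD granularity, using PMD_SIZE hugepages whenever
 * the range is suitably sized and aligned and a contiguous physical chunk is
 * available; otherwise fall back to PTE-granularity mappings.
 */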
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

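/*
 * Populate the shadow at PGD granularity, trying PGDIR_SIZE hugepages first
 * where an entry still points at the early shadow and the range allows it.
 */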
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * pgdp can't be none since kasan_early_init() initialized the
		 * whole KASAN shadow region with kasan_early_shadow_pmd: if
		 * that is still the case, we can try to allocate a hugepage
		 * as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

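/*
 * Populate real shadow memory for [start, end), then write KASAN_SHADOW_INIT
 * over it so the covered region starts out marked as accessible.
 */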
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

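/*
 * For CONFIG_KASAN_VMALLOC, only the top-level entries covering the vmalloc
 * shadow need their own page tables: replace each entry still pointing at
 * the shared early shadow with a freshly allocated (zeroed) page table, so
 * that shadow pages can later be installed underneath it on demand.
 */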
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, vend);
		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
	} while (pgd_k++, vaddr = next, vaddr != vend);

	local_flush_tlb_all();
}

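/*
 * Switch the shadow from the early zero page to real memory. The shadow of
 * an address lives at kasan_mem_to_shadow(addr), i.e.
 * (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET, so one shadow
 * byte tracks an 8-byte granule of kernel memory.
 */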
void __init kasan_init(void)
{
	phys_addr_t _start, _end;
	u64 i;

	/* Keep the early (zero) shadow for regions that never get real shadow. */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)VMEMMAP_END));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the shadow of the linear mapping. */
	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)__va(_start);
		void *end = (void *)__va(_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Remap the early shadow page read-only: it must never be written. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	/* KASAN is fully set up: enable error reporting. */
	init_task.kasan_depth = 0;
}