// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
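
/*
 * Boot-time allocator for KASAN shadow backing memory: returns "size" bytes
 * of "size"-aligned memblock memory above MAX_DMA_ADDRESS, preferably on the
 * requested node. Callers that have a fallback path (e.g. the huge-page
 * attempts below) pass should_panic=false and handle a NULL return.
 */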
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
	void *ptr = memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

	return ptr;
}
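
/*
 * kasan_populate_{pmd,pud,p4d,pgd}() each walk one page-table level and back
 * the given shadow range with freshly allocated memory. 2M (PSE) and 1G
 * (GBPAGES) mappings are used when a whole, aligned PMD/PUD range is covered;
 * otherwise the walk descends to the next level, down to individual 4K PTEs.
 */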
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			memblock_free(p, PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;
		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			memblock_free(p, PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}
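
/*
 * Entry point for the walk above: populate real shadow for an arbitrary
 * [addr, end) shadow range, spread across one or more PGD entries.
 */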
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}
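
/*
 * With generic KASAN one shadow byte covers eight bytes of kernel address
 * space: kasan_mem_to_shadow(addr) == (addr >> 3) + KASAN_SHADOW_OFFSET.
 * map_range() allocates real shadow for one already-mapped range of physical
 * memory (one pfn_mapped[] entry), on the node that memory belongs to.
 */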
static void __init map_range(struct range *range)
{
	unsigned long start, end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}
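
/*
 * Tear down the early (zero page) shadow mappings installed by
 * kasan_early_init() so that the real shadow built in kasan_init() can take
 * their place. KASAN_SHADOW_END is not PGD-aligned (see the comment in
 * kasan_init()), so the last, shared PGD entry is cleared one p4d at a time
 * instead of as a whole.
 */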
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}
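
/*
 * p4d_offset() variant usable while the kernel still runs on the early boot
 * page tables, where the direct mapping (__va()) is not available yet: the
 * p4d table's virtual address is derived from the kernel image mapping
 * (__START_KERNEL_map - phys_base) instead.
 */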
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}
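
/*
 * Point every empty p4d entry in [addr, end) at the shared early shadow pud
 * so that the whole range reads as zero shadow. With 5-level paging an empty
 * pgd first gets the shared early shadow p4d installed.
 */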
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (!p4d_none(*p4d))
			continue;
		p4d_entry = __p4d(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
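
/*
 * Map the entire shadow region to the early (zero) shadow in the given
 * top-level page table; called for both early_top_pgt and init_top_pgt.
 */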
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
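
/*
 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is not backed here at all;
 * only the pgd/p4d levels are preallocated so that on-demand shadow
 * population later only has to touch the lower levels and the top-level
 * entries are already visible everywhere (see sync_global_pgds_l4()).
 */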
static void __init kasan_shallow_populate_p4ds(pgd_t *pgd, unsigned long addr,
					       unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;
	void *p;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			p4d_populate(&init_mm, p4d, p);
		}
	} while (p4d++, addr = next, addr != end);
}

static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
	unsigned long addr, next;
	pgd_t *pgd;
	void *p;

	addr = (unsigned long)start;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, (unsigned long)end);

		if (pgd_none(*pgd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			pgd_populate(&init_mm, pgd, p);
		}

		/*
		 * We need to populate the p4ds so they are synced when
		 * running in four-level mode - see sync_global_pgds_l4().
		 */
		kasan_shallow_populate_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != (unsigned long)end);
}
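
/*
 * Called very early in boot, before any real shadow memory can be allocated:
 * fill the static early shadow page tables so that every shadow lookup
 * resolves to the zero page kasan_early_shadow_page, and hook them into both
 * early_top_pgt and init_top_pgt. This lets KASAN-instrumented code run long
 * before kasan_init() builds the real shadow.
 */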
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;
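
	/*
	 * Point every slot of each static table at the level below, ending at
	 * the single zero shadow page: p4d -> pud -> pmd -> pte -> page.
	 */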
	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}
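
/*
 * Build the real shadow mapping: tear down the early zero shadow, allocate
 * writable shadow for the regions KASAN needs to poison (low memory, the CPU
 * entry area, the kernel image), map the never-poisoned gaps to the shared
 * zero shadow page, then write-protect that page and enable reports.
 */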
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to a PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * a bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}
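
	/*
	 * Switch to the early_top_pgt copy (which still maps the early zero
	 * shadow) so that the shadow entries in init_top_pgt can be torn down
	 * and rebuilt below while KASAN-instrumented code keeps running.
	 */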
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;
		map_range(&pfn_mapped[i]);
	}
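
	/*
	 * Compute the page-aligned shadow range of the CPU entry area; unlike
	 * the regions around it, it is given real, writable shadow below.
	 */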
	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
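
	/*
	 * kasan_populate_early_shadow() maps a range to the shared, read-only
	 * zero shadow page and is used for regions whose shadow is never
	 * written; kasan_populate_shadow() allocates real, writable shadow.
	 */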
	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)VMALLOC_START));

	/*
	 * If we're in full vmalloc mode, don't back vmalloc space with early
	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
	 * the global table and we can populate the lower levels on demand.
	 */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate_pgds(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
		shadow_cpu_entry_begin);
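
	/*
	 * The CPU entry area and the kernel image [_stext, _end) get real
	 * shadow, since their shadow may be poisoned at runtime (e.g. global
	 * variable redzones in the image); the gaps around them and everything
	 * above MODULES_END keep the zero shadow.
	 */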
	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}

	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}