Merge tag 's390-5.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
[linux-2.6-microblaze.git] / arch / riscv / mm / kasan_init.c
index 937d13c..d7189c8 100644 (file)
 #include <asm/fixmap.h>
 #include <asm/pgalloc.h>
 
-static __init void *early_alloc(size_t size, int node)
-{
-       void *ptr = memblock_alloc_try_nid(size, size,
-               __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
-
-       if (!ptr)
-               panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
-                       __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
-
-       return ptr;
-}
-
 extern pgd_t early_pg_dir[PTRS_PER_PGD];
 asmlinkage void __init kasan_early_init(void)
 {
@@ -60,7 +48,7 @@ asmlinkage void __init kasan_early_init(void)
        local_flush_tlb_all();
 }
 
-static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
 {
        phys_addr_t phys_addr;
        pte_t *ptep, *base_pte;
@@ -82,7 +70,7 @@ static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long en
        set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
 }
 
-static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
 {
        phys_addr_t phys_addr;
        pmd_t *pmdp, *base_pmd;
@@ -117,7 +105,7 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en
        set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
 }
 
-static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
 {
        phys_addr_t phys_addr;
        pgd_t *pgdp = pgd_offset_k(vaddr);
@@ -155,47 +143,39 @@ static void __init kasan_populate(void *start, void *end)
        memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
+/*
+ * Walk the kernel page table over [vaddr, end) and give every PGD entry
+ * that still points at the shared early shadow PMD its own private table
+ * page.  Only the top level is populated ("shallow"); lower levels are
+ * left to be filled on demand.
+ * NOTE(review): memblock_alloc() can return NULL and the result is used
+ * unchecked in set_pgd() — confirm whether a panic-on-failure wrapper
+ * (as the removed early_alloc() had) is wanted here.
+ */
+static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
+{
+       unsigned long next;
+       void *p;
+       pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+       do {
+               next = pgd_addr_end(vaddr, end);
+               /* Still mapped to the shared early shadow PMD? Detach it. */
+               if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+                       p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+                       set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+               }
+       } while (pgd_k++, vaddr = next, vaddr != end);
+}
+
 static void __init kasan_shallow_populate(void *start, void *end)
 {
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
+       /*
+        * Page-align the requested range, then populate only the top-level
+        * (PGD) entries for it; the old open-coded multi-level walk below
+        * is replaced by kasan_shallow_populate_pgd().
+        */
-       unsigned long pfn;
-       int index;
-       void *p;
-       pud_t *pud_dir, *pud_k;
-       pgd_t *pgd_dir, *pgd_k;
-       p4d_t *p4d_dir, *p4d_k;
-
-       while (vaddr < vend) {
-               index = pgd_index(vaddr);
-               pfn = csr_read(CSR_SATP) & SATP_PPN;
-               pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
-               pgd_k = init_mm.pgd + index;
-               pgd_dir = pgd_offset_k(vaddr);
-               set_pgd(pgd_dir, *pgd_k);
-
-               p4d_dir = p4d_offset(pgd_dir, vaddr);
-               p4d_k  = p4d_offset(pgd_k, vaddr);
-
-               vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
-               pud_dir = pud_offset(p4d_dir, vaddr);
-               pud_k = pud_offset(p4d_k, vaddr);
-
-               if (pud_present(*pud_dir)) {
-                       p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
-                       pud_populate(&init_mm, pud_dir, p);
-               }
-               vaddr += PAGE_SIZE;
-       }
 
+       kasan_shallow_populate_pgd(vaddr, vend);
+       /* Drop any stale translations for the range we just remapped. */
        local_flush_tlb_all();
 }
 
 void __init kasan_init(void)
 {
-       phys_addr_t _start, _end;
+       phys_addr_t p_start, p_end;
        u64 i;
 
+       /*
+        * Populate all kernel virtual address space with kasan_early_shadow_page
+        * except for the linear mapping and the modules/kernel/BPF mapping.
+        */
        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
                                    (void *)kasan_mem_to_shadow((void *)
                                                                VMEMMAP_END));
@@ -208,9 +188,10 @@ void __init kasan_init(void)
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
                        (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
-       for_each_mem_range(i, &_start, &_end) {
-               void *start = (void *)__va(_start);
-               void *end = (void *)__va(_end);
+       /* Populate the linear mapping */
+       for_each_mem_range(i, &p_start, &p_end) {
+               void *start = (void *)__va(p_start);
+               void *end = (void *)__va(p_end);
 
                if (start >= end)
                        break;
@@ -218,6 +199,10 @@ void __init kasan_init(void)
                kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        }
 
+       /* Populate kernel, BPF, modules mapping */
+       kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
+                      kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
+
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        mk_pte(virt_to_page(kasan_early_shadow_page),