memblock: drop memblock_alloc_*_nopanic() variants

diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 04a9cf6..8dc0fc0 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
 {
-       if (panic)
-               return memblock_alloc_try_nid(size, size,
-                       __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-       else
-               return memblock_alloc_try_nid_nopanic(size, size,
+       void *ptr = memblock_alloc_try_nid(size, size,
                        __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+       if (!ptr && should_panic)
+               panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+                     (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+       return ptr;
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
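
The early_alloc() rework above is the pattern this series applies at each
call site: memblock_alloc_try_nid() now returns NULL on failure instead of
having a separate _nopanic() variant, and the caller decides whether that
failure is fatal. A minimal sketch of the non-fatal path, assuming the
reworked early_alloc() above (setup_scratch_area() is a hypothetical caller,
not part of this patch):

	static __init int setup_scratch_area(size_t size, int nid)
	{
		/* should_panic == false: a NULL return is handled here */
		void *scratch = early_alloc(size, nid, false);

		if (!scratch)
			return -ENOMEM;	/* degrade gracefully, don't panic */

		memset(scratch, 0, size);
		return 0;
	}
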
@@ -211,7 +213,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
        unsigned long next;
 
        if (pgd_none(*pgd)) {
-               pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+               pgd_entry = __pgd(_KERNPG_TABLE |
+                                       __pa_nodebug(kasan_early_shadow_p4d));
                set_pgd(pgd, pgd_entry);
        }
 
@@ -222,7 +225,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
                if (!p4d_none(*p4d))
                        continue;
 
-               p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+               p4d_entry = __p4d(_KERNPG_TABLE |
+                                       __pa_nodebug(kasan_early_shadow_pud));
                set_p4d(p4d, p4d_entry);
        } while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
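
Both hunks above belong to the kasan_zero_* -> kasan_early_shadow_* rename,
and they build one shared hierarchy: every populated entry at each level
points at the same next-level table, so every shadow address initially
resolves to the single kasan_early_shadow_page. A sketch of that invariant
(illustrative comment only; the pgd level applies only with 5-level paging):

	/*
	 * pgd[i] --> kasan_early_shadow_p4d   (pgtable_l5_enabled() only)
	 * p4d[i] --> kasan_early_shadow_pud
	 * pud[i] --> kasan_early_shadow_pmd
	 * pmd[i] --> kasan_early_shadow_pte
	 * pte[i] --> kasan_early_shadow_page
	 */
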
@@ -261,10 +265,11 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
        int i;
-       pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
-       pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
-       pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
-       p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
+       pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
+                               __PAGE_KERNEL | _PAGE_ENC;
+       pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
+       pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
+       p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;
 
        /* Mask out unsupported __PAGE_KERNEL bits: */
        pte_val &= __default_kernel_pte_mask;
@@ -273,16 +278,16 @@ void __init kasan_early_init(void)
        p4d_val &= __default_kernel_pte_mask;
 
        for (i = 0; i < PTRS_PER_PTE; i++)
-               kasan_zero_pte[i] = __pte(pte_val);
+               kasan_early_shadow_pte[i] = __pte(pte_val);
 
        for (i = 0; i < PTRS_PER_PMD; i++)
-               kasan_zero_pmd[i] = __pmd(pmd_val);
+               kasan_early_shadow_pmd[i] = __pmd(pmd_val);
 
        for (i = 0; i < PTRS_PER_PUD; i++)
-               kasan_zero_pud[i] = __pud(pud_val);
+               kasan_early_shadow_pud[i] = __pud(pud_val);
 
        for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
-               kasan_zero_p4d[i] = __p4d(p4d_val);
+               kasan_early_shadow_p4d[i] = __p4d(p4d_val);
 
        kasan_map_early_shadow(early_top_pgt);
        kasan_map_early_shadow(init_top_pgt);
@@ -326,7 +331,7 @@ void __init kasan_init(void)
 
        clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-       kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
+       kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
        for (i = 0; i < E820_MAX_ENTRIES; i++) {
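
Every boundary passed around here goes through kasan_mem_to_shadow(). For
generic KASAN this is a fixed linear relation in which one shadow byte
covers KASAN_SHADOW_SCALE_SIZE (8) bytes of memory; a sketch matching that
definition:

	/*
	 * Generic KASAN address-to-shadow relation used throughout
	 * kasan_init(): shift by KASAN_SHADOW_SCALE_SHIFT (3) and
	 * rebase at KASAN_SHADOW_OFFSET.
	 */
	static inline void *mem_to_shadow_sketch(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}
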
@@ -338,41 +343,41 @@ void __init kasan_init(void)
 
        shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
        shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-       shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
-                                               PAGE_SIZE);
+       shadow_cpu_entry_begin = (void *)round_down(
+                       (unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
 
        shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
                                        CPU_ENTRY_AREA_MAP_SIZE);
        shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-       shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
-                                       PAGE_SIZE);
+       shadow_cpu_entry_end = (void *)round_up(
+                       (unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
 
-       kasan_populate_zero_shadow(
+       kasan_populate_early_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                shadow_cpu_entry_begin);
 
        kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
                              (unsigned long)shadow_cpu_entry_end, 0);
 
-       kasan_populate_zero_shadow(shadow_cpu_entry_end,
-                               kasan_mem_to_shadow((void *)__START_KERNEL_map));
+       kasan_populate_early_shadow(shadow_cpu_entry_end,
+                       kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));
 
-       kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-                               (void *)KASAN_SHADOW_END);
+       kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+                                       (void *)KASAN_SHADOW_END);
 
        load_cr3(init_top_pgt);
        __flush_tlb_all();
 
        /*
-        * kasan_zero_page has been used as early shadow memory, thus it may
-        * contain some garbage. Now we can clear and write protect it, since
-        * after the TLB flush no one should write to it.
+        * kasan_early_shadow_page has been used as early shadow memory, thus
+        * it may contain some garbage. Now we can clear and write protect it,
+        * since after the TLB flush no one should write to it.
         */
-       memset(kasan_zero_page, 0, PAGE_SIZE);
+       memset(kasan_early_shadow_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte;
                pgprot_t prot;
@@ -380,8 +385,8 @@ void __init kasan_init(void)
                prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
                pgprot_val(prot) &= __default_kernel_pte_mask;
 
-               pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
-               set_pte(&kasan_zero_pte[i], pte);
+               pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
+               set_pte(&kasan_early_shadow_pte[i], pte);
        }
        /* Flush TLBs again to be sure that write protection applied. */
        __flush_tlb_all();
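
The zeroing above matters because, in generic KASAN, a shadow byte of 0
means the whole 8-byte granule it covers is addressable: once
kasan_early_shadow_page is cleared and remapped read-only, every early
access checks out as valid, and a stray write through the shared shadow
mapping now faults instead of silently corrupting shadow state. A
simplified sketch of the check this enables (the in-tree check also
handles partially addressable granules):

	static inline bool byte_accessible_sketch(const void *addr)
	{
		s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);

		return shadow == 0;	/* 0 == granule fully addressable */
	}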