Merge tag 'riscv-for-linus-5.15-mw0' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 93720b0..c0cddf0 100644
@@ -19,6 +19,7 @@
 #include <linux/set_memory.h>
 #include <linux/dma-map-ops.h>
 #include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -222,6 +223,8 @@ static void __init setup_bootmem(void)
 
        early_init_fdt_scan_reserved_mem();
        dma_contiguous_reserve(dma32_phys_limit);
+       if (IS_ENABLED(CONFIG_64BIT))
+               hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
        memblock_allow_resize();
 }
 
@@ -234,14 +237,15 @@ static struct pt_alloc_ops _pt_ops __initdata;
 #define pt_ops _pt_ops
 #endif
 
-unsigned long pfn_base __ro_after_init;
-EXPORT_SYMBOL(pfn_base);
+unsigned long riscv_pfn_base __ro_after_init;
+EXPORT_SYMBOL(riscv_pfn_base);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
 #ifdef CONFIG_XIP_KERNEL
 #define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
@@ -322,7 +326,6 @@ static void __init create_pte_mapping(pte_t *ptep,
 static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
 static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
 static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
-static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 
 #ifdef CONFIG_XIP_KERNEL
 #define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
@@ -408,6 +411,7 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
 #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)     \
        create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 #define fixmap_pgd_next                fixmap_pte
+#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
 #endif
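
The empty create_pmd_mapping() definition added here is a no-op stub for
configurations where the PMD level is folded (32-bit Sv32): it lets the
IS_ENABLED(CONFIG_64BIT) branches in create_fdt_early_page_table() below
compile on 32-bit, where the call simply expands to nothing. A minimal sketch
of the pattern; CONFIG_FEATURE and feature_setup() are hypothetical names:

#include <stdio.h>

#define CONFIG_FEATURE 0	/* assumption: the feature is compiled out */

#if CONFIG_FEATURE
static void feature_setup(int arg)
{
	printf("feature set up with %d\n", arg);
}
#else
/* no-op stub: unlike a static inline, it drops the call at preprocessing */
#define feature_setup(arg)
#endif

int main(void)
{
	if (CONFIG_FEATURE)	/* mirrors IS_ENABLED(CONFIG_64BIT) */
		feature_setup(42);
	return 0;
}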
 
 void __init create_pgd_mapping(pgd_t *pgdp,
@@ -515,49 +519,80 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
 #endif
 
 #ifdef CONFIG_XIP_KERNEL
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+static void __init create_kernel_page_table(pgd_t *pgdir,
                                            __always_unused bool early)
 {
        uintptr_t va, end_va;
 
        /* Map the flash resident part */
        end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
-       for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+       for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
                create_pgd_mapping(pgdir, va,
                                   kernel_map.xiprom + (va - kernel_map.virt_addr),
-                                  map_size, PAGE_KERNEL_EXEC);
+                                  PMD_SIZE, PAGE_KERNEL_EXEC);
 
        /* Map the data in RAM */
        end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
-       for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+       for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
                create_pgd_mapping(pgdir, va,
                                   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
-                                  map_size, PAGE_KERNEL);
+                                  PMD_SIZE, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
-                                           bool early)
+static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
 {
        uintptr_t va, end_va;
 
        end_va = kernel_map.virt_addr + kernel_map.size;
-       for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+       for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
                create_pgd_mapping(pgdir, va,
                                   kernel_map.phys_addr + (va - kernel_map.virt_addr),
-                                  map_size,
+                                  PMD_SIZE,
                                   early ?
                                        PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
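
With the map_size parameter gone, the kernel image is always mapped at PMD
granularity (2MB megapages on RV64 Sv39, 4MB on RV32), which the tightened
PMD_SIZE alignment BUG_ON below depends on. A back-of-the-envelope sketch of
what the mapping loop does, with an image size assumed purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (1ULL << 21)	/* assumed: 2 MiB megapages (RV64 Sv39) */

int main(void)
{
	uint64_t kernel_size = 22ULL << 20;	/* hypothetical 22 MiB image */
	/* one create_pgd_mapping() call per PMD_SIZE step over the image */
	uint64_t entries = (kernel_size + PMD_SIZE - 1) / PMD_SIZE;

	printf("%llu PMD entries map a %llu MiB kernel\n",
	       (unsigned long long)entries,
	       (unsigned long long)(kernel_size >> 20));
	return 0;
}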
 
-asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+/*
+ * Set up a 4MB mapping that encompasses the device tree: for a 64-bit kernel
+ * this means 2 PMD entries, whereas for a 32-bit kernel it is only 1 PGDIR
+ * entry.
+ */
+static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
 {
-       uintptr_t __maybe_unused pa;
-       uintptr_t map_size;
-#ifndef __PAGETABLE_PMD_FOLDED
-       pmd_t fix_bmap_spmd, fix_bmap_epmd;
+#ifndef CONFIG_BUILTIN_DTB
+       uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+
+       create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+                          IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
+                          PGDIR_SIZE,
+                          IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+
+       if (IS_ENABLED(CONFIG_64BIT)) {
+               create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+                                  pa, PMD_SIZE, PAGE_KERNEL);
+               create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+                                  pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+       }
+
+       dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
+       /*
+        * For a 64-bit kernel, __va can't be used here since it would return a
+        * linear mapping address, whereas dtb_early_va will be used before
+        * setup_vm_final installs the linear mapping. For a 32-bit kernel the
+        * kernel itself lives in the linear mapping, so it makes no difference.
+        */
+       dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
 #endif
 
+       dtb_early_pa = dtb_pa;
+}
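
The two consecutive PMD entries give a fixed 4MB virtual window that covers the
whole FDT even when it straddles a 2MB boundary, because only the mapping base
is aligned down while the low bits of dtb_pa are added back into the virtual
address. A standalone sketch of that address math; DTB_EARLY_BASE_VA and the
physical address are made up for the example:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE		(1ULL << 21)		/* assumed: 2 MiB */
#define DTB_EARLY_BASE_VA	0xffffffc000000000ULL	/* hypothetical VA */

int main(void)
{
	uint64_t dtb_pa = 0x801ff800;		/* made up, near a 2 MiB edge */
	uint64_t pa = dtb_pa & ~(PMD_SIZE - 1);	/* aligned-down mapping base */
	uint64_t dtb_va = DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));

	printf("PMDs at %#llx and %#llx form the 4 MiB window\n",
	       (unsigned long long)pa, (unsigned long long)(pa + PMD_SIZE));
	printf("dtb_early_va = %#llx\n", (unsigned long long)dtb_va);
	return 0;
}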
+
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+       pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
+
        kernel_map.virt_addr = KERNEL_LINK_ADDR;
 
 #ifdef CONFIG_XIP_KERNEL
@@ -573,23 +608,14 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
        kernel_map.phys_addr = (uintptr_t)(&_start);
        kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 #endif
-
        kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
-#ifdef CONFIG_64BIT
        kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
-#endif
 
-       pfn_base = PFN_DOWN(kernel_map.phys_addr);
-
-       /*
-        * Enforce boot alignment requirements of RV32 and
-        * RV64 by only allowing PMD or PGD mappings.
-        */
-       map_size = PMD_SIZE;
+       riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
 
        /* Sanity check alignment and size */
        BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
-       BUG_ON((kernel_map.phys_addr % map_size) != 0);
+       BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
 
 #ifdef CONFIG_64BIT
        /*
@@ -634,50 +660,10 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         * us to reach paging_init(). We map all memory banks later
         * in setup_vm_final() below.
         */
-       create_kernel_page_table(early_pg_dir, map_size, true);
+       create_kernel_page_table(early_pg_dir, true);
 
-#ifndef __PAGETABLE_PMD_FOLDED
-       /* Setup early PMD for DTB */
-       create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
-                          (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
-#ifndef CONFIG_BUILTIN_DTB
-       /* Create two consecutive PMD mappings for FDT early scan */
-       pa = dtb_pa & ~(PMD_SIZE - 1);
-       create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
-                          pa, PMD_SIZE, PAGE_KERNEL);
-       create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
-                          pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
-       dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
-#else /* CONFIG_BUILTIN_DTB */
-#ifdef CONFIG_64BIT
-       /*
-        * __va can't be used since it would return a linear mapping address
-        * whereas dtb_early_va will be used before setup_vm_final installs
-        * the linear mapping.
-        */
-       dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#else
-       dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_64BIT */
-#endif /* CONFIG_BUILTIN_DTB */
-#else
-#ifndef CONFIG_BUILTIN_DTB
-       /* Create two consecutive PGD mappings for FDT early scan */
-       pa = dtb_pa & ~(PGDIR_SIZE - 1);
-       create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
-                          pa, PGDIR_SIZE, PAGE_KERNEL);
-       create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
-                          pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
-       dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
-#else /* CONFIG_BUILTIN_DTB */
-#ifdef CONFIG_64BIT
-       dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
-#else
-       dtb_early_va = __va(dtb_pa);
-#endif /* CONFIG_64BIT */
-#endif /* CONFIG_BUILTIN_DTB */
-#endif
-       dtb_early_pa = dtb_pa;
+       /* Set up the early mapping used by the FDT early scan */
+       create_fdt_early_page_table(early_pg_dir, dtb_pa);
 
        /*
        * Boot-time fixmap can only handle PMD_SIZE mappings. Thus, boot-ioremap
@@ -752,7 +738,7 @@ static void __init setup_vm_final(void)
 
 #ifdef CONFIG_64BIT
        /* Map the kernel */
-       create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
+       create_kernel_page_table(swapper_pg_dir, false);
 #endif
 
        /* Clear fixmap PTE and PMD mappings */
@@ -819,38 +805,22 @@ static void __init reserve_crashkernel(void)
 
        crash_size = PAGE_ALIGN(crash_size);
 
-       if (crash_base == 0) {
-               /*
-                * Current riscv boot protocol requires 2MB alignment for
-                * RV64 and 4MB alignment for RV32 (hugepage size)
-                */
-               crash_base = memblock_find_in_range(search_start, search_end,
-                                                   crash_size, PMD_SIZE);
-
-               if (crash_base == 0) {
-                       pr_warn("crashkernel: couldn't allocate %lldKB\n",
-                               crash_size >> 10);
-                       return;
-               }
-       } else {
-               /* User specifies base address explicitly. */
-               if (!memblock_is_region_memory(crash_base, crash_size)) {
-                       pr_warn("crashkernel: requested region is not memory\n");
-                       return;
-               }
-
-               if (memblock_is_region_reserved(crash_base, crash_size)) {
-                       pr_warn("crashkernel: requested region is reserved\n");
-                       return;
-               }
-
+       if (crash_base) {
+               search_start = crash_base;
+               search_end = crash_base + crash_size;
+       }
 
-               if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
-                       pr_warn("crashkernel: requested region is misaligned\n");
-                       return;
-               }
+       /*
+        * Current riscv boot protocol requires 2MB alignment for
+        * RV64 and 4MB alignment for RV32 (hugepage size)
+        */
+       crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+                                              search_start, search_end);
+       if (crash_base == 0) {
+               pr_warn("crashkernel: couldn't allocate %lldKB\n",
+                       crash_size >> 10);
+               return;
        }
-       memblock_reserve(crash_base, crash_size);
 
        pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
                crash_base, crash_base + crash_size, crash_size >> 20);
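
The rewrite collapses the open-coded find/validate/reserve sequence into a
single memblock_phys_alloc_range() call: a user-supplied crash_base simply
narrows the search window to exactly the requested region, so the old explicit
"is memory", "is reserved" and alignment checks all surface as an allocation
failure instead. A control-flow sketch with a stand-in allocator;
phys_alloc_range() here is hypothetical, not the real memblock API:

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (1ULL << 21)	/* assumed RV64 alignment requirement */

/* stand-in for memblock_phys_alloc_range(); always fails in this sketch */
static uint64_t phys_alloc_range(uint64_t size, uint64_t align,
				 uint64_t start, uint64_t end)
{
	(void)size; (void)align; (void)start; (void)end;
	return 0;
}

static void reserve_crash_region(uint64_t crash_base, uint64_t crash_size,
				 uint64_t search_start, uint64_t search_end)
{
	if (crash_base) {	/* exact region requested: degenerate window */
		search_start = crash_base;
		search_end = crash_base + crash_size;
	}
	crash_base = phys_alloc_range(crash_size, PMD_SIZE,
				      search_start, search_end);
	if (!crash_base)
		printf("crashkernel: couldn't allocate %lluKB\n",
		       (unsigned long long)(crash_size >> 10));
}

int main(void)
{
	/* no base given: search the whole window, as the kernel code does */
	reserve_crash_region(0, 64ULL << 20, 0, 1ULL << 32);
	return 0;
}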