arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()
Author: Kefeng Wang <wangkefeng.wang@huawei.com>
Tue, 20 Sep 2022 01:49:51 +0000 (09:49 +0800)
Committer: Catalin Marinas <catalin.marinas@arm.com>
Thu, 22 Sep 2022 15:25:51 +0000 (16:25 +0100)
Directly check ARM64_KERNEL_USES_PMD_MAPS to choose base page
or PMD level huge page mapping in vmemmap_populate() to simplify
code a bit.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/r/20220920014951.196191-1-wangkefeng.wang@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/mm/mmu.c

index e7ad445..69deed2 100644 (file)
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#if !ARM64_KERNEL_USES_PMD_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
-               struct vmem_altmap *altmap)
-{
-       WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
-       return vmemmap_populate_basepages(start, end, node, altmap);
-}
-#else  /* !ARM64_KERNEL_USES_PMD_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
 {
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
        pmd_t *pmdp;
 
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+       if (!ARM64_KERNEL_USES_PMD_MAPS)
+               return vmemmap_populate_basepages(start, end, node, altmap);
+
        do {
                next = pmd_addr_end(addr, end);
 
@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 
        return 0;
 }
-#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end,