// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
#include <asm/xen/swiotlb-xen.h>
/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
 * otherwise it is empty.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump-capture
 * kernel when the primary kernel crashes.
 */
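/*
 * Example (added for illustration, not part of the original source):
 * "crashkernel=512M" lets the kernel pick a suitable 2MB-aligned base
 * itself, while "crashkernel=512M@0x60000000" requests the reservation
 * at a fixed base address chosen by the user.
 */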
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}
/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump-capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
/*
 * Return the maximum physical address for a zone accessible by the given bits
 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */
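/*
 * Worked example (added, not in the original source): with zone_bits == 30
 * (a 1GB mask) and DRAM starting at 0x80000000, phys_start exceeds the
 * zone mask but not U32_MAX, so the zone is widened to the full 32-bit
 * range rather than being left empty.
 */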
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
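/*
 * Illustrative note (added): on a Raspberry Pi 4, where the device tree
 * limits DMA to 30 bits, the zone setup below typically yields ZONE_DMA
 * over [0, 1GB), ZONE_DMA32 over [1GB, 4GB) and ZONE_NORMAL above that.
 */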
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);
	struct mem_section *ms;

	/*
	 * Ensure the upper PAGE_SHIFT bits are clear in the
	 * pfn. Else it might lead to false positives when
	 * some of the upper bits are set, but the lower bits
	 * match a valid pfn.
	 */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	ms = __pfn_to_section(pfn);
	if (!valid_section(ms))
		return 0;

	/*
	 * ZONE_DEVICE memory does not have memblock entries, so a
	 * memblock_is_map_memory() check for ZONE_DEVICE based
	 * addresses will always fail. Normal hotplugged memory, in
	 * turn, never has the MEMBLOCK_NOMAP flag set in its memblock
	 * entries. Skip the memblock search for all non-early memory
	 * sections, which covers all of hotplug memory, both normal
	 * and ZONE_DEVICE based.
	 */
	if (!early_section(ms))
		return pfn_section_valid(ms, pfn);

	return memblock_is_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
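/*
 * Note (added for clarity): pfn_valid() answers "does a struct page
 * exist for this pfn?", while pfn_is_map_memory() below answers "is
 * this pfn covered by the kernel linear mapping?". MEMBLOCK_NOMAP
 * regions (e.g. some firmware-reserved ranges) can be pfn_valid()
 * without being map memory.
 */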
int pfn_is_map_memory(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/* avoid false positives for bogus PFNs, see comment in pfn_valid() */
	if (PHYS_PFN(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_is_map_memory);
static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
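/*
 * Example (added for illustration): booting with "mem=512M" on the
 * kernel command line makes the early_mem() handler above cap usable
 * RAM at 512MB; the limit is applied to memblock in
 * arm64_memblock_init() below.
 */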
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}
	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
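	/*
	 * Note (added): _PAGE_OFFSET(48) - _PAGE_OFFSET(52) works out to
	 * 2^52 - 2^48; lowering memstart_addr by that amount shifts the
	 * linear addresses of RAM up into the 48-bit addressable part of
	 * the linear region.
	 */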
	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if removing it would
		 * make the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
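		/*
		 * Note (added): memstart_offset_seed is a 16-bit random
		 * value, so (range * seed) >> 16 below picks an offset
		 * roughly uniformly from [0, range), in units of
		 * ARM64_MEMSTART_ALIGN.
		 */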
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arch_numa_init();
	/*
	 * must be done after arch_numa_init() which calls numa_init() to
	 * initialize node_online_map that gets used in hugetlb_cma_reserve()
	 * while allocating required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif
	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();
	zone_sizes_init(min, max);
	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	reserve_crashkernel();

	memblock_dump_all();
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else if (!xen_swiotlb_detect())
		swiotlb_force = SWIOTLB_NO_FORCE;
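	/*
	 * Note (added): the bounce buffer is kept only when some RAM lies
	 * above arm64_dma_phys_limit (so DMA to it may need bouncing) or
	 * when it is forced on the command line; otherwise, unless Xen
	 * still requires it, SWIOTLB is disabled to save its memory.
	 */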
	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
	/*
	 * Selected page table levels should match when derived from
	 * scratch using the virtual address range and page size.
	 */
	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
		     CONFIG_PGTABLE_LEVELS);
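	/*
	 * Worked example (added): with 4K pages and 48-bit VAs,
	 * ARM64_HW_PGTABLE_LEVELS yields (48 - 12) / (12 - 3) = 4 levels,
	 * which must match CONFIG_PGTABLE_LEVELS for that configuration.
	 */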
	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	vunmap_range((u64)__init_begin, (u64)__init_end);
}
void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}