// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
#include <linux/execmem.h>
#include <linux/vmalloc.h>
#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <mm/mmu_decl.h>
unsigned long long memory_limit __initdata;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
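/*
 * Pick the page protection for a user-space mapping of physical memory
 * (e.g. /dev/mem). Platforms may override this via
 * ppc_md.phys_mem_access_prot; by default, non-RAM pages are mapped
 * non-cacheable.
 */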
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(__phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
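/* Serializes creation/removal of linear-mapping sections during hotplug */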
static DEFINE_MUTEX(linear_mapping_mutex);
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
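/*
 * Default stubs for the section-mapping hooks; MMU-specific code (e.g. the
 * book3s64 hash and radix implementations) overrides these weak symbols.
 */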
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}
void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}
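/*
 * Memory hotplug entry point: create the linear mapping for the new range
 * first, then hand the pages to the core mm, tearing the mapping back down
 * if that fails.
 */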
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifndef CONFIG_NUMA
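/*
 * Without NUMA, memory topology is trivial: all boot memory is assigned to
 * node 0.
 */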
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
void __init initmem_init(void)
{
	sparse_init();
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
/*
 * Zone usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}
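/*
 * Called from the core mm init code during boot: releases boot memory to
 * the buddy allocator and, on PPC32, reports the kernel virtual memory
 * layout.
 */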
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	kasan_late_init();

	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */
#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
323 pr_info("Kernel virtual memory layout:\n");
325 pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n",
326 KASAN_SHADOW_START, KASAN_SHADOW_END);
328 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
329 #ifdef CONFIG_HIGHMEM
330 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
331 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
332 #endif /* CONFIG_HIGHMEM */
333 if (ioremap_bot != IOREMAP_TOP)
334 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
335 ioremap_bot, IOREMAP_TOP);
336 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
337 VMALLOC_START, VMALLOC_END);
339 pr_info(" * 0x%08lx..0x%08lx : modules\n",
340 MODULES_VADDR, MODULES_END);
342 #endif /* CONFIG_PPC32 */
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;

	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;
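/*
 * Tell the generic execmem allocator where dynamically allocated executable
 * memory (modules, kprobes, ...) may live and with which protections.
 */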
struct execmem_info __init *execmem_arch_setup(void)
{
	pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
	unsigned long fallback_start = 0, fallback_end = 0;
	unsigned long start, end;

	/*
	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
	 * allow allocating data in the entire vmalloc space
	 */
#ifdef MODULES_VADDR
	unsigned long limit = (unsigned long)_etext - SZ_32M;

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);

	/* First try within 32M limit from _etext to avoid branch trampolines */
	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
		start = limit;
		fallback_start = MODULES_VADDR;
		fallback_end = MODULES_END;
	} else {
		start = MODULES_VADDR;
	}

	end = MODULES_END;
#else
	start = VMALLOC_START;
	end = VMALLOC_END;
#endif

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= start,
				.end	= end,
				.pgprot	= prot,
				.alignment = 1,
				.fallback_start	= fallback_start,
				.fallback_end	= fallback_end,
			},
			[EXECMEM_KPROBES] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= kprobes_prot,
				.alignment = 1,
			},
			[EXECMEM_MODULE_DATA] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */