// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>
#include <linux/vmalloc.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/sparsemem.h>
#include <asm/mmzone.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit;
bool init_mem_is_free;
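
/*
 * The shared zero page: a page of zeroes mapped read-only wherever a
 * zero-filled page is needed without allocating real memory, e.g. for
 * reads of untouched anonymous mappings.
 */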
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
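
/*
 * Decide the page protection for a user mapping of physical memory
 * (e.g. via /dev/mem): the platform hook gets first say; otherwise
 * anything that is not RAM is mapped non-cacheable.
 */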
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
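
/*
 * The __weak stubs above return -ENODEV so that configurations without a
 * linear-mapping implementation refuse memory hot(un)plug; MMU-specific
 * code (e.g. the book3s64 hash MMU) provides the real implementations.
 */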

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory.
	 */
	vm_unmap_aliases();
}
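
/*
 * Hot-add creates the linear mapping before the pages are added and
 * tears it back down if __add_pages() fails, so a failed hot-add
 * leaves no stale bolted mappings behind.
 */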
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = __add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions.
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* Mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}
	return 0;
}
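
/*
 * For example (hypothetical layout): with RAM at PFNs [0, 0x40000) and
 * [0x80000, 0xc0000), the hole [0x40000, 0x80000) is registered as a
 * nosave region so hibernation does not try to save or restore it.
 */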
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
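
/*
 * For example, assuming a 4K PAGE_SIZE (PAGE_SHIFT == 12): zone_dma_bits
 * == 31 caps ZONE_DMA at 1UL << (31 - 12) = 0x80000 PFNs, i.e. the first
 * 2 GiB of RAM; zone_dma_bits == 30 caps it at 0x40000 PFNs, i.e. 1 GiB.
 */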
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
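
	/*
	 * E.g. (hypothetical values): top_of_ram = 0x100000000 with
	 * total_ram = 0xf0000000 reports a 0x10000000-byte (256MB)
	 * memory hole.
	 */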

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	if (is_secure_guest())
		svm_swiotlb_init();
	else
		swiotlb_init(0);
#endif
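
	/*
	 * Secure (SVM) guests must bounce all DMA through memory shared
	 * with the hypervisor, hence the svm_swiotlb_init() variant above.
	 */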

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu-up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
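			/*
			 * E.g. a memblock range [0x0, 0x80000000) ("end"
			 * exclusive) becomes the /proc/iomem resource
			 * 00000000-7fffffff ("end" inclusive).
			 */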
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);