// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
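/*
 * Note: on ISA 3.0 the hardware locates the partition table via the
 * PTCR register, and each partition-table entry points at a process
 * table. Both pointers are populated during early MMU setup.
 */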
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
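
/*
 * Note: before the slab allocator is up, ioremap() bolts mappings at
 * ioremap_bot and bumps the pointer past each one. __iounmap() below
 * refuses to tear down anything under ioremap_bot for the same reason:
 * those early mappings are permanent.
 */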

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
	unsigned long i;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	if ((ea + size) >= (void *)IOREMAP_END) {
		pr_warn("Outside the supported range\n");
		return NULL;
	}

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
			return NULL;

	return (void __iomem *)ea;
}
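
/*
 * Example (illustrative sketch, not taken from this file): ISA/PCI IO
 * setup bolts a fixed virtual range to a bus address roughly like so:
 *
 *	__ioremap_at(pa, (void *)ISA_IO_BASE, 0x10000,
 *		     pgprot_noncached(PAGE_KERNEL));
 *
 * The caller owns the virtual range and tears it down with
 * __iounmap_at(); on failure, pages already mapped are left for the
 * caller to clean up.
 */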

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				pgprot_t prot, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the slab allocator (and
	 * with it vmalloc) is running, we let __get_vm_area_caller()
	 * pick one from the range between ioremap_bot and IOREMAP_END.
	 * Before that, we map using addresses going up from ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;
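
	/*
	 * Worked example (assuming 4K pages): for addr = 0x10000004 and
	 * size = 8, paligned = 0x10000000 and size becomes
	 * PAGE_ALIGN(0x1000000c) - 0x10000000 = 0x1000, i.e. one full
	 * page; the sub-page offset (4) is added back to the returned
	 * cookie at the end of this function.
	 */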

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, prot);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}
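
/*
 * The variants below differ only in cache attributes: ioremap() maps
 * cache-inhibited and guarded (the safe default for MMIO registers),
 * ioremap_wc() drops the guard so stores may combine (useful for
 * framebuffers), and ioremap_coherent() maps cacheable for memory that
 * participates in cache coherency.
 */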

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	/*
	 * Force kernel mapping.
	 */
	pte = pte_mkprivileged(pte);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}

/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}
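
/*
 * Typical driver usage (illustrative sketch; CTRL_REG and the resource
 * are hypothetical, error handling elided):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */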

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}
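
/*
 * Note: with 64K pages and 4K PTE fragments, several PTE tables share
 * one backing page; pmd_page_vaddr() returns the fragment-aligned
 * address and virt_to_page() rounds it down to the shared struct page.
 */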

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif