// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>
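
/*
 * FIX_IMMR_SIZE is the number of fixmap pages reserved for the 8xx
 * Internal Memory Map (IMMR) area, so shifting by PAGE_SHIFT gives its
 * size in bytes.  "LTLB" in the helpers below refers to the large
 * (512k/8M) TLB entries used to block-map the IMMR area and low RAM.
 */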

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap.
 * Return 0 if not mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}
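
/*
 * Set up the early huge page directory for an 8M kernel mapping.  An 8M
 * page spans two consecutive page directory slots on the 8xx, so both
 * entries are pointed at the same page table.
 */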
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}
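
/*
 * Install one kernel huge page (512k or 8M) mapping va to pa with the
 * given protection.  @new selects between allocating fresh early page
 * tables (only valid before slab is up) and rewriting entries that
 * already exist.
 */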
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never be already present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;
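
/*
 * Map the IMMR area with a single non-cacheable, guarded 512k page.
 * Safe to call more than once; only the first call installs the
 * mapping.
 */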
void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
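
/*
 * Map the physical range [offset, top) at PAGE_OFFSET + offset using
 * the largest pages possible: 512k pages up to the first 8M boundary,
 * 8M pages in the middle, then 512k pages for the tail.  For example,
 * a hypothetical offset = 0 and top = 0xa80000 (10.5M) yields one 8M
 * page followed by five 512k pages.
 */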
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/*
	 * Only a remap (!new) leaves stale TLB entries.  Flush the whole
	 * chunk; v already includes PAGE_OFFSET, so start from the chunk
	 * base rather than adding PAGE_OFFSET to v again.
	 */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}
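
/*
 * Create the initial block mappings for low RAM.  Under strict kernel
 * RWX, DEBUG_PAGEALLOC or KFENCE, the executable mapping must stop
 * exactly at _sinittext so that later permission changes stay accurate;
 * otherwise the text boundary is simply rounded up to 8M.  Returns the
 * amount of RAM that ended up block-mapped.
 */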
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	if (__map_without_ltlbs)
		return 0;

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled_or_kfence()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}
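
/*
 * Once init code has run, remap the range between the text boundary and
 * the end of init text as non-executable PAGE_KERNEL, then refresh the
 * pinned TLB entries covering the block-mapped RAM.
 */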
void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	mmu_pin_tlb(block_mapped_ram, false);
}
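
/*
 * With STRICT_KERNEL_RWX, mark_rodata_ro() ends up here: the text and
 * rodata region is remapped read-only+execute, and the pinned data TLB
 * entries are presumably reloaded read-only via mmu_pin_tlb().
 */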
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif
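
/*
 * Clamp early memblock allocations to what the boot-time mapping can
 * reach; mmu_mapin_ram() raises the limit later once more RAM is
 * block-mapped.
 */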
void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
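
/*
 * KUEP: load the instruction-side access protection groups (MI_AP) with
 * a layout under which kernel mode cannot execute from user mappings.
 */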
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif
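
/*
 * KUAP: same idea on the data side (MD_AP).  When KUAP is disabled at
 * boot, a static branch is flipped instead so that the runtime checks
 * in the user access helpers become no-ops.
 */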
#ifdef CONFIG_PPC_KUAP
struct static_key_false disable_kuap_key;
EXPORT_SYMBOL(disable_kuap_key);

void __init setup_kuap(bool disabled)
{
	if (disabled) {
		static_branch_enable(&disable_kuap_key);
		return;
	}

	pr_info("Activating Kernel Userspace Access Protection\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif