// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;
25 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
26 * Otherwise, returns 0
28 phys_addr_t v_block_mapped(unsigned long va)
30 unsigned long p = PHYS_IMMR_BASE;
32 if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
33 return p + va - VIRT_IMMR_BASE;
34 if (__map_without_ltlbs)
36 if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
42 * Return VA for a given PA mapped with LTLBs or fixmap
43 * Return 0 if not mapped
45 unsigned long p_block_mapped(phys_addr_t pa)
47 unsigned long p = PHYS_IMMR_BASE;
49 if (pa >= p && pa < p + IMMR_SIZE)
50 return VIRT_IMMR_BASE + pa - p;
51 if (__map_without_ltlbs)
53 if (pa < block_mapped_ram)
54 return (unsigned long)__va(pa);
59 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
61 void __init MMU_init_hw(void)
static bool immr_is_mapped __initdata;
67 void __init mmu_mapin_immr(void)
69 unsigned long p = PHYS_IMMR_BASE;
70 unsigned long v = VIRT_IMMR_BASE;
76 immr_is_mapped = true;
78 for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
79 map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
82 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
89 void mmu_mark_initmem_nx(void)
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Remap kernel read-only data as RO (only with CONFIG_STRICT_KERNEL_RWX).
 * NOTE(review): body not visible in this extract — confirm against the full file.
 */
void mmu_mark_rodata_ro(void)
99 void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
100 phys_addr_t first_memblock_size)
102 /* We don't currently support the first MEMBLOCK not mapping 0
103 * physical on those processors
105 BUG_ON(first_memblock_base != 0);
107 /* 8xx can only access 32MB at the moment */
108 memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
112 * Set up to use a given MMU context.
113 * id is context number, pgd is PGD pointer.
115 * We place the physical address of the new task page directory loaded
116 * into the MMU base register, and set the ASID compare register with
119 void set_context(unsigned long id, pgd_t *pgd)
121 s16 offset = (s16)(__pa(swapper_pg_dir));
123 /* Context switch the PTE pointer for the Abatron BDI2000.
124 * The PGDIR is passed as second argument.
126 if (IS_ENABLED(CONFIG_BDI_SWITCH))
127 abatron_pteptrs[1] = pgd;
129 /* Register M_TWB will contain base address of level 1 table minus the
130 * lower part of the kernel PGDIR base address, so that all accesses to
131 * level 1 table are done relative to lower part of kernel PGDIR base
134 mtspr(SPRN_M_TWB, __pa(pgd) - offset);
137 mtspr(SPRN_M_CASID, id - 1);
142 void flush_instruction_cache(void)
145 mtspr(SPRN_IC_CST, IDC_INVALL);
#ifdef CONFIG_PPC_KUEP
/*
 * Enable Kernel Userspace Execution Prevention by programming the
 * instruction access-protection groups (MI_AP).
 * @disabled: when true, leave KUEP off and return without touching MI_AP.
 */
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif
#ifdef CONFIG_PPC_KUAP
/*
 * Enable Kernel Userspace Access Protection by programming the data
 * access-protection groups (MD_AP).
 * @disabled: requests KUAP be left off; on 8xx this cannot be honoured when
 *            compiled in — only a warning is printed.
 * NOTE(review): this extract is incomplete — the `if (disabled)` guard around
 * the warning, the closing brace and the matching #endif are not visible here;
 * confirm against the full file.
 */
void __init setup_kuap(bool disabled)
	pr_info("Activating Kernel Userspace Access Protection\n");
	pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
	mtspr(SPRN_MD_AP, MD_APG_KUAP);