/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

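/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is written
 * to CONTEXTIDR_EL1 on every context switch, so that external debug and
 * trace tools can identify the running process.
 */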
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
	asm(
	"	msr	contextidr_el1, %0\n"
	"	isb"
	:
	: "r" (task_pid_nr(next)));
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = virt_to_phys(empty_zero_page);

	asm(
	"	msr	ttbr0_el1, %0			// set TTBR0\n"
	"	isb"
	:
	: "r" (ttbr));
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;

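/*
 * True when the ID map needs more VA range than the default T0SZ
 * provides; this can never happen with 48-bit VAs, since no wider
 * input range is available.
 */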
static inline bool __cpu_uses_extended_idmap(void)
{
	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
}

/*
 * Set TCR.T0SZ to its default value (based on VA_BITS)
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr;

	if (!__cpu_uses_extended_idmap())
		return;

	asm volatile (
	"	mrs	%0, tcr_el1	;"
	"	bfi	%0, %1, %2, %3	;"
	"	msr	tcr_el1, %0	;"
	"	isb"
	: "=&r" (tcr)
	: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm)
		cpu_switch_mm(mm->pgd, mm);
}

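/*
 * Install the idmap in TTBR0_EL1: go via the reserved page tables and a
 * local TLB flush, widening T0SZ first if the idmap needs the extended
 * VA range.
 */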
static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(idmap_pg_dir, &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void cpu_replace_ttbr1(pgd_t *pgd)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	phys_addr_t pgd_phys = virt_to_phys(pgd);

	/*
	 * Call the routine via its physical address: the idmap maps it 1:1,
	 * so it can keep running while the TTBR1_EL1 kernel mapping is
	 * swapped out underneath it.
	 */
	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);

	cpu_install_idmap();
	replace_phys(pgd_phys);
	cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
#define destroy_context(mm)		do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev == next)
		return;

	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next, cpu);
}

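/*
 * No explicit deactivation work is needed; activate_mm() just performs
 * a normal switch_mm() with no associated task.
 */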
#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

void verify_cpu_asid_bits(void);

#endif