// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
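
/*
 * mm->context.id packs the rollover generation and the hardware ASID into a
 * single 64-bit value. Assuming 16-bit ASIDs, for illustration:
 *
 *   bits [63:16]  generation (asid_generation advances by ASID_FIRST_VERSION
 *                 on every rollover)
 *   bits [15:0]   hardware ASID written to the TTBRs
 *
 * e.g. id == 0x0003002a is generation 3 with hardware ASID 0x2a.
 */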
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}
/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we
		 * support fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}
static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * In case of KPTI kernel/user ASIDs are allocated in
	 * pairs, the bottom bit distinguishes the two: if it
	 * is set, then the ASID will map only userspace. Thus
	 * mark even as reserved for kernel.
	 */
	memset(asid_map, 0xaa, len);
}
static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}
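
/*
 * Called on generation rollover, with cpu_asid_lock held: rebuild the ASID
 * bitmap, preserve each CPU's currently running ASID in reserved_asids, and
 * queue a local TLB flush for every CPU.
 */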
static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}
static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}
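
/*
 * Allocate a new (generation | ASID) value for @mm. Called from the slow path
 * of check_and_switch_context() with cpu_asid_lock held; may trigger a
 * rollover via flush_context() if the bitmap is exhausted.
 */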
static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
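
/*
 * Per-CPU entry point on every context switch: the fast path only needs a
 * relaxed cmpxchg on this CPU's active_asids slot; the slow path takes
 * cpu_asid_lock, allocates a new context if our generation is stale and
 * performs any TLB invalidation deferred by a rollover.
 */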
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
	    atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}
/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}
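
/*
 * Install the new translation: TTBR0_EL1 takes the pgd (plus the CNP and
 * SW-PAN bits where relevant), while the ASID is programmed into TTBR1_EL1
 * because TCR_EL1.A1 selects TTBR1 as the ASID source.
 */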
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}
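
/*
 * Early initcall: size the allocator from the boot CPU's ASIDBits field and
 * allocate the ASID bitmap before secondary CPUs or user tasks exist.
 */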
static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);