1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/init.h>
5 #include <linux/spinlock.h>
7 #include <linux/interrupt.h>
8 #include <linux/export.h>
10 #include <linux/debugfs.h>
12 #include <asm/tlbflush.h>
13 #include <asm/mmu_context.h>
14 #include <asm/nospec-branch.h>
15 #include <asm/cache.h>
18 #include "mm_internal.h"
20 #ifdef CONFIG_PARAVIRT
21 # define STATIC_NOPV
22 #else
23 # define STATIC_NOPV static
24 # define __flush_tlb_local native_flush_tlb_local
25 # define __flush_tlb_global native_flush_tlb_global
26 # define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr)
27 # define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info)
28 #endif
31 * TLB flushing, formerly SMP-only
34 * These mean you can really definitely utterly forget about
35 * writing to user space from interrupts. (It's not allowed anyway.)
37 * Optimizations Manfred Spraul <manfred@colorfullife.com>
39 * More scalable flush, from Andi Kleen
41 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
45 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
46 * stored in cpu_tlb_state.last_user_mm_ibpb.
48 #define LAST_USER_MM_IBPB 0x1UL
51 * The x86 feature is called PCID (Process Context IDentifier). It is similar
52 * to what is traditionally called ASID on the RISC processors.
54 * We don't use the traditional ASID implementation, where each process/mm gets
55 * its own ASID and flush/restart when we run out of ASID space.
57 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
58 * that came by on this CPU, allowing cheaper switch_mm between processes on this CPU.
61 * We end up with different spaces for different things. To avoid confusion we
62 * use different names for each of them:
64 * ASID - [0, TLB_NR_DYN_ASIDS-1]
65 * the canonical identifier for an mm
67 * kPCID - [1, TLB_NR_DYN_ASIDS]
68 * the value we write into the PCID part of CR3; corresponds to the
69 * ASID+1, because PCID 0 is special.
71 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
72 * for KPTI each mm has two address spaces and thus needs two
73 * PCID values, but we can still do with a single ASID denomination
74 * for each mm. Corresponds to kPCID + 2048.
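/*
 * Worked example (illustrative only, not used by the code below): assuming
 * the KPTI user/kernel switch bit is bit 11 of the PCID (so it adds 2048),
 * ASID 0 is written to CR3 as kPCID 1 and its user counterpart is
 * uPCID 2048 + 1 = 2049; ASID 5 becomes kPCID 6 and uPCID 2054.
 */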
78 /* There are 12 bits of space for ASIDs in CR3 */
79 #define CR3_HW_ASID_BITS 12
82 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
83 * user/kernel switches
85 #ifdef CONFIG_PAGE_TABLE_ISOLATION
86 # define PTI_CONSUMED_PCID_BITS 1
87 #else
88 # define PTI_CONSUMED_PCID_BITS 0
89 #endif
91 #define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
94 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
95 * for them being zero-based. Another -1 is because PCID 0 is reserved for
96 * use by non-PCID-aware users.
98 #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
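/*
 * For example, with the 12 hardware PCID bits and PTI consuming one of
 * them, CR3_AVAIL_PCID_BITS == 11 and MAX_ASID_AVAILABLE == (1 << 11) - 2
 * == 2046; without PTI it is (1 << 12) - 2 == 4094.
 */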
101 * Given @asid, compute kPCID
103 static inline u16 kern_pcid(u16 asid)
105 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
107 #ifdef CONFIG_PAGE_TABLE_ISOLATION
109 * Make sure that the dynamic ASID space does not conflict with the
110 * bit we are using to switch between user and kernel ASIDs.
112 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
115 * The ASID being passed in here should have respected the
116 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
118 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
121 * The dynamically-assigned ASIDs that get passed in are small
122 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
123 * so do not bother to clear it.
125 * If PCID is on, ASID-aware code paths put the ASID+1 into the
126 * PCID bits. This serves two purposes. It prevents a nasty
127 * situation in which PCID-unaware code saves CR3, loads some other
128 * value (with PCID == 0), and then restores CR3, thus corrupting
129 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
130 * that any bugs involving loading a PCID-enabled CR3 with
131 * CR4.PCIDE off will trigger deterministically.
137 * Given @asid, compute uPCID
139 static inline u16 user_pcid(u16 asid)
141 u16 ret = kern_pcid(asid);
142 #ifdef CONFIG_PAGE_TABLE_ISOLATION
143 ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
148 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
150 if (static_cpu_has(X86_FEATURE_PCID)) {
151 return __sme_pa(pgd) | kern_pcid(asid);
153 VM_WARN_ON_ONCE(asid != 0);
154 return __sme_pa(pgd);
158 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
160 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
162 * Use boot_cpu_has() instead of this_cpu_has() as this function
163 * might be called during early boot. This should work even after
164 * boot because all CPUs have the same capabilities:
166 VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
167 return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
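/*
 * Illustrative layout of the value built above (a sketch, assuming
 * CR3_NOFLUSH is the architectural "no flush" bit 63): bits 11:0 carry
 * kern_pcid(asid), the middle bits carry the physical address of the pgd
 * (plus the SME C-bit when memory encryption is active), and bit 63 asks
 * the CPU not to flush TLB entries tagged with this PCID on the write.
 */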
171 * We get here when we do something requiring a TLB invalidation
172 * but could not go invalidate all of the contexts. We do the
173 * necessary invalidation by clearing out the 'ctx_id' which
174 * forces a TLB flush when the context is loaded.
176 static void clear_asid_other(void)
181 * This is only expected to be set if we have disabled
182 * kernel _PAGE_GLOBAL pages.
184 if (!static_cpu_has(X86_FEATURE_PTI)) {
189 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
190 /* Do not need to flush the current asid */
191 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
194 * Make sure the next time we go to switch to
195 * this asid, we do a flush:
197 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
199 this_cpu_write(cpu_tlbstate.invalidate_other, false);
202 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
205 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
206 u16 *new_asid, bool *need_flush)
210 if (!static_cpu_has(X86_FEATURE_PCID)) {
216 if (this_cpu_read(cpu_tlbstate.invalidate_other))
219 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
220 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
221 next->context.ctx_id)
225 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
231 * We don't currently own an ASID slot on this CPU.
234 *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
235 if (*new_asid >= TLB_NR_DYN_ASIDS) {
237 this_cpu_write(cpu_tlbstate.next_asid, 1);
243 * Given an ASID, flush the corresponding user ASID. We can delay this
244 * until the next time we switch to it.
246 * See SWITCH_TO_USER_CR3.
248 static inline void invalidate_user_asid(u16 asid)
250 /* There is no user ASID if address space separation is off */
251 if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
255 * We only have a single ASID if PCID is off and the CR3
256 * write will have flushed it.
258 if (!cpu_feature_enabled(X86_FEATURE_PCID))
261 if (!static_cpu_has(X86_FEATURE_PTI))
264 __set_bit(kern_pcid(asid),
265 (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
268 static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
270 unsigned long new_mm_cr3;
273 invalidate_user_asid(new_asid);
274 new_mm_cr3 = build_cr3(pgdir, new_asid);
276 new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
280 * Caution: many callers of this function expect
281 * that load_cr3() is serializing and orders TLB
282 * fills with respect to the mm_cpumask writes.
284 write_cr3(new_mm_cr3);
287 void leave_mm(int cpu)
289 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
292 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
293 * If so, our callers still expect us to flush the TLB, but there
294 * aren't any user TLB entries in init_mm to worry about.
296 * This needs to happen before any other sanity checks due to
297 * intel_idle's shenanigans.
299 if (loaded_mm == &init_mm)
302 /* Warn if we're not lazy. */
303 WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
305 switch_mm(NULL, &init_mm, NULL);
307 EXPORT_SYMBOL_GPL(leave_mm);
309 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
310 struct task_struct *tsk)
314 local_irq_save(flags);
315 switch_mm_irqs_off(prev, next, tsk);
316 local_irq_restore(flags);
319 static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
321 unsigned long next_tif = task_thread_info(next)->flags;
322 unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
324 return (unsigned long)next->mm | ibpb;
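/*
 * Example (illustrative): mm_struct pointers are at least word aligned, so
 * bit 0 is always clear and is free to carry the TIF_SPEC_IB state. The
 * same mm is thus recorded as either (mm | 0) or (mm | 1) depending on the
 * incoming task's flag, which is what cond_ibpb() compares below.
 */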
327 static void cond_ibpb(struct task_struct *next)
329 if (!next || !next->mm)
333 * Both the conditional and the always IBPB mode use the mm
334 * pointer to avoid the IBPB when switching between tasks of the
335 * same process. Using the mm pointer instead of mm->context.ctx_id
336 * opens a hypothetical hole vs. mm_struct reuse, which is more or
337 * less impossible to control by an attacker. Aside from that, it
338 * would only affect the first schedule so the theoretically
339 * exposed data is not really interesting.
341 if (static_branch_likely(&switch_mm_cond_ibpb)) {
342 unsigned long prev_mm, next_mm;
345 * This is a bit more complex than the always mode because
346 * it has to handle two cases:
348 * 1) Switch from a user space task (potential attacker)
349 * which has TIF_SPEC_IB set to a user space task
350 * (potential victim) which has TIF_SPEC_IB not set.
352 * 2) Switch from a user space task (potential attacker)
353 * which has TIF_SPEC_IB not set to a user space task
354 * (potential victim) which has TIF_SPEC_IB set.
356 * This could be done by unconditionally issuing IBPB when
357 * a task which has TIF_SPEC_IB set is either scheduled in
358 * or out. Though that results in two flushes when:
360 * - the same user space task is scheduled out and later
361 * scheduled in again and only a kernel thread ran in between.
364 * - a user space task belonging to the same process is
365 * scheduled in after a kernel thread ran in between
367 * - a user space task belonging to the same process is
368 * scheduled in immediately.
370 * Optimize this with reasonably small overhead for the
371 * above cases. Mangle the TIF_SPEC_IB bit into the mm
372 * pointer of the incoming task which is stored in
373 * cpu_tlbstate.last_user_mm_ibpb for comparison.
375 next_mm = mm_mangle_tif_spec_ib(next);
376 prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
379 * Issue IBPB only if the mm's are different and one or
380 * both have the IBPB bit set.
382 if (next_mm != prev_mm &&
383 (next_mm | prev_mm) & LAST_USER_MM_IBPB)
384 indirect_branch_prediction_barrier();
386 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
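/*
 * Sketch of the conditional-mode decision above (illustrative): with
 * mmA/mmB denoting two different mm pointers and "|1" the mangled
 * TIF_SPEC_IB bit:
 *
 *   prev_mm   next_mm   IBPB issued?
 *   mmA|0     mmA|1     yes (same mm, incoming task opted in)
 *   mmA|1     mmB|0     yes (different mm, outgoing task opted in)
 *   mmA|0     mmB|0     no  (neither task has TIF_SPEC_IB set)
 *   mmA|1     mmA|1     no  (nothing changed)
 */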
389 if (static_branch_unlikely(&switch_mm_always_ibpb)) {
391 * Only flush when switching to a user space task with a
392 * different context than the user space task which ran last on this CPU.
395 if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
396 indirect_branch_prediction_barrier();
397 this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
402 #ifdef CONFIG_PERF_EVENTS
403 static inline void cr4_update_pce_mm(struct mm_struct *mm)
405 if (static_branch_unlikely(&rdpmc_always_available_key) ||
406 (!static_branch_unlikely(&rdpmc_never_available_key) &&
407 atomic_read(&mm->context.perf_rdpmc_allowed)))
408 cr4_set_bits_irqsoff(X86_CR4_PCE);
410 cr4_clear_bits_irqsoff(X86_CR4_PCE);
413 void cr4_update_pce(void *ignored)
415 cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
419 static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
422 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
423 struct task_struct *tsk)
425 struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
426 u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
427 bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
428 unsigned cpu = smp_processor_id();
434 * NB: The scheduler will call us with prev == next when switching
435 * from lazy TLB mode to normal mode if active_mm isn't changing.
436 * When this happens, we don't assume that CR3 (and hence
437 * cpu_tlbstate.loaded_mm) matches next.
439 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
442 /* We don't want flush_tlb_func_* to run concurrently with us. */
443 if (IS_ENABLED(CONFIG_PROVE_LOCKING))
444 WARN_ON_ONCE(!irqs_disabled());
447 * Verify that CR3 is what we think it is. This will catch
448 * hypothetical buggy code that directly switches to swapper_pg_dir
449 * without going through leave_mm() / switch_mm_irqs_off() or that
450 * does something like write_cr3(read_cr3_pa()).
452 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3() isn't free.
455 #ifdef CONFIG_DEBUG_VM
456 if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
458 * If we were to BUG here, we'd be very likely to kill
459 * the system so hard that we don't see the call trace.
460 * Try to recover instead by ignoring the error and doing
461 * a global flush to minimize the chance of corruption.
463 * (This is far from being a fully correct recovery.
464 * Architecturally, the CPU could prefetch something
465 * back into an incorrect ASID slot and leave it there
466 * to cause trouble down the road. It's better than nothing, though.)
472 this_cpu_write(cpu_tlbstate.is_lazy, false);
475 * The membarrier system call requires a full memory barrier and
476 * core serialization before returning to user-space, after
477 * storing to rq->curr. Writing to CR3 provides that full
478 * memory barrier and core serializing instruction.
480 if (real_prev == next) {
481 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
482 next->context.ctx_id);
485 * Even in lazy TLB mode, the CPU should stay set in the
486 * mm_cpumask. The TLB shootdown code can figure out
487 * from cpu_tlbstate.is_lazy whether or not to send an IPI.
489 if (WARN_ON_ONCE(real_prev != &init_mm &&
490 !cpumask_test_cpu(cpu, mm_cpumask(next))))
491 cpumask_set_cpu(cpu, mm_cpumask(next));
494 * If the CPU is not in lazy TLB mode, we are just switching
495 * from one thread in a process to another thread in the same
496 * process. No TLB flush required.
502 * Read the tlb_gen to check whether a flush is needed.
503 * If the TLB is up to date, just use it.
504 * The barrier synchronizes with the tlb_gen increment in
505 * the TLB shootdown code.
508 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
509 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
514 * TLB contents went out of date while we were in lazy
515 * mode. Fall through to the TLB switching code below.
517 new_asid = prev_asid;
521 * Avoid user/user BTB poisoning by flushing the branch
522 * predictor when switching between processes. This stops
523 * one process from doing Spectre-v2 attacks on another.
528 * Stop remote flushes for the previous mm.
529 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
530 * but the bitmap manipulation can cause cache line contention.
532 if (real_prev != &init_mm) {
533 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
534 mm_cpumask(real_prev)));
535 cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
539 * Start remote flushes and then read tlb_gen.
541 if (next != &init_mm)
542 cpumask_set_cpu(cpu, mm_cpumask(next));
543 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
545 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
547 /* Let nmi_uaccess_okay() know that we're changing CR3. */
548 this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
553 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
554 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
555 load_new_mm_cr3(next->pgd, new_asid, true);
557 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
559 /* The new ASID is already up to date. */
560 load_new_mm_cr3(next->pgd, new_asid, false);
562 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
565 /* Make sure we write CR3 before loaded_mm. */
568 this_cpu_write(cpu_tlbstate.loaded_mm, next);
569 this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
571 if (next != real_prev) {
572 cr4_update_pce_mm(next);
573 switch_ldt(real_prev, next);
578 * Please ignore the name of this function. It should be called
579 * switch_to_kernel_thread().
581 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
582 * kernel thread or other context without an mm. Acceptable implementations
583 * include doing nothing whatsoever, switching to init_mm, or various clever
584 * lazy tricks to try to minimize TLB flushes.
586 * The scheduler reserves the right to call enter_lazy_tlb() several times
587 * in a row. It will notify us that we're going back to a real mm by
588 * calling switch_mm_irqs_off().
590 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
592 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
595 this_cpu_write(cpu_tlbstate.is_lazy, true);
599 * Call this when reinitializing a CPU. It fixes the following potential problems:
602 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
603 * because the CPU was taken down and came back up with CR3's PCID
604 * bits clear. CPU hotplug can do this.
606 * - The TLB contains junk in slots corresponding to inactive ASIDs.
608 * - The CPU went so far out to lunch that it may have missed a TLB flush.
611 void initialize_tlbstate_and_flush(void)
614 struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
615 u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
616 unsigned long cr3 = __read_cr3();
618 /* Assert that CR3 already references the right mm. */
619 WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
622 * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
623 * doesn't work like other CR4 bits because it can only be set from long mode.)
626 WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
627 !(cr4_read_shadow() & X86_CR4_PCIDE));
629 /* Force ASID 0 and force a TLB flush. */
630 write_cr3(build_cr3(mm->pgd, 0));
632 /* Reinitialize tlbstate. */
633 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
634 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
635 this_cpu_write(cpu_tlbstate.next_asid, 1);
636 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
637 this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
639 for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
640 this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
644 * flush_tlb_func_common()'s memory ordering requirement is that any
645 * TLB fills that happen after we flush the TLB are ordered after we
646 * read active_mm's tlb_gen. We don't need any explicit barriers
647 * because all x86 flush operations are serializing and the
648 * atomic64_read operation won't be reordered by the compiler.
650 static void flush_tlb_func_common(const struct flush_tlb_info *f,
651 bool local, enum tlb_flush_reason reason)
654 * We have three different tlb_gen values in here. They are:
656 * - mm_tlb_gen: the latest generation.
657 * - local_tlb_gen: the generation that this CPU has already caught up to.
659 * - f->new_tlb_gen: the generation that the requester of the flush
660 * wants us to catch up to.
662 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
663 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
664 u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
665 u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
667 /* This code cannot presently handle being reentered. */
668 VM_WARN_ON(!irqs_disabled());
670 if (unlikely(loaded_mm == &init_mm))
673 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
674 loaded_mm->context.ctx_id);
676 if (this_cpu_read(cpu_tlbstate.is_lazy)) {
678 * We're in lazy mode. We need to at least flush our
679 * paging-structure cache to avoid speculatively reading
680 * garbage into our TLB. Since switching to init_mm is barely
681 * slower than a minimal flush, just switch to init_mm.
683 * This should be rare, with native_flush_tlb_others skipping
684 * IPIs to lazy TLB mode CPUs.
686 switch_mm_irqs_off(NULL, &init_mm, NULL);
690 if (unlikely(local_tlb_gen == mm_tlb_gen)) {
692 * There's nothing to do: we're already up to date. This can
693 * happen if two concurrent flushes happen -- the first flush to
694 * be handled can catch us all the way up, leaving no work for the second flush.
697 trace_tlb_flush(reason, 0);
701 WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
702 WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
705 * If we get to this point, we know that our TLB is out of date.
706 * This does not strictly imply that we need to flush (it's
707 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
708 * going to need to flush in the very near future, so we might
709 * as well get it over with.
711 * The only question is whether to do a full or partial flush.
713 * We do a partial flush if requested and two extra conditions are met:
716 * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that
717 * we've always done all needed flushes to catch up to
718 * local_tlb_gen. If, for example, local_tlb_gen == 2 and
719 * f->new_tlb_gen == 3, then we know that the flush needed to bring
720 * us up to date for tlb_gen 3 is the partial flush we're handling now.
723 * As an example of why this check is needed, suppose that there
724 * are two concurrent flushes. The first is a full flush that
725 * changes context.tlb_gen from 1 to 2. The second is a partial
726 * flush that changes context.tlb_gen from 2 to 3. If they get
727 * processed on this CPU in reverse order, we'll see
728 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
729 * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
730 * 3, we'd break the invariant: we'd update local_tlb_gen above
731 * 1 without the full flush that's needed for tlb_gen 2.
733 * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
734 * Partial TLB flushes are not all that much cheaper than full TLB
735 * flushes, so it seems unlikely that it would be a performance win
736 * to do a partial flush if that won't bring our TLB fully up to
737 * date. By doing a full flush instead, we can increase
738 * local_tlb_gen all the way to mm_tlb_gen and we can probably
739 * avoid another flush in the very near future.
741 if (f->end != TLB_FLUSH_ALL &&
742 f->new_tlb_gen == local_tlb_gen + 1 &&
743 f->new_tlb_gen == mm_tlb_gen) {
745 unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
746 unsigned long addr = f->start;
748 while (addr < f->end) {
749 flush_tlb_one_user(addr);
750 addr += 1UL << f->stride_shift;
753 count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
754 trace_tlb_flush(reason, nr_invalidate);
759 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
760 trace_tlb_flush(reason, TLB_FLUSH_ALL);
763 /* Both paths above update our state to mm_tlb_gen. */
764 this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
767 static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
769 const struct flush_tlb_info *f = info;
771 flush_tlb_func_common(f, true, reason);
774 static void flush_tlb_func_remote(void *info)
776 const struct flush_tlb_info *f = info;
778 inc_irq_stat(irq_tlb_count);
780 if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
783 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
784 flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
787 static bool tlb_is_not_lazy(int cpu, void *data)
789 return !per_cpu(cpu_tlbstate.is_lazy, cpu);
792 STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
793 const struct flush_tlb_info *info)
795 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
796 if (info->end == TLB_FLUSH_ALL)
797 trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
799 trace_tlb_flush(TLB_REMOTE_SEND_IPI,
800 (info->end - info->start) >> PAGE_SHIFT);
803 * If no page tables were freed, we can skip sending IPIs to
804 * CPUs in lazy TLB mode. They will flush the CPU themselves
805 * at the next context switch.
807 * However, if page tables are getting freed, we need to send the
808 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
809 * up on the new contents of what used to be page tables, while
810 * doing a speculative memory access.
812 if (info->freed_tables)
813 smp_call_function_many(cpumask, flush_tlb_func_remote,
816 on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
817 (void *)info, 1, cpumask);
820 void flush_tlb_others(const struct cpumask *cpumask,
821 const struct flush_tlb_info *info)
823 __flush_tlb_others(cpumask, info);
827 * See Documentation/x86/tlb.rst for details. We choose 33
828 * because it is large enough to cover the vast majority (at
829 * least 95%) of allocations, and is small enough that we are
830 * confident it will not cause too much overhead. Each single
831 * flush is about 100 ns, so this caps the maximum overhead at about 3,300 ns.
834 * This is in units of pages.
836 unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
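/*
 * Worked example (illustrative): with the default ceiling of 33 and 4 KiB
 * pages, a 128 KiB range (32 pages) is flushed with individual INVLPGs,
 * while a 256 KiB range (64 pages) exceeds the ceiling and is promoted to
 * a full TLB flush by flush_tlb_mm_range() below.
 */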
838 static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
840 #ifdef CONFIG_DEBUG_VM
841 static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
844 static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
845 unsigned long start, unsigned long end,
846 unsigned int stride_shift, bool freed_tables,
849 struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
851 #ifdef CONFIG_DEBUG_VM
853 * Ensure that the following code is non-reentrant and flush_tlb_info
854 * is not overwritten. This means no TLB flushing is initiated by
855 * interrupt handlers and machine-check exception handlers.
857 BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
863 info->stride_shift = stride_shift;
864 info->freed_tables = freed_tables;
865 info->new_tlb_gen = new_tlb_gen;
870 static inline void put_flush_tlb_info(void)
872 #ifdef CONFIG_DEBUG_VM
873 /* Complete reentrancy prevention checks */
875 this_cpu_dec(flush_tlb_info_idx);
879 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
880 unsigned long end, unsigned int stride_shift,
883 struct flush_tlb_info *info;
889 /* Should we flush just the requested range? */
890 if ((end == TLB_FLUSH_ALL) ||
891 ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
896 /* This is also a barrier that synchronizes with switch_mm(). */
897 new_tlb_gen = inc_mm_tlb_gen(mm);
899 info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
902 if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
903 lockdep_assert_irqs_enabled();
905 flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
909 if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
910 flush_tlb_others(mm_cpumask(mm), info);
912 put_flush_tlb_info();
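/*
 * Usage sketch (illustrative; flush_tlb_page() in <asm/tlbflush.h> is a
 * thin wrapper along these lines): to flush a single 4 KiB user mapping
 * after a PTE change, a caller would do
 *
 *	flush_tlb_mm_range(vma->vm_mm, addr, addr + PAGE_SIZE,
 *			   PAGE_SHIFT, false);
 */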
917 static void do_flush_tlb_all(void *info)
919 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
923 void flush_tlb_all(void)
925 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
926 on_each_cpu(do_flush_tlb_all, NULL, 1);
929 static void do_kernel_range_flush(void *info)
931 struct flush_tlb_info *f = info;
934 /* Flush the range one page at a time with 'invlpg'. */
935 for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
936 flush_tlb_one_kernel(addr);
939 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
941 /* Use the same heuristic as for a user space task's flush; a bit conservative. */
942 if (end == TLB_FLUSH_ALL ||
943 (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
944 on_each_cpu(do_flush_tlb_all, NULL, 1);
946 struct flush_tlb_info *info;
949 info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
951 on_each_cpu(do_kernel_range_flush, info, 1);
953 put_flush_tlb_info();
959 * This can be used from process context to figure out what the value of
960 * CR3 is without needing to do a (slow) __read_cr3().
962 * It's intended to be used for code like KVM that sneakily changes CR3
963 * and needs to restore it. It needs to be used very carefully.
965 unsigned long __get_current_cr3_fast(void)
967 unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
968 this_cpu_read(cpu_tlbstate.loaded_mm_asid));
970 /* For now, be very restrictive about when this can be called. */
971 VM_WARN_ON(in_nmi() || preemptible());
973 VM_BUG_ON(cr3 != __read_cr3());
976 EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
979 * Flush one page in the kernel mapping
981 void flush_tlb_one_kernel(unsigned long addr)
983 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
986 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
987 * paravirt equivalent. Even with PCID, this is sufficient: we only
988 * use PCID if we also use global PTEs for the kernel mapping, and
989 * INVLPG flushes global translations across all address spaces.
991 * If PTI is on, then the kernel is mapped with non-global PTEs, and
992 * __flush_tlb_one_user() will flush the given address for the current
993 * kernel address space and for its usermode counterpart, but it does
994 * not flush it for other address spaces.
996 flush_tlb_one_user(addr);
998 if (!static_cpu_has(X86_FEATURE_PTI))
1002 * See above. We need to propagate the flush to all other address
1003 * spaces. In principle, we only need to propagate it to kernelmode
1004 * address spaces, but the extra bookkeeping we would need is not worth it.
1007 this_cpu_write(cpu_tlbstate.invalidate_other, true);
1011 * Flush one page in the user mapping
1013 STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
1015 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1017 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
1019 if (!static_cpu_has(X86_FEATURE_PTI))
1023 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
1024 * Just use invalidate_user_asid() in case we are called early.
1026 if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
1027 invalidate_user_asid(loaded_mm_asid);
1029 invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
1032 void flush_tlb_one_user(unsigned long addr)
1034 __flush_tlb_one_user(addr);
1040 STATIC_NOPV void native_flush_tlb_global(void)
1042 unsigned long cr4, flags;
1044 if (static_cpu_has(X86_FEATURE_INVPCID)) {
1046 * Using INVPCID is considerably faster than a pair of writes
1047 * to CR4 sandwiched inside an IRQ flag save/restore.
1049 * Note, this works with CR4.PCIDE=0 or 1.
1051 invpcid_flush_all();
1056 * Read-modify-write to CR4 - protect it from preemption and
1057 * from interrupts. (Use the raw variant because this code can
1058 * be called from deep inside debugging code.)
1060 raw_local_irq_save(flags);
1062 cr4 = this_cpu_read(cpu_tlbstate.cr4);
1064 native_write_cr4(cr4 ^ X86_CR4_PGE);
1065 /* write old PGE again and flush TLBs */
1066 native_write_cr4(cr4);
1068 raw_local_irq_restore(flags);
1072 * Flush the entire current user mapping
1074 STATIC_NOPV void native_flush_tlb_local(void)
1077 * Preemption or interrupts must be disabled to protect the access
1078 * to the per CPU variable and to prevent being preempted between
1079 * read_cr3() and write_cr3().
1081 WARN_ON_ONCE(preemptible());
1083 invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
1085 /* If current->mm == NULL then the read_cr3() "borrows" an mm */
1086 native_write_cr3(__native_read_cr3());
1089 void flush_tlb_local(void)
1091 __flush_tlb_local();
1097 void __flush_tlb_all(void)
1100 * This catches callers with preemption enabled and the PGE feature, who
1101 * would otherwise not trigger the preemptibility warning in native_flush_tlb_local().
1103 VM_WARN_ON_ONCE(preemptible());
1105 if (boot_cpu_has(X86_FEATURE_PGE)) {
1106 __flush_tlb_global();
1109 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
1114 EXPORT_SYMBOL_GPL(__flush_tlb_all);
1117 * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
1118 * This means that the 'struct flush_tlb_info' that describes which mappings to
1119 * flush is actually fixed. We therefore set a single fixed struct and use it in
1120 * arch_tlbbatch_flush().
1122 static const struct flush_tlb_info full_flush_tlb_info = {
1125 .end = TLB_FLUSH_ALL,
1128 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
1130 int cpu = get_cpu();
1132 if (cpumask_test_cpu(cpu, &batch->cpumask)) {
1133 lockdep_assert_irqs_enabled();
1134 local_irq_disable();
1135 flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
1139 if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
1140 flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
1142 cpumask_clear(&batch->cpumask);
1148 * Blindly accessing user memory from NMI context can be dangerous
1149 * if we're in the middle of switching the current user task or
1150 * switching the loaded mm. It can also be dangerous if we
1151 * interrupted some kernel code that was temporarily using a different mm.
1154 bool nmi_uaccess_okay(void)
1156 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1157 struct mm_struct *current_mm = current->mm;
1159 VM_WARN_ON_ONCE(!loaded_mm);
1162 * The condition we want to check is
1163 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
1164 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
1165 * is supposed to be reasonably fast.
1167 * Instead, we check the almost equivalent but somewhat conservative
1168 * condition below, and we rely on the fact that switch_mm_irqs_off()
1169 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
1171 if (loaded_mm != current_mm)
1174 VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
1179 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
1180 size_t count, loff_t *ppos)
1185 len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
1186 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1189 static ssize_t tlbflush_write_file(struct file *file,
1190 const char __user *user_buf, size_t count, loff_t *ppos)
1196 len = min(count, sizeof(buf) - 1);
1197 if (copy_from_user(buf, user_buf, len))
1201 if (kstrtoint(buf, 0, &ceiling))
1207 tlb_single_page_flush_ceiling = ceiling;
1211 static const struct file_operations fops_tlbflush = {
1212 .read = tlbflush_read_file,
1213 .write = tlbflush_write_file,
1214 .llseek = default_llseek,
1217 static int __init create_tlb_single_page_flush_ceiling(void)
1219 debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
1220 arch_debugfs_dir, NULL, &fops_tlbflush);
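/*
 * The knob created above normally shows up as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling (assuming debugfs is
 * mounted at /sys/kernel/debug); writing a new value to it, e.g. with
 * "echo 50 > tlb_single_page_flush_ceiling", tunes how large a range may
 * be flushed page by page before the flush helpers fall back to a full
 * flush.
 */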
1223 late_initcall(create_tlb_single_page_flush_ceiling);