1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/init.h>
5 #include <linux/spinlock.h>
7 #include <linux/interrupt.h>
8 #include <linux/export.h>
10 #include <linux/debugfs.h>
12 #include <asm/tlbflush.h>
13 #include <asm/mmu_context.h>
14 #include <asm/nospec-branch.h>
15 #include <asm/cache.h>
18 #include "mm_internal.h"
20 #ifdef CONFIG_PARAVIRT
21 # define STATIC_NOPV
22 #else
23 # define STATIC_NOPV static
24 # define __flush_tlb_local native_flush_tlb_local
25 # define __flush_tlb_global native_flush_tlb_global
26 # define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr)
27 # define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info)
28 #endif
31 * TLB flushing, formerly SMP-only
34 * These mean you can really definitely utterly forget about
35 * writing to user space from interrupts. (It's not allowed anyway).
37 * Optimizations Manfred Spraul <manfred@colorfullife.com>
39 * More scalable flush, from Andi Kleen
41 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
45 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
46 * stored in cpu_tlbstate.last_user_mm_ibpb.
48 #define LAST_USER_MM_IBPB 0x1UL
51 * The x86 feature is called PCID (Process Context IDentifier). It is similar
52 * to what is traditionally called ASID on the RISC processors.
54 * We don't use the traditional ASID implementation, where each process/mm gets
55 * its own ASID and flush/restart when we run out of ASID space.
57 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
58 * that came by on this CPU, allowing cheaper switch_mm between processes on
61 * We end up with different spaces for different things. To avoid confusion we
62 * use different names for each of them:
64 * ASID - [0, TLB_NR_DYN_ASIDS-1]
65 * the canonical identifier for an mm
67 * kPCID - [1, TLB_NR_DYN_ASIDS]
68 * the value we write into the PCID part of CR3; corresponds to the
69 * ASID+1, because PCID 0 is special.
71 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
72 * for KPTI each mm has two address spaces and thus needs two
73 * PCID values, but we can still do with a single ASID denomination
74 * for each mm. Corresponds to kPCID + 2048.
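/*
 * Illustrative example, added by the editor (not part of the original
 * source): assuming TLB_NR_DYN_ASIDS == 6 as in current kernels, ASID 3
 * is written to CR3 as kPCID 4 (3 + 1, because PCID 0 is reserved), and
 * its KPTI userspace counterpart is uPCID 2052 (kPCID + 2048, i.e. the
 * same value with the user/kernel switch bit set).
 */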
78 /* There are 12 bits of space for ASIDs in CR3 */
79 #define CR3_HW_ASID_BITS 12
82 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
83 * user/kernel switches
85 #ifdef CONFIG_PAGE_TABLE_ISOLATION
86 # define PTI_CONSUMED_PCID_BITS 1
88 # define PTI_CONSUMED_PCID_BITS 0
91 #define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
94 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid. -1 below to account
95 * for them being zero-based. Another -1 is because PCID 0 is reserved for
96 * use by non-PCID-aware users.
98 #define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
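/*
 * Worked arithmetic, added for clarity (not in the original file): with
 * the 12 PCID bits in CR3 and PAGE_TABLE_ISOLATION stealing one of them,
 * CR3_AVAIL_PCID_BITS == 11 and MAX_ASID_AVAILABLE == (1 << 11) - 2 == 2046.
 * Without PTI the values are 12 and 4094 respectively.
 */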
101 * Given @asid, compute kPCID
103 static inline u16 kern_pcid(u16 asid)
105 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
107 #ifdef CONFIG_PAGE_TABLE_ISOLATION
109 * Make sure that the dynamic ASID space does not conflict with the
110 * bit we are using to switch between user and kernel ASIDs.
112 BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
115 * The ASID being passed in here should have respected the
116 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
118 VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT));
121 * The dynamically-assigned ASIDs that get passed in are small
122 * (<TLB_NR_DYN_ASIDS). They never have the high switch bit set,
123 * so do not bother to clear it.
125 * If PCID is on, ASID-aware code paths put the ASID+1 into the
126 * PCID bits. This serves two purposes. It prevents a nasty
127 * situation in which PCID-unaware code saves CR3, loads some other
128 * value (with PCID == 0), and then restores CR3, thus corrupting
129 * the TLB for ASID 0 if the saved ASID was nonzero. It also means
130 * that any bugs involving loading a PCID-enabled CR3 with
131 * CR4.PCIDE off will trigger deterministically.
137 * Given @asid, compute uPCID
139 static inline u16 user_pcid(u16 asid)
141 u16 ret = kern_pcid(asid);
142 #ifdef CONFIG_PAGE_TABLE_ISOLATION
143 ret |= 1 << X86_CR3_PTI_PCID_USER_BIT;
148 static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
150 if (static_cpu_has(X86_FEATURE_PCID)) {
151 return __sme_pa(pgd) | kern_pcid(asid);
153 VM_WARN_ON_ONCE(asid != 0);
154 return __sme_pa(pgd);
158 static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
160 VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
162 * Use boot_cpu_has() instead of this_cpu_has() as this function
163 * might be called during early boot. This should work even after
164 * boot because all CPUs have the same capabilities:
166 VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
167 return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
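/*
 * Editorial sketch of the resulting CR3 layout (an assumption spelled out
 * here, not a comment from the original author): bit 63 is CR3_NOFLUSH
 * (suppress the implicit TLB flush on the CR3 write), bits 11:0 carry the
 * kPCID, and the remaining bits hold the physical address of the PGD,
 * with __sme_pa() adding the SME encryption bit where applicable.
 */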
171 * We get here when we do something requiring a TLB invalidation
172 * but could not go invalidate all of the contexts. We do the
173 * necessary invalidation by clearing out the 'ctx_id' which
174 * forces a TLB flush when the context is loaded.
176 static void clear_asid_other(void)
181 * This is only expected to be set if we have disabled
182 * kernel _PAGE_GLOBAL pages.
184 if (!static_cpu_has(X86_FEATURE_PTI)) {
189 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
190 /* Do not need to flush the current asid */
191 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
194 * Make sure the next time we go to switch to
195 * this asid, we do a flush:
197 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
199 this_cpu_write(cpu_tlbstate.invalidate_other, false);
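/*
 * Editorial note (added): a zeroed ctx_id can never match a real mm,
 * because mm context IDs are handed out from last_mm_ctx_id below, which
 * starts at 1. choose_new_asid() therefore treats such a slot as not
 * owning any mm and flushes before reusing it.
 */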
202 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
205 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
206 u16 *new_asid, bool *need_flush)
210 if (!static_cpu_has(X86_FEATURE_PCID)) {
216 if (this_cpu_read(cpu_tlbstate.invalidate_other))
219 for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
220 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
221 next->context.ctx_id)
225 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) <
231 * We don't currently own an ASID slot on this CPU.
234 *new_asid = this_cpu_add_return(cpu_tlbstate.next_asid, 1) - 1;
235 if (*new_asid >= TLB_NR_DYN_ASIDS) {
237 this_cpu_write(cpu_tlbstate.next_asid, 1);
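/*
 * Summary of the slot allocation above, added by the editor: without
 * PCID everything runs as ASID 0 and every switch flushes. With PCID we
 * scan the TLB_NR_DYN_ASIDS per-CPU slots for one whose ctx_id matches
 * @next and reuse it, flushing only if its cached tlb_gen is stale;
 * otherwise a slot is evicted round-robin via next_asid and a full flush
 * is requested for it.
 */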
243 * Given an ASID, flush the corresponding user ASID. We can delay this
244 * until the next time we switch to it.
246 * See SWITCH_TO_USER_CR3.
248 static inline void invalidate_user_asid(u16 asid)
250 /* There is no user ASID if address space separation is off */
251 if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
255 * We only have a single ASID if PCID is off and the CR3
256 * write will have flushed it.
258 if (!cpu_feature_enabled(X86_FEATURE_PCID))
261 if (!static_cpu_has(X86_FEATURE_PTI))
264 __set_bit(kern_pcid(asid),
265 (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
268 static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
270 unsigned long new_mm_cr3;
273 invalidate_user_asid(new_asid);
274 new_mm_cr3 = build_cr3(pgdir, new_asid);
276 new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
280 * Caution: many callers of this function expect
281 * that load_cr3() is serializing and orders TLB
282 * fills with respect to the mm_cpumask writes.
284 write_cr3(new_mm_cr3);
287 void leave_mm(int cpu)
289 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
292 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
293 * If so, our callers still expect us to flush the TLB, but there
294 * aren't any user TLB entries in init_mm to worry about.
296 * This needs to happen before any other sanity checks due to
297 * intel_idle's shenanigans.
299 if (loaded_mm == &init_mm)
302 /* Warn if we're not lazy. */
303 WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
305 switch_mm(NULL, &init_mm, NULL);
307 EXPORT_SYMBOL_GPL(leave_mm);
309 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
310 struct task_struct *tsk)
314 local_irq_save(flags);
315 switch_mm_irqs_off(prev, next, tsk);
316 local_irq_restore(flags);
319 static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
321 unsigned long next_tif = task_thread_info(next)->flags;
322 unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
324 return (unsigned long)next->mm | ibpb;
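/*
 * Illustrative example (editor's addition, hypothetical pointer value):
 * for an mm_struct at 0xffff888100234000 whose incoming task has
 * TIF_SPEC_IB set, the value stored for comparison is 0xffff888100234001.
 * mm_struct allocations are at least word aligned, so bit 0 is always
 * free to carry the flag.
 */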
327 static void cond_ibpb(struct task_struct *next)
329 if (!next || !next->mm)
333 * Both the conditional and the always IBPB mode use the mm
334 * pointer to avoid the IBPB when switching between tasks of the
335 * same process. Using the mm pointer instead of mm->context.ctx_id
336 * opens a hypothetical hole vs. mm_struct reuse, which is more or
337 * less impossible to control by an attacker. Aside from that, it
338 * would only affect the first schedule so the theoretically
339 * exposed data is not really interesting.
341 if (static_branch_likely(&switch_mm_cond_ibpb)) {
342 unsigned long prev_mm, next_mm;
345 * This is a bit more complex than the always mode because
346 * it has to handle two cases:
348 * 1) Switch from a user space task (potential attacker)
349 * which has TIF_SPEC_IB set to a user space task
350 * (potential victim) which has TIF_SPEC_IB not set.
352 * 2) Switch from a user space task (potential attacker)
353 * which has TIF_SPEC_IB not set to a user space task
354 * (potential victim) which has TIF_SPEC_IB set.
356 * This could be done by unconditionally issuing IBPB when
357 * a task which has TIF_SPEC_IB set is either scheduled in
358 * or out. Though that results in two flushes when:
360 * - the same user space task is scheduled out and later
361 * scheduled in again and only a kernel thread ran in between.
364 * - a user space task belonging to the same process is
365 * scheduled in after a kernel thread ran in between
367 * - a user space task belonging to the same process is
368 * scheduled in immediately.
370 * Optimize this with reasonably small overhead for the
371 * above cases. Mangle the TIF_SPEC_IB bit into the mm
372 * pointer of the incoming task which is stored in
373 * cpu_tlbstate.last_user_mm_ibpb for comparison.
375 next_mm = mm_mangle_tif_spec_ib(next);
376 prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
379 * Issue IBPB only if the mm's are different and one or
380 * both have the IBPB bit set.
382 if (next_mm != prev_mm &&
383 (next_mm | prev_mm) & LAST_USER_MM_IBPB)
384 indirect_branch_prediction_barrier();
386 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
389 if (static_branch_unlikely(&switch_mm_always_ibpb)) {
391 * Only flush when switching to a user space task with a
392 * different context than the user space task which ran last on this CPU.
395 if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
396 indirect_branch_prediction_barrier();
397 this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
402 #ifdef CONFIG_PERF_EVENTS
403 static inline void cr4_update_pce_mm(struct mm_struct *mm)
405 if (static_branch_unlikely(&rdpmc_always_available_key) ||
406 (!static_branch_unlikely(&rdpmc_never_available_key) &&
407 atomic_read(&mm->context.perf_rdpmc_allowed)))
408 cr4_set_bits_irqsoff(X86_CR4_PCE);
410 cr4_clear_bits_irqsoff(X86_CR4_PCE);
413 void cr4_update_pce(void *ignored)
415 cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
419 static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
422 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
423 struct task_struct *tsk)
425 struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
426 u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
427 bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
428 unsigned cpu = smp_processor_id();
434 * NB: The scheduler will call us with prev == next when switching
435 * from lazy TLB mode to normal mode if active_mm isn't changing.
436 * When this happens, we don't assume that CR3 (and hence
437 * cpu_tlbstate.loaded_mm) matches next.
439 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
442 /* We don't want flush_tlb_func_* to run concurrently with us. */
443 if (IS_ENABLED(CONFIG_PROVE_LOCKING))
444 WARN_ON_ONCE(!irqs_disabled());
447 * Verify that CR3 is what we think it is. This will catch
448 * hypothetical buggy code that directly switches to swapper_pg_dir
449 * without going through leave_mm() / switch_mm_irqs_off() or that
450 * does something like write_cr3(read_cr3_pa()).
452 * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3() is not free.
455 #ifdef CONFIG_DEBUG_VM
456 if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
458 * If we were to BUG here, we'd be very likely to kill
459 * the system so hard that we don't see the call trace.
460 * Try to recover instead by ignoring the error and doing
461 * a global flush to minimize the chance of corruption.
463 * (This is far from being a fully correct recovery.
464 * Architecturally, the CPU could prefetch something
465 * back into an incorrect ASID slot and leave it there
466 * to cause trouble down the road. It's better than nothing, though.)
472 this_cpu_write(cpu_tlbstate.is_lazy, false);
475 * The membarrier system call requires a full memory barrier and
476 * core serialization before returning to user-space, after
477 * storing to rq->curr, when changing mm. This is because
478 * membarrier() sends IPIs to all CPUs that are in the target mm
479 * to make them issue memory barriers. However, if another CPU
480 * switches to/from the target mm concurrently with
481 * membarrier(), it can cause that CPU not to receive an IPI
482 * when it really should issue a memory barrier. Writing to CR3
483 * provides that full memory barrier and core serializing
486 if (real_prev == next) {
487 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
488 next->context.ctx_id);
491 * Even in lazy TLB mode, the CPU should stay set in the
492 * mm_cpumask. The TLB shootdown code can figure out
493 * from cpu_tlbstate.is_lazy whether or not to send an IPI.
495 if (WARN_ON_ONCE(real_prev != &init_mm &&
496 !cpumask_test_cpu(cpu, mm_cpumask(next))))
497 cpumask_set_cpu(cpu, mm_cpumask(next));
500 * If the CPU is not in lazy TLB mode, we are just switching
501 * from one thread in a process to another thread in the same
502 * process. No TLB flush required.
508 * Read the tlb_gen to check whether a flush is needed.
509 * If the TLB is up to date, just use it.
510 * The barrier synchronizes with the tlb_gen increment in
511 * the TLB shootdown code.
514 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
515 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) ==
520 * TLB contents went out of date while we were in lazy
521 * mode. Fall through to the TLB switching code below.
523 new_asid = prev_asid;
527 * Avoid user/user BTB poisoning by flushing the branch
528 * predictor when switching between processes. This stops
529 * one process from doing Spectre-v2 attacks on another.
534 * Stop remote flushes for the previous mm.
535 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
536 * but the bitmap manipulation can cause cache line contention.
538 if (real_prev != &init_mm) {
539 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
540 mm_cpumask(real_prev)));
541 cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
545 * Start remote flushes and then read tlb_gen.
547 if (next != &init_mm)
548 cpumask_set_cpu(cpu, mm_cpumask(next));
549 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
551 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
553 /* Let nmi_uaccess_okay() know that we're changing CR3. */
554 this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
559 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
560 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
561 load_new_mm_cr3(next->pgd, new_asid, true);
563 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
565 /* The new ASID is already up to date. */
566 load_new_mm_cr3(next->pgd, new_asid, false);
568 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
571 /* Make sure we write CR3 before loaded_mm. */
574 this_cpu_write(cpu_tlbstate.loaded_mm, next);
575 this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
577 if (next != real_prev) {
578 cr4_update_pce_mm(next);
579 switch_ldt(real_prev, next);
584 * Please ignore the name of this function. It should be called
585 * switch_to_kernel_thread().
587 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
588 * kernel thread or other context without an mm. Acceptable implementations
589 * include doing nothing whatsoever, switching to init_mm, or various clever
590 * lazy tricks to try to minimize TLB flushes.
592 * The scheduler reserves the right to call enter_lazy_tlb() several times
593 * in a row. It will notify us that we're going back to a real mm by
594 * calling switch_mm_irqs_off().
596 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
598 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
601 this_cpu_write(cpu_tlbstate.is_lazy, true);
605 * Call this when reinitializing a CPU. It fixes the following potential
608 * - The ASID changed from what cpu_tlbstate thinks it is (most likely
609 * because the CPU was taken down and came back up with CR3's PCID
610 * bits clear. CPU hotplug can do this.
612 * - The TLB contains junk in slots corresponding to inactive ASIDs.
614 * - The CPU went so far out to lunch that it may have missed a TLB flush.
617 void initialize_tlbstate_and_flush(void)
620 struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
621 u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen);
622 unsigned long cr3 = __read_cr3();
624 /* Assert that CR3 already references the right mm. */
625 WARN_ON((cr3 & CR3_ADDR_MASK) != __pa(mm->pgd));
628 * Assert that CR4.PCIDE is set if needed. (CR4.PCIDE initialization
629 * doesn't work like other CR4 bits because it can only be set from long mode.)
632 WARN_ON(boot_cpu_has(X86_FEATURE_PCID) &&
633 !(cr4_read_shadow() & X86_CR4_PCIDE));
635 /* Force ASID 0 and force a TLB flush. */
636 write_cr3(build_cr3(mm->pgd, 0));
638 /* Reinitialize tlbstate. */
639 this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
640 this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
641 this_cpu_write(cpu_tlbstate.next_asid, 1);
642 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
643 this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen);
645 for (i = 1; i < TLB_NR_DYN_ASIDS; i++)
646 this_cpu_write(cpu_tlbstate.ctxs[i].ctx_id, 0);
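/*
 * Editorial note (added): after this, ASID 0 describes the currently
 * loaded mm with the tlb_gen read above, and every other dynamic slot is
 * marked invalid (ctx_id 0), so the next switch to a different mm goes
 * through choose_new_asid() and gets a flush.
 */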
650 * flush_tlb_func_common()'s memory ordering requirement is that any
651 * TLB fills that happen after we flush the TLB are ordered after we
652 * read active_mm's tlb_gen. We don't need any explicit barriers
653 * because all x86 flush operations are serializing and the
654 * atomic64_read operation won't be reordered by the compiler.
656 static void flush_tlb_func_common(const struct flush_tlb_info *f,
657 bool local, enum tlb_flush_reason reason)
660 * We have three different tlb_gen values in here. They are:
662 * - mm_tlb_gen: the latest generation.
663 * - local_tlb_gen: the generation that this CPU has already caught up to.
665 * - f->new_tlb_gen: the generation that the requester of the flush
666 * wants us to catch up to.
668 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
669 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
670 u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
671 u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
673 /* This code cannot presently handle being reentered. */
674 VM_WARN_ON(!irqs_disabled());
676 if (unlikely(loaded_mm == &init_mm))
679 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
680 loaded_mm->context.ctx_id);
682 if (this_cpu_read(cpu_tlbstate.is_lazy)) {
684 * We're in lazy mode. We need to at least flush our
685 * paging-structure cache to avoid speculatively reading
686 * garbage into our TLB. Since switching to init_mm is barely
687 * slower than a minimal flush, just switch to init_mm.
689 * This should be rare, with native_flush_tlb_others skipping
690 * IPIs to lazy TLB mode CPUs.
692 switch_mm_irqs_off(NULL, &init_mm, NULL);
696 if (unlikely(local_tlb_gen == mm_tlb_gen)) {
698 * There's nothing to do: we're already up to date. This can
699 * happen if two concurrent flushes happen -- the first flush to
700 * be handled can catch us all the way up, leaving no work for the second flush.
703 trace_tlb_flush(reason, 0);
707 WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
708 WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
711 * If we get to this point, we know that our TLB is out of date.
712 * This does not strictly imply that we need to flush (it's
713 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
714 * going to need to flush in the very near future, so we might
715 * as well get it over with.
717 * The only question is whether to do a full or partial flush.
719 * We do a partial flush if requested and two extra conditions
722 * 1. f->new_tlb_gen == local_tlb_gen + 1. We have an invariant that
723 * we've always done all needed flushes to catch up to
724 * local_tlb_gen. If, for example, local_tlb_gen == 2 and
725 * f->new_tlb_gen == 3, then we know that the flush needed to bring
726 * us up to date for tlb_gen 3 is the partial flush we're handling now.
729 * As an example of why this check is needed, suppose that there
730 * are two concurrent flushes. The first is a full flush that
731 * changes context.tlb_gen from 1 to 2. The second is a partial
732 * flush that changes context.tlb_gen from 2 to 3. If they get
733 * processed on this CPU in reverse order, we'll see
734 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
735 * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
736 * 3, we'd break the invariant: we'd update local_tlb_gen above
737 * 1 without the full flush that's needed for tlb_gen 2.
739 * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
740 * Partial TLB flushes are not all that much cheaper than full TLB
741 * flushes, so it seems unlikely that it would be a performance win
742 * to do a partial flush if that won't bring our TLB fully up to
743 * date. By doing a full flush instead, we can increase
744 * local_tlb_gen all the way to mm_tlb_gen and we can probably
745 * avoid another flush in the very near future.
747 if (f->end != TLB_FLUSH_ALL &&
748 f->new_tlb_gen == local_tlb_gen + 1 &&
749 f->new_tlb_gen == mm_tlb_gen) {
751 unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
752 unsigned long addr = f->start;
754 while (addr < f->end) {
755 flush_tlb_one_user(addr);
756 addr += 1UL << f->stride_shift;
759 count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
760 trace_tlb_flush(reason, nr_invalidate);
765 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
766 trace_tlb_flush(reason, TLB_FLUSH_ALL);
769 /* Both paths above update our state to mm_tlb_gen. */
770 this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
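/*
 * Worked example of the decision above, added by the editor: with
 * local_tlb_gen == 2, mm_tlb_gen == 3 and a request carrying
 * f->new_tlb_gen == 3 for a 4-page range at PAGE_SHIFT stride, both
 * conditions hold, so exactly four single-page invalidations run and
 * tlb_gen advances to 3. Had local_tlb_gen been 1, condition 1 would
 * fail and the full-flush path would run instead.
 */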
773 static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
775 const struct flush_tlb_info *f = info;
777 flush_tlb_func_common(f, true, reason);
780 static void flush_tlb_func_remote(void *info)
782 const struct flush_tlb_info *f = info;
784 inc_irq_stat(irq_tlb_count);
786 if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
789 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
790 flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
793 static bool tlb_is_not_lazy(int cpu, void *data)
795 return !per_cpu(cpu_tlbstate.is_lazy, cpu);
798 STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
799 const struct flush_tlb_info *info)
801 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
802 if (info->end == TLB_FLUSH_ALL)
803 trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
805 trace_tlb_flush(TLB_REMOTE_SEND_IPI,
806 (info->end - info->start) >> PAGE_SHIFT);
809 * If no page tables were freed, we can skip sending IPIs to
810 * CPUs in lazy TLB mode. They will flush the CPU themselves
811 * at the next context switch.
813 * However, if page tables are getting freed, we need to send the
814 * IPI everywhere, to prevent CPUs in lazy TLB mode from tripping
815 * up on the new contents of what used to be page tables, while
816 * doing a speculative memory access.
818 if (info->freed_tables)
819 smp_call_function_many(cpumask, flush_tlb_func_remote,
822 on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
823 (void *)info, 1, cpumask);
826 void flush_tlb_others(const struct cpumask *cpumask,
827 const struct flush_tlb_info *info)
829 __flush_tlb_others(cpumask, info);
833 * See Documentation/x86/tlb.rst for details. We choose 33
834 * because it is large enough to cover the vast majority (at
835 * least 95%) of allocations, and is small enough that we are
836 * confident it will not cause too much overhead. Each single
837 * flush is about 100 ns, so this caps the maximum overhead at about 3,300 ns (33 * 100 ns).
840 * This is in units of pages.
842 unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
844 static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
846 #ifdef CONFIG_DEBUG_VM
847 static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
850 static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
851 unsigned long start, unsigned long end,
852 unsigned int stride_shift, bool freed_tables,
855 struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
857 #ifdef CONFIG_DEBUG_VM
859 * Ensure that the following code is non-reentrant and flush_tlb_info
860 * is not overwritten. This means no TLB flushing is initiated by
861 * interrupt handlers and machine-check exception handlers.
863 BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
869 info->stride_shift = stride_shift;
870 info->freed_tables = freed_tables;
871 info->new_tlb_gen = new_tlb_gen;
876 static inline void put_flush_tlb_info(void)
878 #ifdef CONFIG_DEBUG_VM
879 /* Complete reentrancy prevention checks */
881 this_cpu_dec(flush_tlb_info_idx);
885 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
886 unsigned long end, unsigned int stride_shift,
889 struct flush_tlb_info *info;
895 /* Should we flush just the requested range? */
896 if ((end == TLB_FLUSH_ALL) ||
897 ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
902 /* This is also a barrier that synchronizes with switch_mm(). */
903 new_tlb_gen = inc_mm_tlb_gen(mm);
905 info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
908 if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
909 lockdep_assert_irqs_enabled();
911 flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
915 if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
916 flush_tlb_others(mm_cpumask(mm), info);
918 put_flush_tlb_info();
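/*
 * Worked example, added for clarity: unmapping 64 KiB of 4 KiB pages
 * gives (end - start) >> stride_shift == 16, which is under the ceiling
 * of 33, so only those 16 pages are invalidated. A 1 MiB range (256
 * pages) exceeds the ceiling and is promoted to a full TLB flush.
 */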
923 static void do_flush_tlb_all(void *info)
925 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
929 void flush_tlb_all(void)
931 count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
932 on_each_cpu(do_flush_tlb_all, NULL, 1);
935 static void do_kernel_range_flush(void *info)
937 struct flush_tlb_info *f = info;
940 /* flush the range one page at a time with 'invlpg' */
941 for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
942 flush_tlb_one_kernel(addr);
945 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
947 /* Match the heuristic used for user space flushes; a bit conservative */
948 if (end == TLB_FLUSH_ALL ||
949 (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
950 on_each_cpu(do_flush_tlb_all, NULL, 1);
952 struct flush_tlb_info *info;
955 info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
957 on_each_cpu(do_kernel_range_flush, info, 1);
959 put_flush_tlb_info();
965 * This can be used from process context to figure out what the value of
966 * CR3 is without needing to do a (slow) __read_cr3().
968 * It's intended to be used for code like KVM that sneakily changes CR3
969 * and needs to restore it. It needs to be used very carefully.
971 unsigned long __get_current_cr3_fast(void)
973 unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
974 this_cpu_read(cpu_tlbstate.loaded_mm_asid));
976 /* For now, be very restrictive about when this can be called. */
977 VM_WARN_ON(in_nmi() || preemptible());
979 VM_BUG_ON(cr3 != __read_cr3());
982 EXPORT_SYMBOL_GPL(__get_current_cr3_fast);
985 * Flush one page in the kernel mapping
987 void flush_tlb_one_kernel(unsigned long addr)
989 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
992 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
993 * paravirt equivalent. Even with PCID, this is sufficient: we only
994 * use PCID if we also use global PTEs for the kernel mapping, and
995 * INVLPG flushes global translations across all address spaces.
997 * If PTI is on, then the kernel is mapped with non-global PTEs, and
998 * __flush_tlb_one_user() will flush the given address for the current
999 * kernel address space and for its usermode counterpart, but it does
1000 * not flush it for other address spaces.
1002 flush_tlb_one_user(addr);
1004 if (!static_cpu_has(X86_FEATURE_PTI))
1008 * See above. We need to propagate the flush to all other address
1009 * spaces. In principle, we only need to propagate it to kernelmode
1010 * address spaces, but the extra bookkeeping we would need is not worth it.
1013 this_cpu_write(cpu_tlbstate.invalidate_other, true);
1017 * Flush one page in the user mapping
1019 STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
1021 u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
1023 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
1025 if (!static_cpu_has(X86_FEATURE_PTI))
1029 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
1030 * Just use invalidate_user_asid() in case we are called early.
1032 if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
1033 invalidate_user_asid(loaded_mm_asid);
1035 invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
1038 void flush_tlb_one_user(unsigned long addr)
1040 __flush_tlb_one_user(addr);
1046 STATIC_NOPV void native_flush_tlb_global(void)
1048 unsigned long cr4, flags;
1050 if (static_cpu_has(X86_FEATURE_INVPCID)) {
1052 * Using INVPCID is considerably faster than a pair of writes
1053 * to CR4 sandwiched inside an IRQ flag save/restore.
1055 * Note, this works with CR4.PCIDE=0 or 1.
1057 invpcid_flush_all();
1062 * Read-modify-write to CR4 - protect it from preemption and
1063 * from interrupts. (Use the raw variant because this code can
1064 * be called from deep inside debugging code.)
1066 raw_local_irq_save(flags);
1068 cr4 = this_cpu_read(cpu_tlbstate.cr4);
1070 native_write_cr4(cr4 ^ X86_CR4_PGE);
1071 /* write old PGE again and flush TLBs */
1072 native_write_cr4(cr4);
1074 raw_local_irq_restore(flags);
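/*
 * Editorial note (added): invpcid_flush_all() above issues INVPCID type 2
 * ("all including globals"), which invalidates every TLB entry in every
 * PCID, global or not -- the same effect the CR4.PGE toggle below achieves
 * on CPUs without INVPCID.
 */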
1078 * Flush the entire current user mapping
1080 STATIC_NOPV void native_flush_tlb_local(void)
1083 * Preemption or interrupts must be disabled to protect the access
1084 * to the per CPU variable and to prevent being preempted between
1085 * read_cr3() and write_cr3().
1087 WARN_ON_ONCE(preemptible());
1089 invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
1091 /* If current->mm == NULL then the read_cr3() "borrows" an mm */
1092 native_write_cr3(__native_read_cr3());
1095 void flush_tlb_local(void)
1097 __flush_tlb_local();
1103 void __flush_tlb_all(void)
1106 * This is to catch users with enabled preemption and the PGE feature
1107 * so that they don't trigger the warning in native_flush_tlb_local().
1109 VM_WARN_ON_ONCE(preemptible());
1111 if (boot_cpu_has(X86_FEATURE_PGE)) {
1112 __flush_tlb_global();
1115 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
1120 EXPORT_SYMBOL_GPL(__flush_tlb_all);
1123 * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
1124 * This means that the 'struct flush_tlb_info' that describes which mappings to
1125 * flush is actually fixed. We therefore set a single fixed struct and use it in
1126 * arch_tlbbatch_flush().
1128 static const struct flush_tlb_info full_flush_tlb_info = {
1131 .end = TLB_FLUSH_ALL,
1134 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
1136 int cpu = get_cpu();
1138 if (cpumask_test_cpu(cpu, &batch->cpumask)) {
1139 lockdep_assert_irqs_enabled();
1140 local_irq_disable();
1141 flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
1145 if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
1146 flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
1148 cpumask_clear(&batch->cpumask);
1154 * Blindly accessing user memory from NMI context can be dangerous
1155 * if we're in the middle of switching the current user task or
1156 * switching the loaded mm. It can also be dangerous if we
1157 * interrupted some kernel code that was temporarily using a different mm.
1160 bool nmi_uaccess_okay(void)
1162 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1163 struct mm_struct *current_mm = current->mm;
1165 VM_WARN_ON_ONCE(!loaded_mm);
1168 * The condition we want to check is
1169 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
1170 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
1171 * is supposed to be reasonably fast.
1173 * Instead, we check the almost equivalent but somewhat conservative
1174 * condition below, and we rely on the fact that switch_mm_irqs_off()
1175 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
1177 if (loaded_mm != current_mm)
1180 VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
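/*
 * Editorial note (added): while switch_mm_irqs_off() has loaded_mm set to
 * LOADED_MM_SWITCHING, the comparison above can never succeed, so NMI-time
 * user accesses are refused for the whole window in which CR3 is in flux.
 */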
1185 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
1186 size_t count, loff_t *ppos)
1191 len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
1192 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1195 static ssize_t tlbflush_write_file(struct file *file,
1196 const char __user *user_buf, size_t count, loff_t *ppos)
1202 len = min(count, sizeof(buf) - 1);
1203 if (copy_from_user(buf, user_buf, len))
1207 if (kstrtoint(buf, 0, &ceiling))
1213 tlb_single_page_flush_ceiling = ceiling;
1217 static const struct file_operations fops_tlbflush = {
1218 .read = tlbflush_read_file,
1219 .write = tlbflush_write_file,
1220 .llseek = default_llseek,
1223 static int __init create_tlb_single_page_flush_ceiling(void)
1225 debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
1226 arch_debugfs_dir, NULL, &fops_tlbflush);
1229 late_initcall(create_tlb_single_page_flush_ceiling);
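/*
 * Usage example, added by the editor: arch_debugfs_dir is the "x86"
 * directory under debugfs, so with debugfs mounted the knob can be
 * inspected and tuned from a shell:
 *
 *   # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   33
 *   # echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */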