Merge branch 'linus' into sched/urgent, to resolve conflicts
[linux-2.6-microblaze.git] / arch/x86/mm/tlb.c
index 9b34121..8dcc060 100644
@@ -6,13 +6,14 @@
 #include <linux/interrupt.h>
 #include <linux/export.h>
 #include <linux/cpu.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
 #include <asm/cache.h>
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
-#include <linux/debugfs.h>
 
 /*
  *     TLB flushing, formerly SMP-only
@@ -253,6 +254,27 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
        } else {
                u16 new_asid;
                bool need_flush;
+               u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
+
+               /*
+                * Avoid user/user BTB poisoning by flushing the branch
+                * predictor when switching between processes. This stops
+                * one process from doing Spectre-v2 attacks on another.
+                *
+                * As an optimization, flush indirect branches only when
+                * switching into processes that disable dumping. This
+                * protects high-value processes like gpg without adding
+                * too much performance overhead. IBPB is *expensive*!
+                *
+                * This will not flush branches when switching into kernel
+                * threads. Nor will it flush if we switch to the idle
+                * thread and back to the same process. It will flush if we
+                * switch to a different non-dumpable process.
+                */
+               if (tsk && tsk->mm &&
+                   tsk->mm->context.ctx_id != last_ctx_id &&
+                   get_dumpable(tsk->mm) != SUID_DUMP_USER)
+                       indirect_branch_prediction_barrier();
 
                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
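
A side note on the added condition above: it packs three tests into one if.
A minimal sketch (not part of the patch; ibpb_needed() is a hypothetical
helper name, assuming the same cpu_tlbstate.last_ctx_id bookkeeping):

	static inline bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
	{
		/*
		 * Skip the barrier for kernel threads (no user mm), for a
		 * switch back to the same mm (ctx_id unchanged), and for
		 * dumpable targets, which are not treated as high value.
		 */
		return tsk && tsk->mm &&
		       tsk->mm->context.ctx_id != last_ctx_id &&
		       get_dumpable(tsk->mm) != SUID_DUMP_USER;
	}
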
@@ -298,6 +320,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                        trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
                }
 
+               /*
+                * Record the last user mm's context id, so we can
+                * avoid flushing the branch predictor with IBPB if
+                * we switch back to the same user.
+                */
+               if (next != &init_mm)
+                       this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
        }
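
Why the next != &init_mm guard above matters: only a real user mm updates
last_ctx_id, so a detour through the idle/kernel-thread mm leaves it intact
and the return to the same process skips the IBPB. A sketch under the same
assumptions (record_last_ctx_id() is a hypothetical name):

	/*
	 *   A (ctx 7) -> idle (init_mm) : last_ctx_id stays 7
	 *   idle      -> A (ctx 7)      : 7 == last_ctx_id, no IBPB
	 *   A (ctx 7) -> B (ctx 9)      : IBPB if B is non-dumpable
	 */
	static inline void record_last_ctx_id(struct mm_struct *next)
	{
		if (next != &init_mm)
			this_cpu_write(cpu_tlbstate.last_ctx_id,
				       next->context.ctx_id);
	}
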
@@ -375,6 +405,7 @@ void initialize_tlbstate_and_flush(void)
        write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
+       this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
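
For context, the barrier itself boils down to a single command-MSR write.
A simplified sketch (the in-tree version in asm/nospec-branch.h is patched
in via alternatives; the feature-flag name here is an assumption):

	static inline void ibpb_sketch(void)
	{
		/*
		 * Writing PRED_CMD_IBPB to MSR_IA32_PRED_CMD flushes the
		 * indirect branch predictor on CPUs that support IBPB.
		 */
		if (boot_cpu_has(X86_FEATURE_IBPB))
			wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
	}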