Merge tag 'dt-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index cfe6b1e..59ba296 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -8,11 +8,13 @@
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
+#include <linux/sched/smt.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/nospec-branch.h>
 #include <asm/cache.h>
+#include <asm/cacheflush.h>
 #include <asm/apic.h>
 #include <asm/perf_event.h>
 
  */
 
 /*
- * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
- * stored in cpu_tlb_state.last_user_mm_ibpb.
+ * Bits to mangle the TIF_SPEC_* state into the mm pointer which is
+ * stored in cpu_tlb_state.last_user_mm_spec.
  */
 #define LAST_USER_MM_IBPB      0x1UL
+#define LAST_USER_MM_L1D_FLUSH 0x2UL
+#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)
+
+/* Bits to set when tlbstate and flush are (re)initialized */
+#define LAST_USER_MM_INIT      LAST_USER_MM_IBPB
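
The spec bits ride in the two low bits of the mm pointer, which are free
because a struct mm_struct is always at least word-aligned. A minimal
userspace sketch of the same packing scheme (the names are illustrative,
not the kernel's):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SPEC_IBPB	0x1UL
	#define SPEC_L1D_FLUSH	0x2UL
	#define SPEC_MASK	(SPEC_IBPB | SPEC_L1D_FLUSH)

	/* Pack mitigation flags into the free low bits of an aligned pointer. */
	static uintptr_t pack_mm(void *mm, unsigned long spec_bits)
	{
		assert(((uintptr_t)mm & SPEC_MASK) == 0);
		return (uintptr_t)mm | (spec_bits & SPEC_MASK);
	}

	static void *unpack_mm(uintptr_t packed)
	{
		return (void *)(packed & ~SPEC_MASK);
	}

	int main(void)
	{
		static long dummy_mm;	/* stand-in for a struct mm_struct */
		uintptr_t v = pack_mm(&dummy_mm, SPEC_L1D_FLUSH);

		printf("mm=%p l1d=%lu\n", unpack_mm(v),
		       (unsigned long)((v & SPEC_L1D_FLUSH) >> 1));
		return 0;
	}

mm_mangle_tif_spec_bits() below applies the same idea in-kernel by ORing
the shifted TIF bits into next->mm.
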
 
 /*
  * The x86 feature is called PCID (Process Context IDentifier). It is similar
@@ -317,20 +324,70 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_restore(flags);
 }
 
-static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+/*
+ * Invoked from return to user/guest by a task that opted in to L1D
+ * flushing but ended up running on an SMT-enabled core due to wrong
+ * affinity settings or CPU hotplug. This is part of the paranoid L1D
+ * flush contract which the task requested.
+ */
+static void l1d_flush_force_sigbus(struct callback_head *ch)
+{
+       force_sig(SIGBUS);
+}
+
+static void l1d_flush_evaluate(unsigned long prev_mm, unsigned long next_mm,
+                               struct task_struct *next)
+{
+       /* Flush L1D if the outgoing task requests it */
+       if (prev_mm & LAST_USER_MM_L1D_FLUSH)
+               wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+
+       /* Check whether the incoming task opted in for L1D flush */
+       if (likely(!(next_mm & LAST_USER_MM_L1D_FLUSH)))
+               return;
+
+       /*
+        * Validate that it is not running on an SMT sibling as this would
+        * make the exercise pointless because the siblings share L1D. If
+        * it runs on an SMT sibling, notify it with SIGBUS on return to
+        * user/guest.
+        */
+       if (this_cpu_read(cpu_info.smt_active)) {
+               clear_ti_thread_flag(&next->thread_info, TIF_SPEC_L1D_FLUSH);
+               next->l1d_flush_kill.func = l1d_flush_force_sigbus;
+               task_work_add(next, &next->l1d_flush_kill, TWA_RESUME);
+       }
+}
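
For context, a task opts in to this flush from userspace with the
speculation-control prctl. A hedged sketch, assuming the
PR_SPEC_L1D_FLUSH control that ships with this series (the fallback
constants mirror the upstream <linux/prctl.h> values):

	#include <signal.h>
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	#ifndef PR_SET_SPECULATION_CTRL
	#define PR_SET_SPECULATION_CTRL	53
	#endif
	#ifndef PR_SPEC_ENABLE
	#define PR_SPEC_ENABLE		(1UL << 1)
	#endif
	#ifndef PR_SPEC_L1D_FLUSH
	#define PR_SPEC_L1D_FLUSH	2
	#endif

	static void sigbus_handler(int sig)
	{
		/* Delivered when this opted-in task ran on an SMT sibling. */
		(void)sig;
		_exit(1);
	}

	int main(void)
	{
		signal(SIGBUS, sigbus_handler);

		/* Request an L1D flush on every switch away from this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
			  PR_SPEC_ENABLE, 0, 0))
			perror("prctl");	/* e.g. rejected while SMT is on */

		/* ... sensitive work, with affinity kept off SMT siblings ... */
		return 0;
	}
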
+
+static unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
 {
        unsigned long next_tif = task_thread_info(next)->flags;
-       unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+       unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
 
-       return (unsigned long)next->mm | ibpb;
+       /*
+        * Ensure that the bit shift above works as expected and the two flags
+        * end up in bits 0 and 1.
+        */
+       BUILD_BUG_ON(TIF_SPEC_L1D_FLUSH != TIF_SPEC_IB + 1);
+
+       return (unsigned long)next->mm | spec_bits;
 }
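
The BUILD_BUG_ON() guards the assumption that the two TIF bits are
adjacent, so a single shift by TIF_SPEC_IB lands them in bits 0 and 1.
A standalone illustration (the flag positions 9 and 10 are assumptions
for the sketch, not authoritative kernel values):

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical thread-flag positions; only adjacency matters. */
	#define TIF_SPEC_IB		9
	#define TIF_SPEC_L1D_FLUSH	10
	#define SPEC_MASK		0x3UL	/* bits 0 and 1 after shifting */

	int main(void)
	{
		unsigned long tif = (1UL << TIF_SPEC_IB) |
				    (1UL << TIF_SPEC_L1D_FLUSH);
		unsigned long spec = (tif >> TIF_SPEC_IB) & SPEC_MASK;

		/* TIF_SPEC_IB lands in bit 0, TIF_SPEC_L1D_FLUSH in bit 1. */
		assert(spec == 0x3UL);
		printf("spec_bits=%#lx\n", spec);
		return 0;
	}
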
 
-static void cond_ibpb(struct task_struct *next)
+static void cond_mitigation(struct task_struct *next)
 {
+       unsigned long prev_mm, next_mm;
+
        if (!next || !next->mm)
                return;
 
+       next_mm = mm_mangle_tif_spec_bits(next);
+       prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
+
        /*
+        * Avoid user/user BTB poisoning by flushing the branch predictor
+        * when switching between processes. This stops one process from
+        * doing Spectre-v2 attacks on another.
+        *
         * Both the conditional and the always-on IBPB modes use the mm
         * pointer to avoid the IBPB when switching between tasks of the
         * same process. Using the mm pointer instead of mm->context.ctx_id
@@ -340,8 +397,6 @@ static void cond_ibpb(struct task_struct *next)
         * exposed data is not really interesting.
         */
        if (static_branch_likely(&switch_mm_cond_ibpb)) {
-               unsigned long prev_mm, next_mm;
-
                /*
                 * This is a bit more complex than the always mode because
                 * it has to handle two cases:
@@ -371,20 +426,14 @@ static void cond_ibpb(struct task_struct *next)
                 * Optimize this with reasonably small overhead for the
                 * above cases. Mangle the TIF_SPEC_IB bit into the mm
                 * pointer of the incoming task which is stored in
-                * cpu_tlbstate.last_user_mm_ibpb for comparison.
-                */
-               next_mm = mm_mangle_tif_spec_ib(next);
-               prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
-
-               /*
+                * cpu_tlbstate.last_user_mm_spec for comparison.
+                *
                 * Issue IBPB only if the mm's are different and one or
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
                    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
                        indirect_branch_prediction_barrier();
-
-               this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
        }
 
        if (static_branch_unlikely(&switch_mm_always_ibpb)) {
@@ -393,11 +442,22 @@ static void cond_ibpb(struct task_struct *next)
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
-               if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+               if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
+                                       (unsigned long)next->mm)
                        indirect_branch_prediction_barrier();
-                       this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
-               }
        }
+
+       if (static_branch_unlikely(&switch_mm_cond_l1d_flush)) {
+               /*
+                * Flush L1D when the outgoing task requested it and/or
+                * check whether the incoming task requested L1D flushing
+                * and ended up on an SMT sibling.
+                */
+               if (unlikely((prev_mm | next_mm) & LAST_USER_MM_L1D_FLUSH))
+                       l1d_flush_evaluate(prev_mm, next_mm, next);
+       }
+
+       this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
 }
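
Because a misplaced opt-in is only converted into a SIGBUS notification,
a cautious task may want to verify that SMT is off before opting in. A
sketch reading the standard sysfs knob (error handling kept minimal):

	#include <stdio.h>

	/* Returns 1 if SMT is active, 0 if not, -1 if undeterminable. */
	static int smt_active(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/smt/active", "r");
		int val = -1;

		if (!f)
			return -1;
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(void)
	{
		if (smt_active() != 0)
			fprintf(stderr, "SMT active or unknown: L1D flush "
					"opt-in would only yield SIGBUS\n");
		return 0;
	}
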
 
 #ifdef CONFIG_PERF_EVENTS
@@ -531,11 +591,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                need_flush = true;
        } else {
                /*
-                * Avoid user/user BTB poisoning by flushing the branch
-                * predictor when switching between processes. This stops
-                * one process from doing Spectre-v2 attacks on another.
+                * Apply process-to-process speculation vulnerability
+                * mitigations if applicable.
                 */
-               cond_ibpb(tsk);
+               cond_mitigation(tsk);
 
                /*
                 * Stop remote flushes for the previous mm.
@@ -643,7 +702,7 @@ void initialize_tlbstate_and_flush(void)
        write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
-       this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
+       this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
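
Seeding last_user_mm_spec with the IBPB bit set guarantees that the
first genuine user mm to run after a (re)init trips the conditional-IBPB
barrier test in cond_mitigation(). A tiny simulation of that comparison
(constants mirror the defines above; the mm value is a made-up pointer):

	#include <assert.h>

	#define LAST_USER_MM_IBPB	0x1UL
	#define LAST_USER_MM_INIT	LAST_USER_MM_IBPB

	int main(void)
	{
		unsigned long prev_mm = LAST_USER_MM_INIT;	/* fresh init */
		unsigned long next_mm = 0x7f0000UL;	/* any real, aligned mm */

		/* Same test as the cond_mitigation() IBPB condition. */
		assert(next_mm != prev_mm &&
		       ((next_mm | prev_mm) & LAST_USER_MM_IBPB));
		return 0;
	}
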