x86/mm: Save debug registers when loading a temporary mm
author Nadav Amit <namit@vmware.com>
Fri, 26 Apr 2019 00:11:24 +0000 (17:11 -0700)
committer Ingo Molnar <mingo@kernel.org>
Tue, 30 Apr 2019 10:37:50 +0000 (12:37 +0200)
Prevent user watchpoints from mistakenly firing while the temporary mm
is in use. Since the addresses in the temporary mm might overlap those
of the user process, this is necessary to prevent wrong signals from
being delivered, or worse.
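
For context, both helpers assume they are called with IRQs disabled
(they assert this via lockdep), so the breakpoint save/restore brackets
the whole critical section. A minimal sketch of the intended caller
pattern follows; poking_mm and the write step are illustrative, not
part of this patch:

	temp_mm_state_t prev;
	unsigned long flags;

	local_irq_save(flags);
	prev = use_temporary_mm(poking_mm);	/* DR7 cleared here if active */

	/* ... touch addresses that may collide with user watchpoints ... */

	unuse_temporary_mm(prev);		/* debug registers restored */
	local_irq_restore(flags);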

Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <akpm@linux-foundation.org>
Cc: <ard.biesheuvel@linaro.org>
Cc: <deneen.t.dock@intel.com>
Cc: <kernel-hardening@lists.openwall.com>
Cc: <kristen@linux.intel.com>
Cc: <linux_dti@icloud.com>
Cc: <will.deacon@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190426001143.4983-5-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/mmu_context.h

index 24dc3b8..93dff19 100644
@@ -13,6 +13,7 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/mpx.h>
+#include <asm/debugreg.h>
 
 extern atomic64_t last_mm_ctx_id;
 
@@ -380,6 +381,21 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
        lockdep_assert_irqs_disabled();
        temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
        switch_mm_irqs_off(NULL, mm, current);
+
+       /*
+        * If breakpoints are enabled, disable them while the temporary mm is
+        * used. Userspace might set up watchpoints on addresses that are used
+        * in the temporary mm, which would lead to wrong signals being sent or
+        * crashes.
+        *
+        * Note that breakpoints are not disabled selectively, which also causes
+        * kernel breakpoints (e.g., perf's) to be disabled. This might be
+        * undesirable, but still seems reasonable as the code that runs in the
+        * temporary mm should be short.
+        */
+       if (hw_breakpoint_active())
+               hw_breakpoint_disable();
+
        return temp_state;
 }
 
@@ -387,6 +403,13 @@ static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
 {
        lockdep_assert_irqs_disabled();
        switch_mm_irqs_off(NULL, prev_state.mm, current);
+
+       /*
+        * Restore the breakpoints if they were disabled before the temporary mm
+        * was loaded.
+        */
+       if (hw_breakpoint_active())
+               hw_breakpoint_restore();
 }
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
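
For reference, the <asm/debugreg.h> helpers this patch relies on look
roughly like this in kernels of this era (a paraphrased sketch, not
verbatim):

	static inline bool hw_breakpoint_active(void)
	{
		/* DR7 global-enable bits set => some breakpoint is armed */
		return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
	}

	static inline void hw_breakpoint_disable(void)
	{
		/* Zero the control register first, then the address registers */
		set_debugreg(0UL, 7);
		set_debugreg(0UL, 0);
		set_debugreg(0UL, 1);
		set_debugreg(0UL, 2);
		set_debugreg(0UL, 3);
	}

hw_breakpoint_restore() (in arch/x86/kernel/hw_breakpoint.c) rewrites
DR0-DR3, DR6 and DR7 from the saved per-CPU/task state, which is why
unuse_temporary_mm() can simply call it after switching back.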