1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_DEBUGREG_H
3 #define _ASM_X86_DEBUGREG_H
6 #include <linux/percpu.h>
7 #include <uapi/asm/debugreg.h>
9 #include <asm/cpufeature.h>
12 DECLARE_PER_CPU(unsigned long, cpu_dr7);
14 #ifndef CONFIG_PARAVIRT_XXL
16 * These special macros can be used to get or set a debugging register
18 #define get_debugreg(var, register) \
19 (var) = native_get_debugreg(register)
20 #define set_debugreg(value, register) \
21 native_set_debugreg(register, value)
24 static __always_inline unsigned long native_get_debugreg(int regno)
26 unsigned long val = 0; /* Damn you, gcc! */
30 asm("mov %%db0, %0" :"=r" (val));
33 asm("mov %%db1, %0" :"=r" (val));
36 asm("mov %%db2, %0" :"=r" (val));
39 asm("mov %%db3, %0" :"=r" (val));
42 asm("mov %%db6, %0" :"=r" (val));
46 * Apply __FORCE_ORDER to DR7 reads to forbid re-ordering them
49 * This is needed because a DR7 access can cause a #VC exception
50 * when running under SEV-ES. Taking a #VC exception is not a
51 * safe thing to do just anywhere in the entry code and
52 * re-ordering might place the access into an unsafe location.
54 * This happened in the NMI handler, where the DR7 read was
55 * re-ordered to happen before the call to sev_es_ist_enter(),
56 * causing stack recursion.
58 asm volatile("mov %%db7, %0" : "=r" (val) : __FORCE_ORDER);
66 static __always_inline void native_set_debugreg(int regno, unsigned long value)
70 asm("mov %0, %%db0" ::"r" (value));
73 asm("mov %0, %%db1" ::"r" (value));
76 asm("mov %0, %%db2" ::"r" (value));
79 asm("mov %0, %%db3" ::"r" (value));
82 asm("mov %0, %%db6" ::"r" (value));
86 * Apply __FORCE_ORDER to DR7 writes to forbid re-ordering them
89 * While is didn't happen with a DR7 write (see the DR7 read
90 * comment above which explains where it happened), add the
91 * __FORCE_ORDER here too to avoid similar problems in the
94 asm volatile("mov %0, %%db7" ::"r" (value), __FORCE_ORDER);
/* Disable all hardware breakpoints on the current CPU. */
static inline void hw_breakpoint_disable(void)
{
	/* Zero the control register for HW Breakpoint */
	set_debugreg(0UL, 7);

	/* Zero-out the individual HW breakpoint address registers */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
}
113 static __always_inline bool hw_breakpoint_active(void)
115 return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
118 extern void hw_breakpoint_restore(void);
120 static __always_inline unsigned long local_db_save(void)
124 if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
127 get_debugreg(dr7, 7);
128 dr7 &= ~0x400; /* architecturally set bit */
132 * Ensure the compiler doesn't lower the above statements into
133 * the critical section; disabling breakpoints late would not
141 static __always_inline void local_db_restore(unsigned long dr7)
144 * Ensure the compiler doesn't raise this statement into
145 * the critical section; enabling breakpoints early would
150 set_debugreg(dr7, 7);
#ifdef CONFIG_CPU_SUP_AMD
extern void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr);
extern unsigned long amd_get_dr_addr_mask(unsigned int dr);
#else
/* No DR address masks without AMD CPU support: set is a no-op, get reads 0. */
static inline void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr) { }
static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	return 0;
}
#endif
164 static inline unsigned long get_debugctlmsr(void)
166 unsigned long debugctlmsr = 0;
168 #ifndef CONFIG_X86_DEBUGCTLMSR
169 if (boot_cpu_data.x86 < 6)
172 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
177 static inline void update_debugctlmsr(unsigned long debugctlmsr)
179 #ifndef CONFIG_X86_DEBUGCTLMSR
180 if (boot_cpu_data.x86 < 6)
183 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
186 #endif /* _ASM_X86_DEBUGREG_H */