/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>

#define RETPOLINE_THUNK_SIZE	32

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg;				\
	jnz	771b;				\
	/* barrier for jnz misprediction */	\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE

/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END, should
 * eventually turn into its own annotation.
 */
.macro ANNOTATE_UNRET_END
#ifdef CONFIG_DEBUG_ENTRY
	ANNOTATE_RETPOLINE_SAFE
	nop
#endif
.endm

/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a RAX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
	.ifc \reg,\rs
	.byte 0x2e
	.endif
	.endr
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	jmp	__x86_indirect_thunk_\reg
#else
	jmp	*%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	__CS_PREFIX \reg
	call	__x86_indirect_thunk_\reg
#else
	call	*%\reg
#endif
.endm

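/*
 * Illustrative use from assembly (a sketch, not part of this header;
 * 'my_handler' is a hypothetical symbol):
 *
 *	movq	my_handler(%rip), %r11
 *	CALL_NOSPEC r11
 *
 * instead of a bare 'call *%r11', so retpoline builds route the call
 * through __x86_indirect_thunk_r11.
 */
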
.macro ISSUE_UNBALANCED_RET_GUARD
	ANNOTATE_INTRA_FUNCTION_CALL
	call .Lunbalanced_ret_guard_\@
	int3
.Lunbalanced_ret_guard_\@:
	add $(BITS_PER_LONG/8), %_ASM_SP
	lfence
.endm

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
.ifb \ftr2
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
.else
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
.endif
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lunbalanced_\@:
	ISSUE_UNBALANCED_RET_GUARD
.Lskip_rsb_\@:
.endm

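/*
 * Illustrative use (a sketch, not part of this header): context-switch
 * style code typically stuffs the RSB with something like
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * where the feature bit decides at patch time whether the fill is emitted.
 */
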
#ifdef CONFIG_CPU_UNRET_ENTRY
#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
#else
#define CALL_ZEN_UNTRAIN_RET	""
#endif

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While zen_untrain_ret() doesn't clobber anything but requires a stack,
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY)
	ANNOTATE_UNRET_END
	ALTERNATIVE_2 "",						\
		      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,		\
		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
#endif
.endm

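/*
 * Illustrative placement (a sketch, not part of this header): kernel entry
 * paths invoke this once a stack is available and the kernel CR3 is live,
 * e.g.
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *	...
 *	UNTRAIN_RET
 *
 * and only then execute any RET.
 */
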
#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);
extern void entry_ibpb(void);

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier which is only in newer GCC
 * which is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

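/*
 * Illustrative use from C (a sketch, not part of this header; 'func' and
 * 'ret' are hypothetical locals):
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(func)
 *		     : "memory");
 *
 * THUNK_TARGET() supplies the [thunk_target] operand with the constraint
 * appropriate for the configuration ("r" on 64-bit retpoline builds, "rm"
 * otherwise).
 */
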
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

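/*
 * Illustrative call site (a sketch, not part of this header): code that is
 * about to switch to a potentially hostile context can issue
 *
 *	indirect_branch_prediction_barrier();
 *
 * so indirect branch predictions learned before the switch cannot steer
 * speculation afterwards.
 */
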
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void write_spec_ctrl_current(u64 val, bool force);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

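/*
 * Illustrative pairing (a sketch, not part of this header; efi_call() stands
 * in for whatever firmware entry point is used):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * The pair also brackets a preempt-disabled region, so the protected section
 * must not sleep.
 */
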
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

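/*
 * Illustrative call sites (a sketch, not part of this header): the user
 * variant is typically invoked on the return-to-userspace path and the idle
 * variant from the mwait/halt idle paths, each gated by its static key above.
 */
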
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */