/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

#define RETPOLINE_THUNK_SIZE	32

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	772f;				\
773:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	ANNOTATE_INTRA_FUNCTION_CALL;		\
	call	774f;				\
775:	/* speculation trap */			\
	UNWIND_HINT_EMPTY;			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	add	$(BITS_PER_LONG/8) * 2, sp;	\
	dec	reg;				\
	jnz	771b;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm
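
/*
 * Illustrative sketch, not part of the original header: the annotation
 * is placed immediately before the indirect branch it vouches for, e.g.
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	jmp	*%rdi
 */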

/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE ANNOTATE_RETPOLINE_SAFE
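
/*
 * Usage sketch (illustrative): a deliberate bare RET that RETBleed
 * validation should not warn about would be written as
 *
 *	ANNOTATE_UNRET_SAFE
 *	ret
 *	int3
 */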

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
		      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	jmp	*%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
		      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
		      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	call	*%\reg
#endif
.endm
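
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * substitute these for a bare indirect branch, e.g.
 *
 *	CALL_NOSPEC rdi		(call *%rdi, or a thunk call when enabled)
 *	JMP_NOSPEC rcx		(jmp *%rcx, likewise)
 */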

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
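
/*
 * Usage sketch (illustrative; the feature flag shown is an assumption):
 * stuffing the RSB on context switch might look like
 *
 *	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 */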

/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * Doesn't clobber any registers but does require a stable stack.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
.macro UNTRAIN_RET
#ifdef CONFIG_RETPOLINE
	ALTERNATIVE "", "call zen_untrain_ret", X86_FEATURE_UNRET
#endif
.endm
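
/*
 * Placement sketch (illustrative, not part of the original header): in an
 * entry path, untraining goes after the CR3 switch, once a stack is
 * available and before the first return:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 *	UNTRAIN_RET
 */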

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];

extern void __x86_return_thunk(void);
extern void zen_untrain_ret(void);

#ifdef CONFIG_RETPOLINE

#define GEN(reg) \
	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; availability is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
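
/*
 * Usage sketch (illustrative; 'ret' and 'fn' are hypothetical): calling
 * through a function pointer without an unprotected indirect branch:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret)
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 */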

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
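
/*
 * Context sketch (illustrative, an assumption about the call site): the
 * barrier is typically issued when switching to a task that must not
 * inherit branch predictions, e.g. guarded by one of the static keys
 * declared below:
 *
 *	if (static_branch_unlikely(&switch_mm_cond_ibpb))
 *		indirect_branch_prediction_barrier();
 */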

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
extern void write_spec_ctrl_current(u64 val, bool force);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
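
/*
 * Usage sketch (illustrative; the firmware call is hypothetical):
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call(...);
 *	firmware_restrict_branch_speculation_end();
 */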

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
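
/*
 * Usage sketch (illustrative, not part of the original header): the idle
 * variant runs right before entering an idle state, e.g.
 *
 *	mds_idle_clear_cpu_buffers();
 *	__monitor(addr, 0, 0);
 *	__mwait(eax, ecx);
 */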

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */