/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>

#define RETPOLINE_THUNK_SIZE    32

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS         32      /* To forcibly overwrite all entries */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)       \
        mov     $(nr/2), reg;                   \
771:                                            \
        ANNOTATE_INTRA_FUNCTION_CALL;           \
        call    772f;                           \
773:    /* speculation trap */                  \
        UNWIND_HINT_EMPTY;                      \
        pause;                                  \
        lfence;                                 \
        jmp     773b;                           \
772:                                            \
        ANNOTATE_INTRA_FUNCTION_CALL;           \
        call    774f;                           \
775:    /* speculation trap */                  \
        UNWIND_HINT_EMPTY;                      \
        pause;                                  \
        lfence;                                 \
        jmp     775b;                           \
774:                                            \
        add     $(BITS_PER_LONG/8) * 2, sp;     \
        dec     reg;                            \
        jnz     771b;
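
/*
 * Worked example (an illustrative sketch, not part of the upstream
 * source): with nr == RSB_CLEAR_LOOPS (32), the loop body runs
 * nr/2 == 16 times and each pass executes two 'call' instructions,
 * stuffing 32 RSB entries in total. The 'add $(BITS_PER_LONG/8) * 2, sp'
 * steps the stack pointer past the two return addresses the calls
 * pushed (2 * 8 bytes on 64-bit, 2 * 4 bytes on 32-bit), so the
 * architectural stack is unchanged when the loop exits.
 */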

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
        .Lannotate_\@:
        .pushsection .discard.retpoline_safe
        _ASM_PTR .Lannotate_\@
        .popsection
.endm
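
/*
 * Illustrative use from a .S file (a sketch, not copied from kernel
 * code); the indirect jump is assumed to have been audited as safe:
 *
 *      ANNOTATE_RETPOLINE_SAFE
 *      jmp     *%rdi
 */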

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
                      __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
        jmp     *%\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
                      __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
                      __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
        call    *%\reg
#endif
.endm
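
/*
 * Illustrative use from a .S file (a sketch; 'handler' is a
 * hypothetical symbol): load the target into a register and dispatch
 * through the macro instead of a bare indirect call:
 *
 *      movq    handler(%rip), %r11
 *      CALL_NOSPEC r11
 */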

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
.Lskip_rsb_\@:
#endif
.endm
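
/*
 * Illustrative use (a sketch modelled on the entry code), refilling
 * the RSB only when the named feature bit is set:
 *
 *      FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 */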

#else /* __ASSEMBLY__ */

#define ANNOTATE_RETPOLINE_SAFE                                 \
        "999:\n\t"                                              \
        ".pushsection .discard.retpoline_safe\n\t"              \
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"

#ifdef CONFIG_RETPOLINE

typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];

#define GEN(reg) \
        extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>
#undef GEN

extern retpoline_thunk_t __x86_indirect_thunk_array[];

#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; that is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE_2(                                          \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE,                                  \
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC                                            \
        ALTERNATIVE_2(                                          \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "       jmp    904f;\n"                                 \
        "       .align 16\n"                                    \
        "901:   call   903f;\n"                                 \
        "902:   pause;\n"                                       \
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
        "903:   lea    4(%%esp), %%esp;\n"                      \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
        "904:   call   901b;\n",                                \
        X86_FEATURE_RETPOLINE,                                  \
        "lfence;\n"                                             \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
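
/*
 * Illustrative use from C (a sketch; 'fn', 'ret' and the constraints
 * are hypothetical): an indirect call issued through CALL_NOSPEC with
 * the target bound via THUNK_TARGET():
 *
 *      asm volatile(CALL_NOSPEC
 *                   : "=a" (ret)
 *                   : THUNK_TARGET(fn)
 *                   : "memory");
 */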

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
        SPECTRE_V2_RETPOLINE,
        SPECTRE_V2_LFENCE,
        SPECTRE_V2_EIBRS,
        SPECTRE_V2_EIBRS_RETPOLINE,
        SPECTRE_V2_EIBRS_LFENCE,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
        SPECTRE_V2_USER_NONE,
        SPECTRE_V2_USER_STRICT,
        SPECTRE_V2_USER_STRICT_PREFERRED,
        SPECTRE_V2_USER_PRCTL,
        SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
        SPEC_STORE_BYPASS_DISABLE,
        SPEC_STORE_BYPASS_PRCTL,
        SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * Write an MSR via a patched-in WRMSR: the instruction is only present
 * when the given CPU feature bit is set, otherwise this is a NOP. The
 * MSR number goes in ECX and the 64-bit value is split across EDX:EAX,
 * as WRMSR expects.
 */
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
        asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
                : : "c" (msr),
                    "a" ((u32)val),
                    "d" ((u32)(val >> 32)),
                    [feature] "i" (feature)
                : "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
        u64 val = PRED_CMD_IBPB;

        alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()                    \
do {                                                                    \
        u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;                  \
                                                                        \
        preempt_disable();                                              \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
} while (0)

#define firmware_restrict_branch_speculation_end()                      \
do {                                                                    \
        u64 val = x86_spec_ctrl_base;                                   \
                                                                        \
        alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
} while (0)
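
/*
 * Illustrative use (a sketch; 'efi_fn' is a hypothetical firmware
 * call). Note that start() disables preemption and end() re-enables
 * it, so the pair must bracket the firmware call on the same CPU:
 *
 *      firmware_restrict_branch_speculation_start();
 *      status = efi_fn(...);
 *      firmware_restrict_branch_speculation_end();
 */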

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
        static const u16 ds = __KERNEL_DS;

        /*
         * Has to be the memory-operand variant because only that
         * guarantees the CPU buffer flush functionality according to
         * documentation. The register-operand variant does not.
         * Works with any segment selector, but a valid writable
         * data segment is the fastest variant.
         *
         * "cc" clobber is required because VERW modifies ZF.
         */
        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_user_clear))
                mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
        if (static_branch_likely(&mds_idle_clear))
                mds_clear_cpu_buffers();
}
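
/*
 * Typical call sites (a sketch of contemporaneous usage, not defined
 * in this header): mds_user_clear_cpu_buffers() runs on the
 * return-to-userspace path, mds_idle_clear_cpu_buffers() on the way
 * into idle, e.g. just before MWAIT.
 */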

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */