arch/x86/kvm/vmx/vmx_ops.h
/* SPDX-License-Identifier: GPL-2.0 */
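/*
 * Inline wrappers for the VMX instructions used by KVM: VMCS field
 * accessors (VMREAD/VMWRITE), VMCLEAR/VMPTRLD, and the INVVPID/INVEPT
 * TLB flush helpers, each with VM-fail and fault reporting.
 */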
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"
#include "x86.h"

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
                                                          bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

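/*
 * A VMCS field encoding carries its width in bits 14:13 (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and selects the high half
 * of a 64-bit field via bit 0.  The checks below use that encoding to
 * reject, at build time, any accessor whose size does not match a
 * compile-time constant field.
 */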
static __always_inline void vmcs_check16(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "16-bit accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "16-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "16-bit accessor invalid for 32-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "32-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "32-bit accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "32-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "64-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "64-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "64-bit accessor invalid for 32-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "Natural width accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "Natural width accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "Natural width accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile("1: vmread %2, %1\n\t"
                     ".byte 0x3e\n\t" /* branch taken hint */
                     "ja 3f\n\t"

                     /*
                      * VMREAD failed.  Push '0' for @fault, push the failing
                      * @field, and bounce through the trampoline to preserve
                      * volatile registers.
                      */
                     "push $0\n\t"
                     "push %2\n\t"
                     "2:call vmread_error_trampoline\n\t"

                     /*
                      * Unwind the stack.  Note, the trampoline zeros out the
                      * memory for @fault so that the result is '0' on error.
                      */
                     "pop %2\n\t"
                     "pop %1\n\t"
                     "3:\n\t"

                     /* VMREAD faulted.  As above, except push '1' for @fault. */
                     ".pushsection .fixup, \"ax\"\n\t"
                     "4: push $1\n\t"
                     "push %2\n\t"
                     "jmp 2b\n\t"
                     ".popsection\n\t"
                     _ASM_EXTABLE(1b, 4b)
                     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
        return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read16(field);
        return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read32(field);
        return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
#ifdef CONFIG_X86_64
        return __vmcs_readl(field);
#else
        return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
        return __vmcs_readl(field);
}

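/*
 * Wrappers for one- and two-operand VMX instructions.  On success the
 * instruction falls through; VM-fail (CF or ZF set, caught by "jna")
 * branches to the "error" label, which invokes the instruction-specific
 * *_error() reporting helper; a fault is routed via the exception table
 * to the "fault" label, which reports a spurious fault.
 */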
#define vmx_asm1(insn, op1, error_args...)                              \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1 : "cc" : error, fault);               \
        return;                                                         \
error:                                                                  \
        instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
        instrumentation_end();                                          \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)                         \
do {                                                                    \
        asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1, op2 : "cc" : error, fault);          \
        return;                                                         \
error:                                                                  \
        instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
        instrumentation_end();                                          \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
        vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write16(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
#ifndef CONFIG_X86_64
        __vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_clear_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) & ~mask);

        __vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_set_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) | mask);

        __vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_load(phys_addr);

        vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

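/*
 * INVVPID and INVEPT take the flush type in a register and a 128-bit
 * descriptor in memory.  For INVVPID the descriptor holds the VPID in
 * bits 15:0 and, for single-address flushes, the linear address in bits
 * 127:64; for INVEPT the low quadword holds the EPT pointer.
 */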
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
        struct {
                u64 vpid : 16;
                u64 rsvd : 48;
                u64 gva;
        } operand = { vpid, 0, gva };

        vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
        struct {
                u64 eptp, gpa;
        } operand = {eptp, gpa};

        vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

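/*
 * A VPID of 0 means VPID tagging is not in use for the vCPU, so there
 * is no per-VPID TLB context to flush.
 */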
static inline void vpid_sync_vcpu_single(int vpid)
{
        if (vpid == 0)
                return;

        __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
        __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
        if (cpu_has_vmx_invvpid_single())
                vpid_sync_vcpu_single(vpid);
        else if (vpid != 0)
                vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
        if (vpid == 0)
                return;

        if (cpu_has_vmx_invvpid_individual_addr())
                __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
        else
                vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
        __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
        if (cpu_has_vmx_invept_context())
                __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
        else
                ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */