/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							  bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "push $0\n\t"
		     "push %2\n\t"
		     "2:call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     ".pushsection .fixup, \"ax\"\n\t"
		     "4: push $1\n\t"
		     "push %2\n\t"
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 4b)
		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}
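
/*
 * Usage sketch: the width-specific accessors are called with the field
 * constants from asm/vmx.h and operate on the currently loaded VMCS, e.g.:
 *
 *	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 *	unsigned long rip = vmcs_readl(GUEST_RIP);
 *
 * A width mismatch such as vmcs_read16(GUEST_RIP) fails the build via the
 * vmcs_check*() assertions whenever the field is a compile-time constant.
 */
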
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)
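
/*
 * Control-flow sketch for the macros above: the instruction is emitted once
 * and falls straight through on success.  "jna" (CF=1 or ZF=1, i.e.
 * VMfailInvalid or VMfailValid) jumps to the local error label, which calls
 * the matching *_error() reporting helper; a hardware fault on the
 * instruction itself is routed via the exception table to the fault label,
 * which calls kvm_spurious_fault().
 */
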
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}
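
/*
 * Usage sketch, mirroring the read side (sel, offset and rip are
 * placeholder values):
 *
 *	vmcs_write16(GUEST_CS_SELECTOR, sel);
 *	vmcs_write64(TSC_OFFSET, offset);
 *	vmcs_writel(GUEST_RIP, rip);
 *
 * On 32-bit kernels vmcs_write64() issues two VMWRITEs, one per half of
 * the 64-bit field.
 */
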
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}
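
/*
 * Usage sketch: the read-modify-write helpers are typically used to toggle
 * bits in 32-bit control fields, e.g.:
 *
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 *	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 *
 * 64-bit fields are rejected at build time by the checks above.
 */
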
static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}
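
/*
 * Lifecycle sketch (illustrative; "vmcs" is whatever struct vmcs the caller
 * owns): a VMCS is VMCLEARed to initialize/flush its in-memory state and
 * then VMPTRLDed on the CPU that will use it:
 *
 *	vmcs_clear(vmcs);
 *	vmcs_load(vmcs);
 */
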
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}
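
/*
 * Usage sketch: after EPT modifications or when recycling a VPID, callers
 * invalidate with the sync helpers, which fall back to a coarser flush when
 * the finer INVEPT/INVVPID type is not supported:
 *
 *	vpid_sync_context(vpid);	single-context if available, else global
 *	ept_sync_context(eptp);		single-context if available, else ept_sync_global()
 */
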
#endif /* __KVM_X86_VMX_INSN_H */