/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"
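
/*
 * Note: "kvm-asm-offsets.h" is a build-time generated header (produced via
 * the kernel's asm-offsets mechanism); it supplies the SVM_* and KVM_VMCB_*
 * structure offsets used throughout this file.
 */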

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8  (SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9  (SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)
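
/*
 * Worked example (illustrative; assumes the usual <asm/kvm_vcpu_regs.h>
 * numbering where __VCPU_REGS_RCX == 1): on a 64-bit build WORD_SIZE is 8,
 * so VCPU_RCX expands to SVM_vcpu_arch_regs + 8, and
 * "mov VCPU_RCX(%rdi), %rcx" reads svm->vcpu.arch.regs[VCPU_REGS_RCX]
 * when RDI holds a struct vcpu_svm pointer.
 */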

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 */
SYM_FUNC_START(__svm_vcpu_run)

.ifnc _ASM_ARG1, _ASM_DI
        /* Move @svm to RDI. */
        mov %_ASM_ARG1, %_ASM_DI
.endif
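
/*
 * Note: on 64-bit builds _ASM_ARG1 is already RDI (System V ABI), so the
 * .ifnc above elides the mov entirely; it only assembles on 32-bit, where
 * the first argument arrives in EAX under the kernel's regparm convention.
 */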

        /*
         * Use a single vmcb (vmcb01 because it's always valid) for
         * context switching guest state via VMLOAD/VMSAVE, that way
         * the state doesn't need to be copied between vmcb01 and
         * vmcb02 when switching vmcbs for nested virtualization.
         */
        mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:      vmload %_ASM_AX
2:

        /* Get svm->current_vmcb->pa into RAX. */
        mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
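
        /* In C terms: RAX = svm->current_vmcb->pa, the VMCB's physical address. */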

        /* Load guest registers. */
        mov VCPU_RCX(%_ASM_DI), %_ASM_CX
        mov VCPU_RDX(%_ASM_DI), %_ASM_DX
        mov VCPU_RBX(%_ASM_DI), %_ASM_BX
        mov VCPU_RBP(%_ASM_DI), %_ASM_BP
        mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_DI), %r8
        mov VCPU_R9 (%_ASM_DI), %r9
        mov VCPU_R10(%_ASM_DI), %r10
        mov VCPU_R11(%_ASM_DI), %r11
        mov VCPU_R12(%_ASM_DI), %r12
        mov VCPU_R13(%_ASM_DI), %r13
        mov VCPU_R14(%_ASM_DI), %r14
        mov VCPU_R15(%_ASM_DI), %r15
#endif
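
        /* RDI is loaded last: the mov below clobbers the @svm pointer. */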
        mov VCPU_RDI(%_ASM_DI), %_ASM_DI

        /* Enter guest mode */
        sti

3:      vmrun %_ASM_AX
4:
        cli

        /* Pop @svm to RAX while it's the only available register. */
        pop %_ASM_AX

        /* Save all guest registers. */
        mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* @svm can stay in RDI from now on. */
        mov %_ASM_AX, %_ASM_DI

        mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:      vmsave %_ASM_AX
6:

#ifdef CONFIG_RETPOLINE
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
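
/*
 * Why stuff the RSB above: the guest may have pushed attacker-controlled
 * return targets onto the return stack buffer, so overwriting every entry
 * before the first host RET keeps return-address prediction from
 * speculating to a guest-chosen target.
 */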

        /*
         * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
         * untrained as soon as we exit the VM and are back to the
         * kernel. This should be done before re-enabling interrupts
         * because interrupt handlers won't sanitize 'ret' if the return is
         * from the kernel.
         */
        UNTRAIN_RET

        /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack. In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free. RSP and RAX are exempt as they are restored by hardware
         * during VM-Exit.
         */
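
        /*
         * Sketch of the zeroing described above (illustrative; the exact
         * instruction sequence is an assumption, not copied from the
         * original). 32-bit XORs suffice: writing a 32-bit register also
         * clears the upper 32 bits on x86_64.
         */
        xor %ecx, %ecx
        xor %edx, %edx
        xor %ebx, %ebx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif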

10:     cmpb $0, kvm_rebooting
        jne 2b
        ud2
30:     cmpb $0, kvm_rebooting
        jne 4b
        ud2
50:     cmpb $0, kvm_rebooting
        jne 6b
        ud2

        _ASM_EXTABLE(1b, 10b)
        _ASM_EXTABLE(3b, 30b)
        _ASM_EXTABLE(5b, 50b)
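
/*
 * The exception table pairs each faulting instruction (1: vmload, 3: vmrun,
 * 5: vmsave) with a handler (10/30/50) that resumes past the instruction if
 * KVM is shutting down (kvm_rebooting set) and otherwise escalates via ud2.
 */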

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)

        /* Get svm->current_vmcb->pa into RAX. */
        mov SVM_current_vmcb(%_ASM_ARG1), %_ASM_AX
        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
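
        /*
         * Unlike __svm_vcpu_run, no manual GPR loads/saves are needed here:
         * hardware context-switches SEV-ES guest register state through the
         * encrypted VMSA, so only VMRUN is issued.
         */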

        /* Enter guest mode */
        sti

1:      vmrun %_ASM_AX
2:
        cli

#ifdef CONFIG_RETPOLINE
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

        /*
         * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
         * untrained as soon as we exit the VM and are back to the
         * kernel. This should be done before re-enabling interrupts
         * because interrupt handlers won't sanitize RET if the return is
         * from the kernel.
         */
        UNTRAIN_RET

3:      cmpb $0, kvm_rebooting
        jne 2b
        ud2

        _ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)