/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

#define WORD_SIZE (BITS_PER_LONG / 8)
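
/* WORD_SIZE is the GPR size in bytes: 8 on 64-bit builds, 4 on 32-bit. */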
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif /* CONFIG_X86_64 */
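
/*
 * Example (illustrative): with __VCPU_REGS_RCX == 1 from
 * asm/kvm_vcpu_regs.h and WORD_SIZE == 8 on a 64-bit build, VCPU_RCX
 * expands to byte offset 8, so "mov VCPU_RCX(%_ASM_AX), %_ASM_CX" below
 * loads @regs[__VCPU_REGS_RCX], i.e. the guest's RCX.
 */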

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 * @regs:	unsigned long * (to guest registers)
 */
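/*
 * From the C side this is reached roughly as follows (illustrative; the
 * exact call site varies by kernel version):
 *
 *	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 */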
SYM_FUNC_START(__svm_vcpu_run)
	/* Preserve the host's RBP, it is clobbered by the guest RBP load below. */
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP
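
	/*
	 * Sketch of the usual prologue (assumed, not verbatim): save the
	 * host's callee-saved registers, then stash the arguments so they
	 * survive VMRUN clobbering the argument registers.
	 */
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1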

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX
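
	/*
	 * _ASM_ARG2 is %rsi on 64-bit builds and %edx on 32-bit ones.
	 * @regs can live in RAX across the guest-register loads below
	 * because guest RAX itself is not loaded here; hardware loads it
	 * from the VMCB on VMRUN.
	 */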

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
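
	/*
	 * Guest RAX and RSP are deliberately not loaded from @regs; hardware
	 * context switches them via the VMCB on VMRUN and #VMEXIT.
	 */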

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX
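
	/*
	 * vmload, vmrun and vmsave all take the VMCB's physical address in
	 * rAX, which is why @vmcb_pa is "POP"ed into RAX here.
	 */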

	/* Enter guest mode */
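	/*
	 * Fault handling for the SVM instructions, sketched assuming the
	 * usual vmload/vmrun/vmsave sequence: each instruction has an
	 * exception-table entry (_ASM_EXTABLE) redirecting a fault to its
	 * "cmpb $0, kvm_rebooting" check.  A fault while KVM is rebooting
	 * is expected and skipped over; any other fault is a bug, so ud2
	 * raises #UD.
	 */
1:	vmload %_ASM_AX			/* sketch: load extra guest state */
	jmp 3f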
2:	cmpb $0, kvm_rebooting
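	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX			/* sketch: world switch to the guest */
	jmp 5f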
4:	cmpb $0, kvm_rebooting
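	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX			/* sketch: save extra guest state */
	jmp 7f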
6:	cmpb $0, kvm_rebooting
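	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)

7:	cli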

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif
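
	/*
	 * As with the loads above, guest RAX and RSP need no explicit save;
	 * hardware stored them in the VMCB on #VMEXIT.
	 */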

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
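	/*
	 * Sketch of the clearing and epilogue (assumed, not verbatim):
	 * 32-bit XORs also zero the upper halves on x86_64, then the host's
	 * callee-saved registers are restored and the function returns.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret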
SYM_FUNC_END(__svm_vcpu_run)