/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>
#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)
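
/*
 * CPU_USER_PT_REGS is the offset of the saved GPR array within struct
 * kvm_cpu_context, so CPU_XREG_OFFSET(n) addresses saved register xn,
 * and SP_EL0 is stored in the slot immediately after x30.
 */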

	.text

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28: the loads below would clobber
	// the base register partway through the restore.
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp, sp_el0
	str	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
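 *
 * Returns the exit code in x0: ARM_EXCEPTION_IRQ if the entry was aborted
 * because an asynchronous exception was already pending, otherwise the
 * code passed to __guest_exit, possibly with ARM_EXIT_WITH_SERROR_BIT set.
 */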
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x29: guest context

	// Store the host regs
	save_callee_saved_regs x1

	// Save the host's sp_el0 (the host keeps its 'current' pointer there)
	save_sp_el0	x1, x2

	// Now that the host state is stored, if we have a pending RAS SError
	// it must affect the host. If any asynchronous exception is pending
	// we defer the guest entry. The DSB isn't necessary before v8.2 as
	// any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1,  1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
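	// Point x29 at the guest context embedded in the vcpu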
	add	x29, x0, #VCPU_CONTEXT

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The below macro to restore guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT
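
	// If the CPU has PAN, make sure it is set while we run host-side
	// code, whatever state the exception from the guest left it in.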
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	get_host_ctxt	x2, x3		// x2: host context

	// Macro ptrauth_switch_to_host format:
	// 	ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The below macro to save/restore keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_host x1, x2, x3, x4, x5

	// Restore the host's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2
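
	// All host state is restored and x0 holds the exit code. Before
	// returning, check whether the guest left a pending SError: with RAS
	// via DISR_EL1, without RAS via ISR_EL1 and the abort window below.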
alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
alternative_endif
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret

2:
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
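
	// Stash the EL2 exception context and the exit code: taking the
	// SError below overwrites ELR_EL2, ESR_EL2 and SPSR_EL2.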
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
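
	// If the SError is taken inside the window above, the hyp vectors
	// divert the return address to the 9997 fixup below.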
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL2 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
	ret
SYM_FUNC_END(__guest_enter)