// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

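/*
 * Hyp exception table entries. Both fields are offsets relative to the
 * field's own address; see __kvm_unexpected_el2_exception() below for how
 * they are turned back into absolute addresses.
 */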
struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

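/*
 * Program the trap controls shared by the VHE and nVHE switch code:
 * AArch32 cp15 c15 accesses (HSTR_EL2), EL0 PMU accesses (PMSELR_EL0 and
 * PMUSERENR_EL0), and the debug controls in MDCR_EL2.
 */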
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		write_sysreg(0, pmselr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3())
		write_sysreg(0, pmuserenr_el0);
}

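/*
 * Install the vcpu's HCR_EL2 configuration, forcing TVM traps when the
 * Cavium TX2 #219 workaround is in effect, and program VSESR_EL2 for any
 * pending virtual SError on systems with the RAS extension.
 */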
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

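/*
 * Snapshot the fault information for the current exit (see
 * __get_fault_info() in <hyp/fault.h>); returns false if it could not be
 * resolved.
 */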
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

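/*
 * Restore the guest's SVE state: set the effective vector length via
 * ZCR_EL2, reload the Z/P/FFR registers along with FPSR, then restore the
 * guest's ZCR_EL1.
 */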
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
		return false;

	/* Valid trap. Switch the context: */
	if (has_vhe()) {
		reg = CPACR_EL1_FPEN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN;
		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;
		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	/* Write out the host state if it's in the registers */
	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

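/*
 * Cavium ThunderX2 erratum #219 workaround: when HCR_EL2.TVM was set purely
 * for the workaround, emulate the trapped EL1 virtual-memory register write
 * here and skip the instruction. If the vcpu itself asked for TVM traps,
 * leave the exit for the normal sysreg handling path.
 */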
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:	write_sysreg_el1(val, SYS_SCTLR);	break;
	case SYS_TTBR0_EL1:	write_sysreg_el1(val, SYS_TTBR0);	break;
	case SYS_TTBR1_EL1:	write_sysreg_el1(val, SYS_TTBR1);	break;
	case SYS_TCR_EL1:	write_sysreg_el1(val, SYS_TCR);		break;
	case SYS_ESR_EL1:	write_sysreg_el1(val, SYS_ESR);		break;
	case SYS_FAR_EL1:	write_sysreg_el1(val, SYS_FAR);		break;
	case SYS_AFSR0_EL1:	write_sysreg_el1(val, SYS_AFSR0);	break;
	case SYS_AFSR1_EL1:	write_sysreg_el1(val, SYS_AFSR1);	break;
	case SYS_MAIR_EL1:	write_sysreg_el1(val, SYS_MAIR);	break;
	case SYS_AMAIR_EL1:	write_sysreg_el1(val, SYS_AMAIR);	break;
	case SYS_CONTEXTIDR_EL1: write_sysreg_el1(val, SYS_CONTEXTIDR); break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

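/*
 * Does the ESR describe a trapped access to one of the pointer
 * authentication key registers?
 */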
static inline bool esr_is_ptrauth_trap(u32 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

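/*
 * Save one pair of ptrauth key registers into a cpu context. For example,
 * __ptrauth_save_key(ctxt, APIA) reads SYS_APIAKEYLO_EL1/SYS_APIAKEYHI_EL1
 * and stores them in the corresponding ctxt_sys_reg() slots.
 */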
#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while (0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

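/*
 * Handle the guest's first trapped ptrauth key access: stash the host's
 * keys in the hyp context, enable ptrauth for the vcpu, and set
 * HCR_EL2.API/APK so subsequent ptrauth instructions and key accesses no
 * longer trap.
 */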
static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

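/*
 * Exit handler for trapped AArch64 system register accesses: the TX2 TVM
 * workaround, GICv3 CPU interface traps and ptrauth key traps can all be
 * handled here without bouncing the exit to the host.
 */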
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	return false;
}

static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}

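/*
 * Exit handler for stage-2 data aborts: record the fault information and,
 * when the vgic-v2 CPU interface is trapped, try to emulate the access
 * directly at EL2 before falling back to the host.
 */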
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 *
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Save PSTATE early so that we can evaluate the vcpu mode
	 * early on.
	 */
	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Check whether we want to repaint the state one way or
	 * another.
	 */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;

exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

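/*
 * An exception was taken at EL2 that we did not expect: walk the hyp
 * exception table for a fixup matching ELR_EL2 and branch there, otherwise
 * point ELR_EL2 at __guest_exit_panic so we panic once the hyp context has
 * been restored.
 */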
static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */