// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

extern const char __hyp_panic_string[];

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}
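
/*
 * Set up the EL2 trap state common to the VHE and nVHE paths: AArch32
 * impdef cp15 traps, EL0 PMU trapping, and the vcpu's MDCR_EL2 debug/PMU
 * configuration.
 */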
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
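
/* Undo the EL2 trap configuration installed by __activate_traps_common(). */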
static inline void __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
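
/*
 * Install the vcpu's HCR_EL2 configuration, upgraded with HCR_TVM on CPUs
 * affected by Cavium ThunderX2 erratum 219, plus the virtual SError
 * syndrome (VSESR_EL2) when one is pending and the CPU has the RAS
 * extension.
 */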
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}
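
/* Switch the stage 2 translation to the one described by @mmu. */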
static inline void __activate_vm(struct kvm_s2_mmu *mmu)
{
	__load_guest_stage2(mmu);
}
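
/*
 * Translate a guest virtual address to an IPA in HPFAR_EL2 format, using
 * an AT S1E1R walk issued from EL2. Returns false if the walk aborts, in
 * which case we go back to the guest and let it retry the access.
 */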
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}
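
/*
 * Capture the fault state (ESR, FAR and, for aborts, HPFAR) for the exit
 * handler. Returns false if the IPA could not be resolved, in which case
 * we return to the guest to replay the faulting instruction.
 */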
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 esr_ec;

	if (!system_supports_fpsimd())
		return false;

	/*
	 * Currently, system_supports_sve() implies has_vhe(), so the
	 * check is redundant. However, has_vhe() can be determined
	 * statically and helps the compiler remove dead code.
	 */
	if (has_vhe() && system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	esr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
	    esr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
		return false;

	/* Valid trap. Switch the context: */

	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();
	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}
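
/*
 * Cavium ThunderX2 erratum 219 workaround: when HCR_EL2.TVM was set purely
 * because of the erratum, emulate the trapped guest writes to the EL1
 * virtual-memory control registers here rather than forwarding them to the
 * normal sysreg handling code.
 */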
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}
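
/*
 * Check whether an ESR value describes a ptrauth trap: either a trapped
 * PAC instruction, or a system register access to one of the ptrauth keys.
 */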
static inline bool esr_is_ptrauth_trap(u32 esr)
{
	u32 ec = ESR_ELx_EC(esr);

	if (ec == ESR_ELx_EC_PAC)
		return true;

	if (ec != ESR_ELx_EC_SYS64)
		return false;

	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}
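
/* Read one ptrauth key (LO/HI register pair) into the given context. */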
#define __ptrauth_save_key(ctxt, key)					\
do {									\
	u64 __val;							\
	__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);		\
	ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;			\
	__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);		\
	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;			\
} while (0)

static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu) ||
	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return false;

	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		return true;

	/*
	 * We trap the first access to the FP/SIMD registers to save the host
	 * context and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (__hyp_handle_ptrauth(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}
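
/*
 * A minimal sketch of the intended use of fixup_guest_exit(), loosely based
 * on the VHE/nVHE __kvm_vcpu_run() loops (details differ per configuration):
 *
 *	do {
 *		exit_code = __guest_enter(vcpu, host_ctxt);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */

/* Does this vcpu want the SSBD (Spectre-v4) mitigation turned off? */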
static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_final_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */