// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
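
/*
 * Boundaries of the hyp-private exception fixup table; its entries are
 * walked by __kvm_unexpected_el2_exception() below.
 */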
extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2; however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		write_sysreg(0, pmselr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	}
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
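
/* Undo the EL0/EL1 trap configuration installed by __activate_traps_common(). */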
static inline void __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3())
		write_sysreg(0, pmuserenr_el0);
}
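
/*
 * Install the guest's HCR_EL2 configuration. On CPUs affected by the
 * Cavium TX2 erratum #219, also trap the virtual-memory control
 * registers (HCR_EL2.TVM); the resulting traps are handled by
 * handle_tx2_tvm().
 */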
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}

static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg_par();
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg_par();
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
	u64 hpfar, far;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	fault->far_el2 = far;
	fault->hpfar_el2 = hpfar;
	return true;
}
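
/*
 * Capture the fault syndrome for a guest abort while still in guest
 * context. Returns false if the IPA could not be resolved, in which
 * case the faulting access must be replayed in the guest.
 */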
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	return __get_fault_info(esr, &vcpu->arch.fault);
}
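
/* Save the host's SVE state, including FPSR/FPCR, from hyp. */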
static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
{
	struct thread_struct *thread;

	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
			      uw.fpsimd_state);

	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
}
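
/* Restore the guest's SVE state and effective vector length (ZCR). */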
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool sve_guest, sve_host;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
	} else {
		sve_guest = false;
		sve_host = false;
	}

	esr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
	    esr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
		return false;

	/* Valid trap. Switch the context: */
	if (has_vhe()) {
		reg = CPACR_EL1_FPEN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		if (sve_host)
			__hyp_sve_save_host(vcpu);
		else
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}
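
/*
 * Handle writes to the EL1 virtual-memory control registers that are
 * trapped by HCR_EL2.TVM as part of the Cavium TX2 erratum #219
 * workaround, then skip past the emulated instruction.
 */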
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}
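
/* Check whether an ESR value encodes a pointer authentication trap. */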
static inline bool esr_is_ptrauth_trap(u32 esr)
{
	u32 ec = ESR_ELx_EC(esr);

	if (ec == ESR_ELx_EC_PAC)
		return true;

	if (ec != ESR_ELx_EC_SYS64)
		return false;

	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}
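
/* Copy one pair of ptrauth key registers into the given context. */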
#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while (0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
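
/*
 * Handle the guest's first use of pointer authentication: save the host
 * keys into the hyp context, then let the guest use ptrauth without
 * further trapping (HCR_EL2.API/APK).
 */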
static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu) ||
	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code)) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC exceptions already have an adjusted PC, which we need
		 * to correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		goto guest;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		goto guest;

	if (__hyp_handle_ptrauth(vcpu))
		goto guest;

	if (!__populate_fault_info(vcpu))
		goto guest;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				goto guest;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			goto guest;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}
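
/*
 * An unexpected exception was taken from hyp itself: look for a fixup
 * handler for the faulting address in the KVM exception table and
 * branch to it, or panic once the hyp context has been restored.
 */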
static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */