// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

extern const char __hyp_panic_string[];

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);	/* FPEXC32_EL2.EN */
		isb();
	}
}
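
/* Trap controls shared by the VHE and nVHE guest-entry paths. */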
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
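
/* Undo __activate_traps_common(): clear HSTR_EL2 and PMUSERENR_EL0. */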
static inline void __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
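
/*
 * Program HCR_EL2 for the guest; with the RAS extension, also seed the
 * syndrome of any pending vSError into VSESR_EL2.
 */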
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}
}
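
/*
 * Walk stage-1 with an AT S1E1R to convert a guest VA (FAR) into an
 * HPFAR-format IPA.
 */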
static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg_par();
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg_par();
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}
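
/*
 * Capture FAR/HPFAR for guest data/instruction aborts. Returns false if the
 * IPA could not be resolved and the guest should replay the faulting access.
 */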
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 esr_ec;

	if (!system_supports_fpsimd())
		return false;

	/*
	 * system_supports_sve() currently implies has_vhe(), so the check
	 * is redundant. However, has_vhe() can be determined statically
	 * and helps the compiler remove dead code.
	 */
	if (has_vhe() && system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	esr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
	    esr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap. Switch the context: */
	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}
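
/*
 * Cavium ThunderX2 erratum 219 workaround: HCR_EL2.TVM is set to trap guest
 * VM-register writes; emulate them here when KVM doesn't otherwise need the
 * trap.
 */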
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

static inline bool esr_is_ptrauth_trap(u32 esr)
{
	u32 ec = ESR_ELx_EC(esr);

	if (ec == ESR_ELx_EC_PAC)
		return true;

	if (ec != ESR_ELx_EC_SYS64)
		return false;

	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}
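
/* Save one pointer-authentication key pair (LO/HI) into a cpu context. */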
#define __ptrauth_save_key(ctxt, key)					\
do {									\
	u64 __val;							\
	__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);		\
	ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;			\
	__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);		\
	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;			\
} while (0)

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
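
/*
 * On the first trapped ptrauth instruction or key access, save the host keys
 * and give the guest direct use of pointer authentication from then on.
 */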
static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu) ||
	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code)) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		goto guest;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		goto guest;

	if (__hyp_handle_ptrauth(vcpu))
		goto guest;

	if (!__populate_fault_info(vcpu))
		goto guest;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				goto guest;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			goto guest;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = hyp_symbol_addr(__start___kvm_ex_table);
	end = hyp_symbol_addr(__stop___kvm_ex_table);
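
	/*
	 * Each table entry stores its insn/fixup fields as offsets relative
	 * to the entry itself, so turn them into absolute addresses before
	 * comparing against ELR_EL2.
	 */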
	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */