// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
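
/*
 * This file implements the world switch between host and guest. Functions
 * marked __hyp_text are linked into the hyp section so that, on non-VHE
 * systems, they can run from the EL2 mapping of the kernel.
 */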

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}
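
/*
 * Lazy FP/SIMD switch protocol: KVM_ARM64_FP_HOST means the host's FP/SIMD
 * state is live on the CPU and must be saved before the guest's state is
 * loaded; KVM_ARM64_FP_ENABLED means the guest's state is currently loaded.
 * The actual context switch happens on the first trapped FP/SIMD access,
 * in __hyp_handle_fpsimd() below.
 */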

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
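		/* Bit 30 of FPEXC32_EL2 is FPEXC.EN (FP/SIMD enable). */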
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
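	/*
	 * CPACR_EL1.ZEN and FPEN are two-bit enable fields; clearing ZEN
	 * traps SVE accesses from EL0 and EL1 until the first guest use.
	 */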
	val &= ~CPACR_EL1_ZEN;

	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same position with or without
	 * VHE (HCR.E2H == 1) which allows us to use here the CPTR_EL2.TAM
	 * shift value for trapping the AMU accesses.
	 */
	val |= CPTR_EL2_TAM;

	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);
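
/*
 * The VHE world-switch helpers run with the guest context partially loaded
 * and with the exception vectors switched away from the host's; kprobing
 * them would be fatal, hence the NOKPROBE_SYMBOL() annotations in this file.
 */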

static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
	}
}

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps_nvhe (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
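	/*
	 * Setting E2PB to 0b11 hands ownership of the SPE profiling buffer
	 * controls back to EL1 now that we are done with the guest.
	 */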
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
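	/* Stop using the guest's stage 2 tables. */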
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
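	/*
	 * AT S1E1R walks stage 1 as a read from EL1; the result (including
	 * the F bit on failure) lands in PAR_EL1 once the isb() completes.
	 */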
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 hsr_ec;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
	    hsr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap. Switch the context: */
	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}
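
/*
 * TX2 erratum 219 workaround: with HCR_EL2.TVM set (see __activate_traps()),
 * guest writes to the EL1 virtual-memory control registers trap here and are
 * applied directly to hardware, sparing the full sysreg emulation path.
 */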
static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		return true;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;
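
	/*
	 * When the vgic_v2_cpuif_trap static key is set, guest accesses to
	 * the GICV CPU interface trap to EL2 and are emulated here.
	 */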
	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_final_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
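
/*
 * The SMCCC ARCH_WORKAROUND_2 call toggles the Spectre-v4 (SSBD) mitigation
 * in firmware: the second argument is 1 to enable it, 0 to disable it.
 */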
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/**
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

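	/*
	 * PMCNTENSET_EL0/PMCNTENCLR_EL0 are write-one-to-set/clear registers,
	 * so each write below only affects the counters named in the mask.
	 */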
	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs above. We must now call __activate_vm
	 * before __activate_traps, because __activate_vm configures
	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
	 * (among other things).
	 */
	__activate_vm(vcpu->kvm);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);

int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	int ret;

	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
	 * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
	 */
	pmr_sync();

	ret = __kvm_vcpu_run_vhe(vcpu);

	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps. Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();

	return ret;
}

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		dsb(sy);
	}

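	/*
	 * Translate the vcpu pointer from a kernel VA to its HYP alias: on
	 * non-VHE systems, EL2 runs with its own virtual address mapping.
	 */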
	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	__activate_vm(kern_hyp_va(vcpu->kvm));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;

	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}