// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
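
/*
 * Set up the EL2 trap controls for the guest: CPTR_EL2 traps trace (TTA),
 * SVE (TZ) and AMU (TAM) accesses, and also FP/SIMD (TFP) while the guest
 * doesn't own the FP registers, so that FP state can be switched lazily.
 */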
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}
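
/*
 * Undo __activate_traps: restore the host's trap configuration
 * (MDCR_EL2, HCR_EL2 and CPTR_EL2) once the guest's state has been saved.
 */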
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u64 mdcr_el2;

	___deactivate_traps(vcpu);

	mdcr_el2 = read_sysreg(mdcr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
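
/* Drop the guest's stage-2: a zero VTTBR_EL2 means base address 0, VMID 0 */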
static void __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/**
 * Disable host events, enable guest events
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		dsb(sy);
	}
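
	/* The vcpu pointer is a kernel VA; convert it for use at EL2 */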
	vcpu = kern_hyp_va(vcpu);

	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	__activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);
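
	/* Switch to the guest's view of the SSBD (ARCH_WORKAROUND_2) state */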
	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}
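
/*
 * Panic path for EL2. If a guest context is live (VTTBR_EL2 is non-zero),
 * tear down the traps and stage-2 so the host's sysregs are back in place
 * before the panic is reported.
 */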
void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);
	struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
	unsigned long str_va;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
		       read_sysreg(hpfar_el2), par, vcpu);
	unreachable();
}
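
/* Entered from the EL2 exception vectors for exceptions we never expect */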
asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}