// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

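/*
 * Trap configuration for a guest run: trace (CPTR_EL2_TTA), SVE
 * (CPTR_EL2_TZ) and AMU (CPTR_EL2_TAM) accesses always trap to EL2;
 * FP/SIMD traps (CPTR_EL2_TFP) are only armed while the guest's FP
 * state is not resident on the CPU.
 */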
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}

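/*
 * Undo __activate_traps(): rebuild the host's MDCR_EL2 and HCR_EL2
 * configuration, drop the EL2 traps and point VBAR_EL2 back at the
 * host vectors.
 */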
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 mdcr_el2;

	___deactivate_traps(vcpu);

	mdcr_el2 = read_sysreg(mdcr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps() (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
	mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	if (is_protected_kvm_enabled())
		write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
	else
		write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

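/*
 * The nVHE host runs with stage-2 translation disabled, so a zero
 * VTTBR_EL2 mostly serves to restore VMID 0 for the host (HCR_EL2.VM
 * has already been cleared by __deactivate_traps()).
 */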
static void __load_host_stage2(void)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

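/*
 * The host pre-computes the events_host/events_guest masks (stashed in
 * kvm_host_data); at EL2 the switch only has to flip the corresponding
 * bits in PMCNTENSET_EL0/PMCNTENCLR_EL0.
 */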
/*
 * Disable host events, enable guest events
 */
static bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

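/*
 * The world switch proper: save the host context, install the guest's
 * sysregs, stage-2 translation and traps, run the guest until
 * fixup_guest_exit() hands an exit back to the host, then restore the
 * host context in roughly the opposite order.
 */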
/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal lower-priority interrupts to the CPU, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	/* Apply any pending PC increment or exception-injection adjustment. */
	__adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

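	/*
	 * fixup_guest_exit() returns true when the exit has been handled
	 * entirely at EL2 (a lazy FP/SIMD switch, for instance), in which
	 * case the guest is re-entered without bouncing back to the host.
	 */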
	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

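	/*
	 * FPEXC32_EL2 accesses trap while CPTR_EL2.TFP is set, so the 32-bit
	 * FP control state can only be saved now that __deactivate_traps()
	 * has run, and only needs saving if the guest owned the FP state.
	 */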
	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}

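/*
 * Unrecoverable error at EL2: restore enough host context (traps,
 * stage-2, sysregs) for the panic path to run safely, then hand over to
 * __hyp_do_panic(). A NULL __hyp_running_vcpu means the error hit in
 * host hyp context, where there is nothing to unwind.
 */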
void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}

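/*
 * Unexpected exception at EL2: the common helper looks up a fixup for
 * the faulting instruction in the KVM exception table, and forces a
 * hyp panic if there is none.
 */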
asmlinkage void kvm_unexpected_el2_exception(void)
{
	return __kvm_unexpected_el2_exception();
}