// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}
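
/*
 * Illustrative sketch, not part of this file: a guest typically reaches
 * handle_hvc() via an SMCCC call, with the function ID in x0 and the result
 * returned in x0. The ~0UL written on failure above matches
 * SMCCC_RET_NOT_SUPPORTED (-1). Hypothetical guest-side assembly, assuming
 * an SMCCC conduit of HVC:
 *
 *	mov	x0, #0x80000000		// ARM_SMCCC_VERSION_FUNC_ID
 *	hvc	#0
 *	// x0 now holds the SMCCC version, or -1 if not supported
 */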

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest accesses to FP/ASIMD registers are routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer.
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu_set_flag(vcpu, IN_WFIT);

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}
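
/*
 * Illustrative sketch, not part of this file: the WFIT path above compares
 * an absolute CNTVCT_EL0 deadline, supplied by the guest in the Rt register,
 * against the current virtual counter. Hypothetical guest-side usage,
 * assuming FEAT_WFxT:
 *
 *	mrs	x0, cntvct_el0		// current virtual count
 *	add	x0, x0, #0x1000		// deadline: now + 0x1000 ticks
 *	wfit	x0			// wait, but no later than x0
 *
 * If the deadline has already passed (now >= val above), the handler skips
 * the halt and simply advances the PC.
 */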

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		break;
	}

	return 0;
}
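
/*
 * Illustrative sketch, not part of this file: a VMM sees the exit prepared
 * above as KVM_EXIT_DEBUG on the shared kvm_run page. A minimal userspace
 * check, assuming a mmap'd "run" structure:
 *
 *	if (run->exit_reason == KVM_EXIT_DEBUG) {
 *		u64 esr = run->debug.arch.hsr;
 *
 *		if (run->flags & KVM_DEBUG_ARCH_HSR_HIGH_VALID)
 *			esr |= (u64)run->debug.arch.hsr_high << 32;
 *		// decode the EC and re-inject or report the exception
 *	}
 */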

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}
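
/*
 * Illustrative note: ESR_ELx_EC() extracts the exception class from
 * ESR_ELx bits [31:26], so the lookup above is a direct 6-bit index into
 * arm_exit_handlers[]. For example, an ESR of 0x5a000000 has EC 0b010110
 * (ESR_ELx_EC_HVC64), selecting handle_hvc(); any EC left unnamed in the
 * table falls through to kvm_handle_unknown_ec() via the range initializer.
 */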

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
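
/*
 * Illustrative sketch, not part of this file: the vcpu run loop in
 * arch/arm64/kvm/arm.c consumes the convention above roughly as follows
 * (simplified, names abridged):
 *
 *	while (ret > 0) {
 *		ret = kvm_arm_vcpu_enter_exit(vcpu);	// run the guest
 *		ret = handle_exit(vcpu, ret);
 *	}
 *	// ret == 0: exit_reason is set for userspace; ret < 0: error
 */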

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
				(void *)(panic_addr + kaslr_offset()));
	} else {
		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
			(void *)(panic_addr + kaslr_offset()));
	}

	/* Dump the nVHE hypervisor backtrace */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for
	 * translating hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
374 panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
375 spsr, elr_virt, esr, far, hpfar, par, vcpu);