/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <kvm/arm_psci.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}
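/*
 * The check above is best read as: anything that is not a standard RAS
 * SError (no architected syndrome), or that the RAS code classifies as
 * fatal, is reflected to the guest as a virtual SError. Contained,
 * non-fatal RAS SErrors are assumed to have been handled elsewhere and
 * are silently consumed here.
 */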
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}
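/*
 * A negative return from kvm_hvc_call_handler() means the hypercall was not
 * recognised (e.g. not a PSCI/SMCCC function this host implements); in that
 * case x0 is loaded with ~0UL, i.e. -1, which SMCCC-style callers treat as
 * "not supported", and the guest is still resumed (return 1).
 */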
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}
/*
 * Guest accesses to FP/ASIMD registers are routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to run it again.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;
}
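/*
 * In both the WFE and WFI cases the trap leaves the PC pointing at the WFx
 * instruction itself, so kvm_skip_instr() must advance it (by 2 or 4 bytes
 * depending on the trapped instruction's width) before the guest is resumed;
 * otherwise the guest would trap on the same instruction again.
 */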
/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 * @run:	access to the kvm_run structure for results
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting run->exit_reason), -1 for error
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int ret = 0;

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = hsr;

	switch (ESR_ELx_EC(hsr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		/* fall through */
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_BKPT32:
	case ESR_ELx_EC_BRK64:
		break;
	default:
		kvm_err("%s: un-handled case hsr: %#08x\n",
			__func__, (unsigned int) hsr);
		ret = -1;
		break;
	}

	return ret;
}
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
		      hsr, esr_get_class_string(hsr));

	kvm_inject_undefined(vcpu);
	return 1;
}
static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* Until SVE is supported for guests: */
	kvm_inject_undefined(vcpu);
	return 1;
}
#define __ptrauth_save_key(regs, key)						\
({										\
	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
})
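/*
 * Example expansion (sketch): __ptrauth_save_key(ctxt->sys_regs, APIA)
 * reads SYS_APIAKEYLO_EL1 and SYS_APIAKEYHI_EL1 and stores them in the
 * APIAKEYLO_EL1 and APIAKEYHI_EL1 slots of the sys_regs array, i.e. it
 * snapshots one pointer-authentication key pair into a kvm_cpu_context.
 */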
/*
 * Handle the guest trying to use a ptrauth instruction, or trying to access a
 * ptrauth register.
 */
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *ctxt;

	if (vcpu_has_ptrauth(vcpu)) {
		vcpu_ptrauth_enable(vcpu);
		ctxt = vcpu->arch.host_cpu_context;
		__ptrauth_save_key(ctxt->sys_regs, APIA);
		__ptrauth_save_key(ctxt->sys_regs, APIB);
		__ptrauth_save_key(ctxt->sys_regs, APDA);
		__ptrauth_save_key(ctxt->sys_regs, APDB);
		__ptrauth_save_key(ctxt->sys_regs, APGA);
	} else {
		kvm_inject_undefined(vcpu);
	}
}
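/*
 * The host's key registers are snapshotted here because, once ptrauth traps
 * are disabled for this vCPU, the guest's keys become live across the world
 * switch; saving the host values into host_cpu_context lets them be restored
 * on return to the host. (This is the presumed rationale; the actual
 * save/restore lives in the hyp switch code, not in this file.)
 */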
/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP).
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_arm_vcpu_ptrauth_trap(vcpu);
	return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};
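/*
 * Dispatch is a plain table lookup: the array is indexed by the exception
 * class (EC) field of ESR_EL2, the "[0 ... ESR_ELx_EC_MAX]" designated
 * initializer fills every slot with kvm_handle_unknown_ec first, and the
 * explicit entries then override the classes KVM actually handles.
 */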
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	u8 hsr_ec = ESR_ELx_EC(hsr);

	return arm_exit_handlers[hsr_ec];
}
/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu, run);
	}

	return handled;
}
/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));

		/*
		 * HVC/SMC already have an adjusted PC, which we need
		 * to wind back so that we return to the right address
		 * after having injected the SError.
		 */
		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
			*vcpu_pc(vcpu) -= adj;
		}

		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu, run);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
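/*
 * Usage sketch (assuming the usual arm/arm64 vCPU run loop in
 * kvm_arch_vcpu_ioctl_run(); the surrounding code is not part of this file):
 *
 *	ret = <world switch>;			// exit code from EL2
 *	ret = handle_exit(vcpu, run, ret);
 *	// ret > 0: re-enter the guest; ret <= 0: return to userspace / error
 */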
/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
}
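/*
 * handle_exit_early() is presumably run on the exit path before the host can
 * be preempted, so that a pending SError's RAS syndrome (DISR_EL1, read via
 * kvm_vcpu_get_disr()) is consumed on the same physical CPU that took the
 * exit; deferring it until after preemption could lose or misattribute the
 * error record.
 */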