// SPDX-License-Identifier: GPL-2.0-only
/*
 * Guest PC manipulation helpers
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Copyright (C) 2020 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 */

#ifndef __ARM64_KVM_HYP_ADJUST_PC_H__
#define __ARM64_KVM_HYP_ADJUST_PC_H__

#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
16 void kvm_inject_exception(struct kvm_vcpu *vcpu);
18 static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
20 if (vcpu_mode_is_32bit(vcpu)) {
21 kvm_skip_instr32(vcpu);
24 *vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
27 /* advance the singlestep state machine */
28 *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
32 * Skip an instruction which has been emulated at hyp while most guest sysregs
35 static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
37 *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
38 vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
42 write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
43 write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
47 * Adjust the guest PC on entry, depending on flags provided by EL1
48 * for the purpose of emulation (MMIO, sysreg) or exception injection.
50 static inline void __adjust_pc(struct kvm_vcpu *vcpu)
52 if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
53 kvm_inject_exception(vcpu);
54 vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
55 KVM_ARM64_EXCEPT_MASK);
56 } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
58 vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;