arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Guest PC manipulation helpers
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Copyright (C) 2020 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 */

#ifndef __ARM64_KVM_HYP_ADJUST_PC_H__
#define __ARM64_KVM_HYP_ADJUST_PC_H__

#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

void kvm_inject_exception(struct kvm_vcpu *vcpu);

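/*
 * Skip the instruction that has just trapped, now that it has been
 * emulated. For an AArch32 guest, kvm_skip_instr32() deals with the
 * variable instruction width (A32 vs. Thumb) and the IT state; for
 * AArch64 the PC simply advances by 4 and PSTATE.BTYPE is cleared.
 * Either way, the SPSR SS bit is cleared so that the single-step
 * state machine advances as if the instruction had actually executed.
 */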
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		kvm_skip_instr32(vcpu);
	} else {
		*vcpu_pc(vcpu) += 4;
		*vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
	}

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	/* Load the guest PC/PSTATE from the live EL2 registers... */
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);

	/* ...skip the instruction on the vcpu's saved copies... */
	kvm_skip_instr(vcpu);

	/* ...and write the updated values back to the hardware. */
	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

/*
 * Adjust the guest PC on entry, depending on flags provided by EL1
 * for the purpose of emulation (MMIO, sysreg) or exception injection.
 */
static inline void __adjust_pc(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
		kvm_inject_exception(vcpu);
		vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
				      KVM_ARM64_EXCEPT_MASK);
	} else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
		kvm_skip_instr(vcpu);
		vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
	}
}
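
/*
 * Note: in this tree the PC increment is normally requested from EL1 via
 * kvm_incr_pc() in asm/kvm_emulate.h, which sets KVM_ARM64_INCREMENT_PC,
 * while exception injection sets KVM_ARM64_PENDING_EXCEPTION together
 * with one of the KVM_ARM64_EXCEPT_* values. Whichever is pending is
 * applied once on the next guest entry, a pending exception taking
 * priority over the PC increment.
 */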

#endif