/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

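/*
 * Illustrative note (not part of the upstream header): on a VHE host with
 * the RAS and FWB capabilities, a 64-bit guest vCPU leaves vcpu_reset_hcr()
 * with HCR_GUEST_FLAGS | HCR_E2H | HCR_TEA | HCR_TERR | HCR_FWB | HCR_TID3,
 * with HCR_RW (part of HCR_GUEST_FLAGS) still set for AArch64 EL1. Clearing
 * HCR_RW is precisely what makes vcpu_el1_is_32bit() return true.
 */
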
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_ELR);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_ELR);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

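/*
 * Illustrative usage sketch (not part of the upstream header): register 31
 * encodes XZR/WZR in the A64 ISA, so it reads as zero and writes to it are
 * discarded, which is why both accessors special-case it. A typical MMIO
 * load emulation path would look like:
 *
 *	int rd = kvm_vcpu_dabt_get_rd(vcpu);
 *	vcpu_set_reg(vcpu, rd, data);	// silently dropped if rd == 31
 */
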
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

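/*
 * Worked example (illustrative, not from the upstream header): an AArch64
 * view of 0x01200017 (DIT at bit 24 and SS at bit 21 set, ABT mode) becomes
 * 0x00200017 in the AArch32 view: both overlap bits are cleared, then DIT is
 * reinserted at bit 21; SS has no AArch32 equivalent and is simply dropped.
 */
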
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

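/*
 * Illustrative note (not part of the upstream header): for a conditional
 * AArch32 trap, ESR_ELx.CV is set and bits [23:20] hold the condition code,
 * e.g. 0xe for AL (always). A return value of -1 means the ESR did not
 * record a condition; for Thumb code, kvm_condition_valid32() then falls
 * back to the PSTATE.IT bits to decide whether the instruction passes.
 */
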
static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

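/*
 * Worked example (illustrative, not from the upstream header): HPFAR_EL2
 * holds IPA[47:12] in its FIPA field starting at bit 4, so the masked value
 * sits 8 bits to the right of the fault IPA. For a stage-2 fault at IPA
 * 0x456789, HPFAR_EL2 reads 0x4560 and this helper returns 0x456000, i.e.
 * the faulting page; the page offset must come from kvm_vcpu_get_hfar().
 */
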
static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

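/*
 * Illustrative note (not part of the upstream header): ESR_ELx.SAS encodes
 * the access size as log2(bytes), so kvm_vcpu_dabt_get_as() maps SAS values
 * 0/1/2/3 to 1/2/4/8 bytes. A trapped LDRH (halfword load), for example,
 * reports SAS = 1 and therefore an access size of 2.
 */
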
/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
						      bool flag)
{
	if (flag)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
	else
		vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);	/* SCTLR_EL1.EE */
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); /* SCTLR_EL1.EE */
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

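/*
 * Worked example (illustrative, not from the upstream header): a big-endian
 * guest performs a 16-bit MMIO store of 0x1234. The register value is
 * interpreted in guest byte order, so on a little-endian host
 * be16_to_cpu(0x1234) yields 0x3412, which is what the device emulation
 * code must observe. For a little-endian guest on the same host, the value
 * passes through unchanged.
 */
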
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		kvm_skip_instr32(vcpu, is_wide_instr);
	} else {
		*vcpu_pc(vcpu) += 4;
		*vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
	}

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

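/*
 * Illustrative usage sketch (not part of the upstream header): A64
 * instructions are fixed at 4 bytes, while an AArch32 guest may trap on a
 * 2-byte Thumb instruction, which is why the width must come from
 * ESR_ELx.IL. A typical emulation path therefore finishes with:
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */
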
/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */