1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 #ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
8 #define __ARM64_KVM_HYP_SYSREG_SR_H__
10 #include <linux/compiler.h>
11 #include <linux/kvm_host.h>
13 #include <asm/kprobes.h>
14 #include <asm/kvm_asm.h>
15 #include <asm/kvm_emulate.h>
16 #include <asm/kvm_hyp.h>
18 static inline void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
20 ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
23 static inline void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
25 ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
26 ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
29 static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
31 ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
32 ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
33 ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
34 ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
35 ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
36 ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
37 ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
38 ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
39 ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
40 ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
41 ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
42 ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
43 ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
44 ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
45 ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
46 ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
47 ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
49 ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
50 ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
51 ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
54 static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
56 ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
57 ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
59 if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
60 ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
63 static inline void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
65 write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
68 static inline void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
70 write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
71 write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
74 static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
76 write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
77 write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
80 !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
81 write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
82 write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
83 } else if (!ctxt->__hyp_running_vcpu) {
85 * Must only be done for guest registers, hence the context
86 * test. We're coming from the host, so SCTLR.M is already
87 * set. Pairs with nVHE's __activate_traps().
89 write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
90 TCR_EPD1_MASK | TCR_EPD0_MASK),
95 write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
96 write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
97 write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
98 write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
99 write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
100 write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
101 write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
102 write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
103 write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
104 write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
105 write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
106 write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
107 write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
108 write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
111 cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
112 ctxt->__hyp_running_vcpu) {
114 * Must only be done for host registers, hence the context
115 * test. Pairs with nVHE's __deactivate_traps().
119 * At this stage, and thanks to the above isb(), S2 is
120 * deconfigured and disabled. We can now restore the host's
121 * S1 configuration: SCTLR, and only then TCR.
123 write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
125 write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
128 write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
129 write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
130 write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
133 static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
135 u64 pstate = ctxt->gp_regs.regs.pstate;
136 u64 mode = pstate & PSR_AA32_MODE_MASK;
139 * Safety check to ensure we're setting the CPU up to enter the guest
140 * in a less privileged mode.
142 * If we are attempting a return to EL2 or higher in AArch64 state,
143 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
144 * we'll take an illegal exception state exception immediately after
145 * the ERET to the guest. Attempts to return to AArch32 Hyp will
146 * result in an illegal exception return because EL2's execution state
147 * is determined by SCR_EL3.RW.
149 if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
150 pstate = PSR_MODE_EL2h | PSR_IL_BIT;
152 write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
153 write_sysreg_el2(pstate, SYS_SPSR);
155 if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
156 write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
159 static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
163 if (!vcpu_el1_is_32bit(vcpu))
166 spsr = vcpu->arch.ctxt.gp_regs.spsr;
167 sysreg = vcpu->arch.ctxt.sys_regs;
169 spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
170 spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
171 spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
172 spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
174 sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
175 sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
177 if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
178 sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
181 static inline void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
185 if (!vcpu_el1_is_32bit(vcpu))
188 spsr = vcpu->arch.ctxt.gp_regs.spsr;
189 sysreg = vcpu->arch.ctxt.sys_regs;
191 write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
192 write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
193 write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
194 write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
196 write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
197 write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
199 if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
200 write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
203 #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */