3e0585fbd4032c6993a71ae9fee4ac85899e05cb
[linux-2.6-microblaze.git] / arch / arm64 / kvm / hyp / include / hyp / sysreg-sr.h
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012-2015 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6
7 #ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
8 #define __ARM64_KVM_HYP_SYSREG_SR_H__
9
10 #include <linux/compiler.h>
11 #include <linux/kvm_host.h>
12
13 #include <asm/kprobes.h>
14 #include <asm/kvm_asm.h>
15 #include <asm/kvm_emulate.h>
16 #include <asm/kvm_hyp.h>
17
/*
 * Save the system register state that is common to all switch paths.
 * Only MDSCR_EL1 (debug state control) is handled here; the rest of the
 * debug state is managed elsewhere.
 */
static inline void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
}
22
/*
 * Save the EL0-accessible thread pointer registers (TPIDR_EL0 and the
 * read-only TPIDRRO_EL0) into @ctxt.
 */
static inline void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
}
28
/*
 * Save the EL1 system register state into @ctxt.
 *
 * Registers read via read_sysreg_el1(SYS_*) use accessors that resolve
 * to the appropriate encoding for the current host configuration;
 * CSSELR_EL1, PAR_EL1 and TPIDR_EL1 have a single encoding and are read
 * directly. The EL1 exception return state (SP_EL1, ELR_EL1, SPSR_EL1)
 * is captured into the gp_regs part of the context.
 */
static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg_el1(SYS_SCTLR);
	ctxt->sys_regs[CPACR_EL1]	= read_sysreg_el1(SYS_CPACR);
	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg_el1(SYS_TTBR0);
	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg_el1(SYS_TTBR1);
	ctxt->sys_regs[TCR_EL1]		= read_sysreg_el1(SYS_TCR);
	ctxt->sys_regs[ESR_EL1]		= read_sysreg_el1(SYS_ESR);
	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg_el1(SYS_AFSR0);
	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg_el1(SYS_AFSR1);
	ctxt->sys_regs[FAR_EL1]		= read_sysreg_el1(SYS_FAR);
	ctxt->sys_regs[MAIR_EL1]	= read_sysreg_el1(SYS_MAIR);
	ctxt->sys_regs[VBAR_EL1]	= read_sysreg_el1(SYS_VBAR);
	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(SYS_AMAIR);
	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(SYS_CNTKCTL);
	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);

	/* EL1 exception return state. */
	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
	ctxt->gp_regs.elr_el1		= read_sysreg_el1(SYS_ELR);
	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
}
53
/*
 * Save the exception return state out of EL2: the PC and PSTATE that
 * were live when the exception was taken (ELR_EL2/SPSR_EL2).
 *
 * With the RAS extension, VDISR_EL2 (the virtual deferred interrupt
 * status presented to the guest as DISR_EL1) is saved as well.
 */
static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->gp_regs.regs.pc		= read_sysreg_el2(SYS_ELR);
	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
62
/*
 * Restore the common state saved by __sysreg_save_common_state():
 * MDSCR_EL1 only.
 */
static inline void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
}
67
/*
 * Restore the EL0 thread pointer registers saved by
 * __sysreg_save_user_state().
 */
static inline void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[TPIDR_EL0],		tpidr_el0);
	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],	tpidrro_el0);
}
73
/*
 * Restore the EL1 system register state from @ctxt.
 *
 * The ordering here is deliberate and must not be changed: on nVHE CPUs
 * affected by ARM64_WORKAROUND_SPECULATIVE_AT, SCTLR_EL1 and TCR_EL1
 * require special sequencing (with isb() barriers) to prevent the MMU
 * from speculatively performing AT-style walks with a half-updated
 * translation configuration. Whether we are restoring guest or host
 * registers is determined by ctxt->__hyp_running_vcpu (non-NULL means
 * this is the host's context).
 */
static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	/* The saved MPIDR_EL1 is presented to the guest via VMPIDR_EL2. */
	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* No workaround needed: restore SCTLR/TCR directly. */
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
	} else	if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
		write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	SYS_CPACR);
	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	SYS_TTBR0);
	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	SYS_TTBR1);
	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	SYS_ESR);
	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	SYS_AFSR0);
	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	SYS_AFSR1);
	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],	SYS_FAR);
	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],	SYS_MAIR);
	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],	SYS_VBAR);
	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	SYS_AMAIR);
	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],	SYS_CNTKCTL);
	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with nVHE's __deactivate_traps().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
	}

	/* EL1 exception return state. */
	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
	write_sysreg_el1(ctxt->gp_regs.elr_el1,		SYS_ELR);
	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
}
132
/*
 * Program the EL2 exception return state (ELR_EL2/SPSR_EL2) from @ctxt
 * so that the subsequent ERET enters the guest at the saved PC/PSTATE.
 * An attempt to return to a privilege level at or above EL2 is demoted
 * to an illegal exception return (see comment below).
 *
 * With the RAS extension, DISR_EL1 is restored into VDISR_EL2, the
 * register the guest observes as DISR_EL1.
 */
static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = ctxt->gp_regs.regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest.  Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->gp_regs.regs.pc,		SYS_ELR);
	write_sysreg_el2(pstate,			SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
158
159 static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
160 {
161         u64 *spsr, *sysreg;
162
163         if (!vcpu_el1_is_32bit(vcpu))
164                 return;
165
166         spsr = vcpu->arch.ctxt.gp_regs.spsr;
167         sysreg = vcpu->arch.ctxt.sys_regs;
168
169         spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
170         spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
171         spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
172         spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
173
174         sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
175         sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
176
177         if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
178                 sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
179 }
180
181 static inline void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
182 {
183         u64 *spsr, *sysreg;
184
185         if (!vcpu_el1_is_32bit(vcpu))
186                 return;
187
188         spsr = vcpu->arch.ctxt.gp_regs.spsr;
189         sysreg = vcpu->arch.ctxt.sys_regs;
190
191         write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
192         write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
193         write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
194         write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
195
196         write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
197         write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
198
199         if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
200                 write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
201 }
202
203 #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */