Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
[linux-2.6-microblaze.git] / arch / arm64 / kvm / hyp / sysreg-sr.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012-2015 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6
7 #include <linux/compiler.h>
8 #include <linux/kvm_host.h>
9
10 #include <asm/kprobes.h>
11 #include <asm/kvm_asm.h>
12 #include <asm/kvm_emulate.h>
13 #include <asm/kvm_hyp.h>
14
15 /*
16  * Non-VHE: Both host and guest must save everything.
17  *
18  * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
19  * pstate, which are handled as part of the el2 return state) on every
20  * switch (sp_el0 is being dealt with in the assembly code).
21  * tpidr_el0 and tpidrro_el0 only need to be switched when going
22  * to host userspace or a different VCPU.  EL1 registers only need to be
23  * switched when potentially going to run a different VCPU.  The latter two
24  * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
25  */
26
/*
 * Save the state that is common to all configurations (VHE and non-VHE)
 * and must be saved on every switch: currently only MDSCR_EL1 (see the
 * block comment above).
 */
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
}
31
/*
 * Save the EL0 thread-ID registers. On VHE these only need saving when
 * returning to host userspace or switching VCPUs (called from
 * kvm_vcpu_load_sysregs/kvm_vcpu_put_sysregs rather than on every exit).
 */
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
}
37
/*
 * Save the full EL1 context into @ctxt.
 *
 * Registers accessed through read_sysreg_el1() have distinct encodings
 * depending on whether E2H is set (VHE), while the plain read_sysreg()
 * accessors are used for registers that are not redirected by VHE
 * (e.g. CSSELR_EL1, ACTLR_EL1, PAR_EL1, TPIDR_EL1).
 */
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg_el1(SYS_SCTLR);
	ctxt->sys_regs[ACTLR_EL1]	= read_sysreg(actlr_el1);
	ctxt->sys_regs[CPACR_EL1]	= read_sysreg_el1(SYS_CPACR);
	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg_el1(SYS_TTBR0);
	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg_el1(SYS_TTBR1);
	ctxt->sys_regs[TCR_EL1]		= read_sysreg_el1(SYS_TCR);
	ctxt->sys_regs[ESR_EL1]		= read_sysreg_el1(SYS_ESR);
	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg_el1(SYS_AFSR0);
	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg_el1(SYS_AFSR1);
	ctxt->sys_regs[FAR_EL1]		= read_sysreg_el1(SYS_FAR);
	ctxt->sys_regs[MAIR_EL1]	= read_sysreg_el1(SYS_MAIR);
	ctxt->sys_regs[VBAR_EL1]	= read_sysreg_el1(SYS_VBAR);
	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(SYS_AMAIR);
	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(SYS_CNTKCTL);
	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);

	/* EL1 exception state: banked SP, return address and saved PSTATE */
	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
	ctxt->gp_regs.elr_el1		= read_sysreg_el1(SYS_ELR);
	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
}
63
/*
 * Save the EL2 return state: at this point the guest's PC and PSTATE
 * still live in ELR_EL2/SPSR_EL2 from the exception entry. With the RAS
 * extension, VDISR_EL2 holds the value the guest observes as DISR_EL1.
 */
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->gp_regs.regs.pc		= read_sysreg_el2(SYS_ELR);
	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
72
/*
 * Non-VHE world switch: save everything (EL1 state, common state, EL0
 * user state and the EL2 return state) on every switch.
 */
void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_el1_state(ctxt);
	__sysreg_save_common_state(ctxt);
	__sysreg_save_user_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
80
/*
 * VHE: only the common state (MDSCR_EL1) needs saving for the host on
 * each switch; EL1/user state is deferred to vcpu_load/vcpu_put.
 * Not kprobe-able: runs on the world-switch path.
 */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
86
/*
 * VHE: save the guest's common state plus the EL2 return state (PC and
 * PSTATE) on every exit. Not kprobe-able: runs on the world-switch path.
 */
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
93
/* Restore the common state: counterpart of __sysreg_save_common_state(). */
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
}
98
/* Restore the EL0 thread-ID registers: counterpart of __sysreg_save_user_state(). */
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[TPIDR_EL0],		tpidr_el0);
	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],	tpidrro_el0);
}
104
/*
 * Restore the full EL1 context from @ctxt.
 *
 * On CPUs affected by ARM64_WORKAROUND_SPECULATIVE_AT_NVHE, SCTLR_EL1
 * and TCR_EL1 must not be restored in program order with the rest of
 * the registers: speculative AT instructions could otherwise walk a
 * half-configured S1 context. The sequencing (EPD0/EPD1 masking for the
 * guest, isb()-separated SCTLR-then-TCR restore for the host) below is
 * load-bearing — do not reorder.
 */
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);

	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
	} else	if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with __activate_traps_nvhe().
		 */
		write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg(ctxt->sys_regs[ACTLR_EL1],		actlr_el1);
	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	SYS_CPACR);
	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	SYS_TTBR0);
	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	SYS_TTBR1);
	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	SYS_ESR);
	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	SYS_AFSR0);
	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	SYS_AFSR1);
	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],	SYS_FAR);
	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],	SYS_MAIR);
	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],	SYS_VBAR);
	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	SYS_AMAIR);
	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],	SYS_CNTKCTL);
	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with __deactivate_traps_nvhe().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
	}

	/* EL1 exception state: banked SP, return address and saved PSTATE */
	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
	write_sysreg_el1(ctxt->gp_regs.elr_el1,		SYS_ELR);
	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
}
162
/*
 * Program ELR_EL2/SPSR_EL2 with the guest's PC and PSTATE so the coming
 * ERET enters the guest, sanitising the target mode first. With RAS,
 * also restore the guest's DISR_EL1 view via VDISR_EL2.
 */
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = ctxt->gp_regs.regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest.  Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->gp_regs.regs.pc,		SYS_ELR);
	write_sysreg_el2(pstate,			SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
189
/*
 * Non-VHE world switch: restore everything, mirroring
 * __sysreg_save_state_nvhe().
 */
void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_el1_state(ctxt);
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_user_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
197
/*
 * VHE: only the common state (MDSCR_EL1) needs restoring for the host
 * on each switch. Not kprobe-able: runs on the world-switch path.
 */
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
203
/*
 * VHE: restore the guest's common state and program the EL2 return
 * state for the ERET. Not kprobe-able: runs on the world-switch path.
 */
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
210
/*
 * Save the AArch32-specific guest state (banked SPSRs, DACR32_EL2,
 * IFSR32_EL2). No-op for AArch64 guests. DBGVCR32_EL2 is only saved
 * when debug state is live: always on VHE, or when the vcpu's
 * KVM_ARM64_DEBUG_DIRTY flag is set on non-VHE.
 */
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
	spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
	spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

	sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
	sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}
232
/*
 * Restore the AArch32-specific guest state: exact mirror of
 * __sysreg32_save_state(), including the DBGVCR32_EL2 condition.
 * No-op for AArch64 guests.
 */
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
254
/**
 * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

	/* Deferred loading is a VHE-only optimisation */
	if (!has_vhe())
		return;

	__sysreg_save_user_state(host_ctxt);

	/*
	 * Load guest EL1 and user state
	 *
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_user_state(guest_ctxt);
	__sysreg_restore_el1_state(guest_ctxt);

	/* Tell the rest of KVM the live state is now on the CPU */
	vcpu->arch.sysregs_loaded_on_cpu = true;

	activate_traps_vhe_load(vcpu);
}
290
/**
 * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running the
 * VCPU avoids having to save them on every exit from the VM.
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

	/* Deferred save is a VHE-only optimisation */
	if (!has_vhe())
		return;

	/* Undo activate_traps_vhe_load() before touching guest state */
	deactivate_traps_vhe_put();

	__sysreg_save_el1_state(guest_ctxt);
	__sysreg_save_user_state(guest_ctxt);
	__sysreg32_save_state(vcpu);

	/* Restore host user state */
	__sysreg_restore_user_state(host_ctxt);

	/* Live state is back in memory; in-memory copies are authoritative */
	vcpu->arch.sysregs_loaded_on_cpu = false;
}
321
/*
 * Set SCTLR_EL2.DSSBS via a read-modify-write of sctlr_el2, enabling
 * the default Speculative Store Bypass Safe behaviour at EL2.
 * Hand-written asm (rather than read_sysreg/write_sysreg) with the "L"
 * constraint so the OR immediate is encoded directly.
 */
void __hyp_text __kvm_enable_ssbs(void)
{
	u64 tmp;

	asm volatile(
	"mrs	%0, sctlr_el2\n"
	"orr	%0, %0, %1\n"
	"msr	sctlr_el2, %0"
	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}