arch/arm64/include/asm/kvm_emulate.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                vcpu->arch.hcr_el2 |= HCR_FWB;
        } else {
                /*
                 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
                 * get set in SCTLR_EL1 such that we can detect when the guest
                 * MMU gets turned on and do the necessary cache maintenance
                 * then.
                 */
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }

        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        /*
         * TID3: trap feature register accesses that we virtualise.
         * For now this is conditional, since no AArch32 feature regs
         * are currently virtualised.
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;

        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
            vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID2;
}
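
/*
 * The other half of the HCR_TVM dance above lives in the sys_regs trap
 * code. As an illustrative sketch only (see access_vm_reg() and
 * kvm_toggle_cache() for the real implementation), a trapped write to
 * a VM-control register is handled roughly as:
 *
 *	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 *
 *	vcpu_write_sys_reg(vcpu, val, reg);
 *	kvm_toggle_cache(vcpu, was_enabled);	// flush, then drop HCR_TVM
 *						// once the MMU is on
 */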

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

/*
 * Stop trapping WFE altogether. WFI traps are also dropped when direct
 * interrupt injection is in use (vLPIs mapped through GICv4, or GICv4.1
 * vSGIs enabled): a directly injected interrupt is invisible to the
 * hypervisor, so the guest is better off waiting for it in its own WFI.
 */
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
            vcpu->kvm->arch.vgic.nassgireq)
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}
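
/*
 * Illustrative pairing of the two helpers above, modelled on what
 * kvm_arch_vcpu_load() does (a sketch, not a definition from this
 * header):
 *
 *	if (single_task_running())
 *		vcpu_clear_wfx_traps(vcpu);
 *	else
 *		vcpu_set_wfx_traps(vcpu);
 */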

/*
 * HCR_EL2.{API,APK} disable the pointer authentication traps when set:
 * API covers the PAC instructions, APK the key registers.
 */
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

/*
 * Lazy ptrauth: start with the traps armed, and only switch them off
 * (via vcpu_ptrauth_enable()) once the guest actually uses ptrauth and
 * takes the resulting trap.
 */
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
        if (vcpu_has_ptrauth(vcpu))
                vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

/*
 * With VHE, the guest's sysregs can be live on the CPU while the vcpu
 * runs; in that case access the hardware register rather than the
 * in-memory copy.
 */
static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(SYS_ELR);
        else
                return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, SYS_ELR);
        else
                *__vcpu_elr_el1(vcpu) = v;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        /* Register 31 is XZR/WZR: reads-as-zero */
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        /* ... and writes to it are ignored */
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
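
/*
 * Illustrative use (a sketch, not part of this header): an MMIO store
 * emulation path takes Rt straight from the ESR_EL2 syndrome and feeds
 * it back here, rather than decoding the faulting instruction:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);	// Rt from ESR_EL2.ISS
 *	unsigned long data = vcpu_get_reg(vcpu, rt);
 */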

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return vcpu_read_spsr32(vcpu);

        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(SYS_SPSR);
        else
                return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu_write_spsr32(vcpu, v);
                return;
        }

        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, SYS_SPSR);
        else
                vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        /* Per the table above, DIT sits at bit 24 in the AArch64 view */
        unsigned long dit = !!(spsr & BIT(24));

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}
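
/*
 * Worked example: an AArch64 view of 0x01200013 (SVC mode, SS and DIT
 * set) becomes 0x00200013 -- SS is dropped, as it has no AArch32
 * equivalent, and DIT reappears at bit 21, where AArch32 keeps it.
 */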

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

/* ESR_EL2; "HSR" is the AArch32 name, kept from the 32-bit arm port */
static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        /* No condition code in the syndrome; callers fall back to PSTATE.IT */
        return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
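
/*
 * HPFAR_EL2.FIPA starts at bit 4 and holds IPA[47:12], so masking off
 * the low bits and shifting left by 8 rebuilds the page-aligned IPA.
 * Worked example: HPFAR_EL2 == 0x1230 gives (0x1230 & ~0xf) << 8 ==
 * 0x123000, i.e. the fault hit the IPA page at 0x123000.
 */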

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

/* Does the loaded value need sign extension? */
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

/* SF ("Sixty-Four"): is the transfer register 64-bit? */
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

/* Did the fault happen during a stage 1 page-table walk? */
static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
                kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

/* Access size in bytes, decoded from ESR_EL2.SAS */
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
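
/*
 * Worked example: a trapped 32-bit access reports SAS == 0b10, so the
 * helper above returns 1 << 2 == 4 bytes; SAS values 0b00, 0b01 and
 * 0b11 decode to 1, 2 and 8 bytes respectively.
 */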

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
                                                      bool flag)
{
        if (flag)
                vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
        else
                vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= (1 << 25);     /* SCTLR_EL1.EE: EL1 data accesses are BE */
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); /* SCTLR_EL1.EE */
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;            /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;            /* Leave LE untouched */
}
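
/*
 * Worked example (assuming the usual little-endian arm64 host): a
 * big-endian guest stores the halfword 0x1234 to an emulated device.
 * The register value is converted with
 *
 *	vcpu_data_guest_to_host(vcpu, 0x1234, 2) == 0x3412
 *
 * so that the host's native store of 0x3412 produces the byte sequence
 * 0x12 0x34, exactly what the guest's big-endian store would have
 * written. vcpu_data_host_to_guest() is the mirror image for loads.
 */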

static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        if (vcpu_mode_is_32bit(vcpu))
                kvm_skip_instr32(vcpu, is_wide_instr);
        else
                *vcpu_pc(vcpu) += 4;

        /* advance the singlestep state machine */
        *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
        *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
        vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
        write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */