/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR        0x0
#define CURRENT_EL_SP_ELx_VECTOR        0x200
#define LOWER_EL_AArch64_VECTOR         0x400
#define LOWER_EL_AArch32_VECTOR         0x600

enum exception_type {
        except_type_sync        = 0,
        except_type_irq         = 0x80,
        except_type_fiq         = 0x100,
        except_type_serror      = 0x180,
};
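
/*
 * Illustrative sketch (not an in-tree helper): a vector lives at a fixed
 * offset from VBAR_ELx, computed as the source-state group base above plus
 * the exception type. For example, an IRQ taken from a lower EL running
 * AArch64 lands at LOWER_EL_AArch64_VECTOR + except_type_irq
 * = 0x400 + 0x80 = 0x480.
 */
static inline unsigned long example_exception_offset(unsigned long group_base,
                                                     enum exception_type type)
{
        return group_base + type;
}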

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                vcpu->arch.hcr_el2 |= HCR_FWB;
        } else {
                /*
                 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
                 * get set in SCTLR_EL1 such that we can detect when the guest
                 * MMU gets turned on and do the necessary cache maintenance
                 * then.
                 */
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }

        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        /*
         * TID3: trap feature register accesses that we virtualise.
         * For now this is conditional, since no AArch32 feature regs
         * are currently virtualised.
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;

        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
            vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID2;
}
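
/*
 * A minimal sketch of the non-FWB case above (illustrative only, not the
 * in-tree trap handler, and the helper name is hypothetical): while
 * HCR_EL2.TVM is set, guest writes to SCTLR_EL1 trap to the hypervisor;
 * once a write enables both the MMU (M) and the data cache (C), the cache
 * maintenance can be performed and the trap dropped.
 */
static inline void example_sctlr_el1_write(struct kvm_vcpu *vcpu, u64 sctlr)
{
        if ((sctlr & (SCTLR_ELx_M | SCTLR_ELx_C)) == (SCTLR_ELx_M | SCTLR_ELx_C))
                vcpu->arch.hcr_el2 &= ~HCR_TVM; /* MMU+cache on: stop trapping VM ops */
}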

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
            vcpu->kvm->arch.vgic.nassgireq)
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

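/*
 * Why the distinction above: when interrupts can be delivered to the guest
 * without hypervisor involvement (vLPIs mapped on GICv4, or active vSGIs),
 * trapping WFI would only add spurious exits, so HCR_EL2.TWI is left clear
 * in that case.
 */
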
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
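
/*
 * A minimal usage sketch (the helper is hypothetical, not in-tree):
 * completing an emulated MMIO read means writing the loaded value to the
 * destination register decoded from ESR_EL2; vcpu_set_reg() silently
 * discards writes to register 31 (XZR).
 */
static inline void example_complete_mmio_read(struct kvm_vcpu *vcpu, u8 rt,
                                              unsigned long data)
{
        vcpu_set_reg(vcpu, rt, data);   /* no-op if rt == 31 */
}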

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}
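
/*
 * Worked example: the kernel's PSR_AA32_* constants follow the SPSR_ELx
 * layout for AArch32 state, so PSR_AA32_DIT_BIT selects bit 24 of the
 * input. For spsr == PSR_AA32_DIT_BIT, host_spsr_to_spsr32() clears bits
 * 24 and 21, then sets bit 21: DIT ends up in its AArch32 position and the
 * RES0 J bit (24) reads as zero.
 */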

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_esr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

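/*
 * HPFAR_EL2.FIPA holds bits [51:12] of the faulting IPA in register bits
 * [43:4], so masking with HPFAR_MASK and shifting left by 8 reconstructs
 * the page-aligned IPA: register bit 4 lands at result bit 12.
 */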
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

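/*
 * When ESR_ELx.S1PTW is set, WnR describes the stage-1 page-table walk
 * access (a write when updating access/dirty state) rather than the
 * instruction that caused the fault, hence the warning above. See
 * kvm_is_write_fault() below for the check order.
 */
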
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

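/* ESR_ELx.SAS encodes log2(access size): values 0-3 decode to 1, 2, 4 or 8 bytes. */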
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_esr(vcpu);

        return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_abt_iss1tw(vcpu))
                return true;

        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= (1 << 25);     /* SCTLR_EL1.EE: EL1 data accesses are BE */
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); /* SCTLR_EL1.EE */
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;            /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;            /* Leave LE untouched */
}
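
/*
 * A minimal usage sketch (the helper is hypothetical, not in-tree): a
 * device emulation producing a host-native 32-bit value converts it to the
 * guest's current data endianness before completing the register write.
 */
static inline void example_mmio_load32(struct kvm_vcpu *vcpu, u8 rt, u32 val)
{
        vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, val, 4));
}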

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
        vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
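
/*
 * Note that kvm_incr_pc() only flags the increment: the PC is actually
 * advanced when the pending PC state is applied, before the vcpu next
 * enters the guest.
 */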

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
        return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */