/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

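/*
 * Offsets from VBAR_ELx of the four groups of the AArch64 exception
 * vector table, used when emulating exception entry into the guest.
 */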
#define CURRENT_EL_SP_EL0_VECTOR        0x0
#define CURRENT_EL_SP_ELx_VECTOR        0x200
#define LOWER_EL_AArch64_VECTOR         0x400
#define LOWER_EL_AArch32_VECTOR         0x600

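/*
 * Offset of each exception type within its vector table group; each
 * vector slot is 0x80 bytes.
 */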
enum exception_type {
        except_type_sync        = 0,
        except_type_irq         = 0x80,
        except_type_fiq         = 0x100,
        except_type_serror      = 0x180,
};

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

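/*
 * In hyp context, HCR_EL2.RW is the authoritative statement of the
 * guest's register width (RW clear means EL1 is AArch32). Host code
 * uses the VM-wide flag instead, which must already have been
 * configured.
 */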
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
                               &kvm->arch.flags));

        return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
}
#endif

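/*
 * Compute the reset value of HCR_EL2 for this vCPU: the guest default
 * flags, plus whatever traps the host configuration and the guest's
 * register width require.
 */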
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
                vcpu->arch.hcr_el2 |= HCR_FWB;
        } else {
                /*
                 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
                 * get set in SCTLR_EL1 such that we can detect when the guest
                 * MMU gets turned on and do the necessary cache maintenance
                 * then.
                 */
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }

        if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
        else
                /*
                 * TID3: trap feature register accesses that we virtualise.
                 * For now this is conditional, since no AArch32 feature regs
                 * are currently virtualised.
                 */
                vcpu->arch.hcr_el2 |= HCR_TID3;

        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
            vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID2;

        if (kvm_has_mte(vcpu->kvm))
                vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

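/*
 * Stop trapping WFE, and stop trapping WFI only when the vCPU can be
 * woken up directly: vLPIs are mapped on its vPE, or the guest has
 * requested directly injected vSGIs. vcpu_set_wfx_traps() re-enables
 * both traps.
 */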
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
        if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
            vcpu->kvm->arch.vgic.nassgireq)
                vcpu->arch.hcr_el2 &= ~HCR_TWI;
        else
                vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
        vcpu->arch.hcr_el2 |= HCR_TWI;
}

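/*
 * With HCR_EL2.API and HCR_EL2.APK set, the guest can use pointer
 * authentication instructions and key registers without trapping to
 * EL2; clearing them re-enables the traps.
 */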
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

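/*
 * VSESR_EL2 provides the syndrome value reported to the guest when a
 * virtual SError is injected via HCR_EL2.VSE.
 */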
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 *
 * Register 31 is treated as the zero register (XZR/WZR): reads return 0
 * and writes are discarded.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
        const unsigned long overlap = BIT(24) | BIT(21);
        unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

        spsr &= ~overlap;

        spsr |= dit << 21;

        return spsr;
}

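/*
 * True when the vCPU is in a privileged mode: anything above User mode
 * on AArch32, anything other than EL0t on AArch64.
 */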
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

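/*
 * When ESR_ELx.CV is set, COND holds the condition code of the trapped
 * (conditional AArch32) instruction. -1 means no valid condition was
 * recorded in the syndrome.
 */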
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

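/*
 * HPFAR_EL2 holds the faulting IPA as FIPA in bits [43:4]; shifting
 * left by 8 reconstructs the page-aligned IPA. The page offset, when
 * needed, comes from FAR_EL2.
 */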
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/*
 * Always check for S1PTW *before* using this: on a stage 2 fault taken
 * during a stage 1 page table walk, WnR describes the walker's access,
 * not the instruction that caused the walk.
 */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

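/*
 * ESR_ELx.SAS encodes the access size as log2(bytes): this returns
 * 1, 2, 4 or 8.
 */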
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

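/*
 * True for all synchronous external abort FSC encodings, including
 * parity/ECC errors, whether on the access itself or on any level of
 * the translation table walk.
 */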
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);

        return ESR_ELx_SYS64_ISS_RT(esr);
}

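/*
 * A stage 2 fault on a stage 1 page table walk counts as a write (the
 * walk may need to update the tables), an instruction abort never
 * does; otherwise ESR_ELx.WnR decides.
 */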
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_abt_iss1tw(vcpu))
                return true;

        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

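/*
 * Guest data endianness is determined by PSTATE.E on AArch32, and by
 * SCTLR_EL1.EE (privileged) or SCTLR_EL1.E0E (EL0) on AArch64.
 */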
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= SCTLR_ELx_EE;
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        if (vcpu_mode_priv(vcpu))
                return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
        else
                return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

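/*
 * Convert MMIO data between the guest's representation and the host's
 * (arm64 Linux is little-endian only) for the given access length.
 */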
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;            /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;            /* Leave LE untouched */
}

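/*
 * Request a PC increment rather than adjusting the PC immediately; the
 * flag is consumed on the next guest entry, keeping the skip ordered
 * with respect to any pending exception injection.
 */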
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
        vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

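/*
 * Illustrative sketch only (not part of this header): completing an
 * emulated MMIO load using the helpers above, assuming 'data' holds
 * the host-endian value produced by the device model.
 *
 *      if (kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu) &&
 *          !kvm_vcpu_dabt_iswrite(vcpu)) {
 *              unsigned int len = kvm_vcpu_dabt_get_as(vcpu);
 *              unsigned long val = vcpu_data_host_to_guest(vcpu, data, len);
 *
 *              vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), val);
 *              kvm_incr_pc(vcpu);
 *      }
 */
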
static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
        return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */