// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sigcontext.h>

#include "trace.h"

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
	VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
	VCPU_STAT("mmio_exit_user", mmio_exit_user),
	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
	VCPU_STAT("exits", exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{ NULL }
};

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
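
/*
 * Core register IDs encode the register's location as its offset within
 * struct kvm_regs, counted in 32-bit words (this is what
 * KVM_REG_ARM_CORE_REG() expands to).  As a rough example, the ID userspace
 * would pass for X2 is built as:
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *		KVM_REG_ARM_CORE_REG(regs.regs[2])
 */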

static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}
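
/*
 * Translate a validated core register ID into a pointer to the backing
 * field in the vcpu context, or NULL if the ID or its size is bogus.
 */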

static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;

		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i;

		for (i = 0; i < 16; i++)
			*vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
	}
out:
	return err;
}
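
/*
 * vqs[] bitmap helpers for the KVM_REG_ARM64_SVE_VLS pseudo-register:
 * bit (vq - SVE_VQ_MIN) of the u64 array is set when the vector length of
 * vq quadwords (vq * 128 bits) is supported.
 */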
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
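
/*
 * Architectural SVE register IDs encode two sub-fields in their low bits:
 * bits [4:0] select the slice and bits [9:5] the register number, so e.g.
 * KVM_REG_ARM64_SVE_ZREG(n, i) names slice i of Zn.  Only slice 0 is
 * implemented for now (see vcpu_sve_slices() below).
 */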

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
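
/*
 * Walk struct kvm_regs as an array of 32-bit words and emit one register ID
 * per accessible core register, with the register's size encoded in the ID.
 * A NULL uindices pointer means "just count": only the number of registers
 * is returned.
 */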

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;
		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;
		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;
		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
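
/*
 * The SVE register list consists of the KVM_REG_ARM64_SVE_VLS
 * pseudo-register plus, per slice, the 32 Z-registers, the 16 P-registers
 * and FFR.
 */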

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
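
/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG dispatch on the coproc field of the
 * register ID: core, firmware and SVE registers are handled here, timer
 * registers by their fixed IDs, and everything else falls through to the
 * sys_regs code.
 */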

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}
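
/*
 * KVM_GET_VCPU_EVENTS: report whether an SError is pending for the vcpu
 * and, where the RAS extension is present, the ESR value that will be
 * injected with it.
 */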

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}
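
/*
 * Map the host CPU's MIDR implementer/part number onto one of the KVM
 * target types known to userspace, falling back to the generic v8 target.
 */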

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu: the vCPU pointer
 * @dbg: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
			vcpu->arch.external_debug_state = dbg->arch;
	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
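
/*
 * The KVM_{SET,GET,HAS}_DEVICE_ATTR vcpu ioctls are routed by attribute
 * group: PMUv3, the arch timer and stolen-time (pvtime) each have their own
 * handlers; any other group is rejected with -ENXIO.
 */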

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);