1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012,2013 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 * Derived from arch/arm/kvm/guest.c:
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
11 #include <linux/bits.h>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/nospec.h>
15 #include <linux/kvm_host.h>
16 #include <linux/module.h>
17 #include <linux/stddef.h>
18 #include <linux/string.h>
19 #include <linux/vmalloc.h>
21 #include <kvm/arm_psci.h>
22 #include <asm/cputype.h>
23 #include <linux/uaccess.h>
24 #include <asm/fpsimd.h>
26 #include <asm/kvm_emulate.h>
27 #include <asm/sigcontext.h>
31 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
32 KVM_GENERIC_VM_STATS()
34 static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
35 sizeof(struct kvm_vm_stat) / sizeof(u64));
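/*
 * Layout of the binary stats file exposed to userspace: the header comes
 * first, followed by the identifier string (name_size bytes), then the
 * descriptor array, and finally the statistics data itself. The offsets
 * below encode exactly that ordering.
 */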
37 const struct kvm_stats_header kvm_vm_stats_header = {
38 .name_size = KVM_STATS_NAME_SIZE,
39 .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
40 .id_offset = sizeof(struct kvm_stats_header),
41 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
42 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
43 sizeof(kvm_vm_stats_desc),
46 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
47 KVM_GENERIC_VCPU_STATS(),
48 STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
49 STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
50 STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
51 STATS_DESC_COUNTER(VCPU, mmio_exit_user),
52 STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
53 STATS_DESC_COUNTER(VCPU, exits)
55 static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
56 sizeof(struct kvm_vcpu_stat) / sizeof(u64));
58 const struct kvm_stats_header kvm_vcpu_stats_header = {
59 .name_size = KVM_STATS_NAME_SIZE,
60 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
61 .id_offset = sizeof(struct kvm_stats_header),
62 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
63 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
64 sizeof(kvm_vcpu_stats_desc),
67 static bool core_reg_offset_is_vreg(u64 off)
69 return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
70 off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
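/*
 * Core register IDs are built as
 *	KVM_REG_ARM64 | KVM_REG_SIZE_* | KVM_REG_ARM_CORE | index,
 * where the index is offsetof(struct kvm_regs, <field>) / sizeof(__u32).
 * Masking off the architecture, size and coprocessor bits therefore leaves
 * the offset into struct kvm_regs, measured in 32-bit words.
 */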
73 static u64 core_reg_offset_from_id(u64 id)
75 return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
78 static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
83 case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
84 KVM_REG_ARM_CORE_REG(regs.regs[30]):
85 case KVM_REG_ARM_CORE_REG(regs.sp):
86 case KVM_REG_ARM_CORE_REG(regs.pc):
87 case KVM_REG_ARM_CORE_REG(regs.pstate):
88 case KVM_REG_ARM_CORE_REG(sp_el1):
89 case KVM_REG_ARM_CORE_REG(elr_el1):
90 case KVM_REG_ARM_CORE_REG(spsr[0]) ...
91 KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
95 case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
96 KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
97 size = sizeof(__uint128_t);
100 case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
101 case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
102 size = sizeof(__u32);
109 if (!IS_ALIGNED(off, size / sizeof(__u32)))
113 * The KVM_REG_ARM64_SVE regs must be used instead of
114 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on SVE-enabled vcpus:
117 if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
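/*
 * Translate a validated core-register ID into a pointer at the backing
 * storage inside the vcpu context, or NULL if the ID is unknown or its
 * size does not match what core_reg_size_from_offset() reports.
 */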
123 static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
125 u64 off = core_reg_offset_from_id(reg->id);
126 int size = core_reg_size_from_offset(vcpu, off);
131 if (KVM_REG_SIZE(reg->id) != size)
135 case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
136 KVM_REG_ARM_CORE_REG(regs.regs[30]):
137 off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
139 return &vcpu->arch.ctxt.regs.regs[off];
141 case KVM_REG_ARM_CORE_REG(regs.sp):
142 return &vcpu->arch.ctxt.regs.sp;
144 case KVM_REG_ARM_CORE_REG(regs.pc):
145 return &vcpu->arch.ctxt.regs.pc;
147 case KVM_REG_ARM_CORE_REG(regs.pstate):
148 return &vcpu->arch.ctxt.regs.pstate;
150 case KVM_REG_ARM_CORE_REG(sp_el1):
151 return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
153 case KVM_REG_ARM_CORE_REG(elr_el1):
154 return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
156 case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
157 return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
159 case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
160 return &vcpu->arch.ctxt.spsr_abt;
162 case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
163 return &vcpu->arch.ctxt.spsr_und;
165 case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
166 return &vcpu->arch.ctxt.spsr_irq;
168 case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
169 return &vcpu->arch.ctxt.spsr_fiq;
171 case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
172 KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
173 off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
175 return &vcpu->arch.ctxt.fp_regs.vregs[off];
177 case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
178 return &vcpu->arch.ctxt.fp_regs.fpsr;
180 case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
181 return &vcpu->arch.ctxt.fp_regs.fpcr;
188 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
191 * Because the kvm_regs structure is a mix of 32, 64 and
192 * 128bit fields, we index it as if it was a 32bit
193 * array. Hence below, nr_regs is the number of entries, and
194 * off the index in the "array".
196 __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
197 int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
201 /* Our ID is an index into the kvm_regs struct. */
202 off = core_reg_offset_from_id(reg->id);
203 if (off >= nr_regs ||
204 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
207 addr = core_reg_addr(vcpu, reg);
211 if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
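/*
 * Writes are stricter than reads: the value is staged in a local buffer,
 * and a write to regs.pstate must name an exception level/mode the vcpu
 * can actually use (AArch32 modes only on 32-bit-capable vcpus). When the
 * vcpu ends up in an AArch32 mode, the general purpose registers are
 * narrowed to their 32-bit architectural width.
 */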
217 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
219 __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
220 int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
222 void *valp = &tmp, *addr;
226 /* Our ID is an index into the kvm_regs struct. */
227 off = core_reg_offset_from_id(reg->id);
228 if (off >= nr_regs ||
229 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
232 addr = core_reg_addr(vcpu, reg);
236 if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
239 if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
244 if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
245 u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
247 case PSR_AA32_MODE_USR:
248 if (!system_supports_32bit_el0())
251 case PSR_AA32_MODE_FIQ:
252 case PSR_AA32_MODE_IRQ:
253 case PSR_AA32_MODE_SVC:
254 case PSR_AA32_MODE_ABT:
255 case PSR_AA32_MODE_UND:
256 if (!vcpu_el1_is_32bit(vcpu))
262 if (vcpu_el1_is_32bit(vcpu))
271 memcpy(addr, valp, KVM_REG_SIZE(reg->id));
273 if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
276 switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
278 * Either we are dealing with user mode, and only the
279 * first 15 registers (+ PC) must be narrowed to 32bit.
280 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
282 case PSR_AA32_MODE_USR:
283 case PSR_AA32_MODE_SYS:
288 * Otherwise, this is a privileged mode, and *all* the
289 * registers must be narrowed to 32bit.
296 for (i = 0; i < nr_reg; i++)
297 vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
299 *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
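/*
 * The KVM_REG_ARM64_SVE_VLS pseudo-register is a bitmap of vector lengths:
 * bit (vq - SVE_VQ_MIN) of the u64 array is set when vector quadword count
 * vq is available. The helpers below locate and test that bit.
 */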
305 #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
306 #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
307 #define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))
309 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
311 unsigned int max_vq, vq;
312 u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
314 if (!vcpu_has_sve(vcpu))
317 if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
320 memset(vqs, 0, sizeof(vqs));
322 max_vq = vcpu_sve_max_vq(vcpu);
323 for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
324 if (sve_vq_available(vq))
325 vqs[vq_word(vq)] |= vq_mask(vq);
327 if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
333 static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
335 unsigned int max_vq, vq;
336 u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
338 if (!vcpu_has_sve(vcpu))
341 if (kvm_arm_vcpu_sve_finalized(vcpu))
342 return -EPERM; /* too late! */
344 if (WARN_ON(vcpu->arch.sve_state))
347 if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
351 for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
352 if (vq_present(vqs, vq))
355 if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
359 * Vector lengths supported by the host can't currently be
360 * hidden from the guest individually: instead we can only set a
361 * maximum via ZCR_EL2.LEN. So, make sure the available vector
362 * lengths match the set requested exactly up to the requested maximum:
365 for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
366 if (vq_present(vqs, vq) != sve_vq_available(vq))
369 /* Can't run with no vector lengths at all: */
370 if (max_vq < SVE_VQ_MIN)
373 /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
374 vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
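/*
 * Illustrative userspace ordering (a sketch, not part of this file): the
 * vector lengths must be written via the VLS pseudo-register before the
 * SVE feature is finalized, and only then may the Z/P/FFR registers be
 * accessed. Assuming a vcpu fd created with KVM_ARM_VCPU_SVE set:
 *
 *	__u64 vqs[KVM_ARM64_SVE_VLS_WORDS] = { ... };
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_ARM64_SVE_VLS,
 *		.addr = (__u64)(unsigned long)vqs,
 *	};
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &r);             (set_sve_vls())
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */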
379 #define SVE_REG_SLICE_SHIFT 0
380 #define SVE_REG_SLICE_BITS 5
381 #define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
382 #define SVE_REG_ID_BITS 5
384 #define SVE_REG_SLICE_MASK \
385 GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \
387 #define SVE_REG_ID_MASK \
388 GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
390 #define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
392 #define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
393 #define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
396 * Number of register slices required to cover each whole SVE register.
397 * NOTE: Only the first slice ever exists, for now.
398 * If you are tempted to modify this, you must also rework sve_reg_to_region()
401 #define vcpu_sve_slices(vcpu) 1
403 /* Bounds of a single SVE register slice within vcpu->arch.sve_state */
404 struct sve_state_reg_region {
405 unsigned int koffset; /* offset into sve_state in kernel memory */
406 unsigned int klen; /* length in kernel memory */
407 unsigned int upad; /* extra trailing padding in user memory */
411 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
414 static int sve_reg_to_region(struct sve_state_reg_region *region,
415 struct kvm_vcpu *vcpu,
416 const struct kvm_one_reg *reg)
418 /* reg ID ranges for Z- registers */
419 const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
420 const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
423 /* reg ID ranges for P- registers and FFR (which are contiguous) */
424 const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
425 const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
428 unsigned int reg_num;
430 unsigned int reqoffset, reqlen; /* User-requested offset and length */
431 unsigned int maxlen; /* Maximum permitted length */
433 size_t sve_state_size;
435 const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
438 /* Verify that the P-regs and FFR really do have contiguous IDs: */
439 BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
441 /* Verify that we match the UAPI header: */
442 BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
444 reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
446 if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
447 if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
450 vq = vcpu_sve_max_vq(vcpu);
452 reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
454 reqlen = KVM_SVE_ZREG_SIZE;
455 maxlen = SVE_SIG_ZREG_SIZE(vq);
456 } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
457 if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
460 vq = vcpu_sve_max_vq(vcpu);
462 reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
464 reqlen = KVM_SVE_PREG_SIZE;
465 maxlen = SVE_SIG_PREG_SIZE(vq);
470 sve_state_size = vcpu_sve_state_size(vcpu);
471 if (WARN_ON(!sve_state_size))
474 region->koffset = array_index_nospec(reqoffset, sve_state_size);
475 region->klen = min(maxlen, reqlen);
476 region->upad = reqlen - region->klen;
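/*
 * Accessors for the SVE registers proper. KVM_REG_ARM64_SVE_VLS is handled
 * as a special case; every other SVE register requires the vcpu's SVE
 * configuration to have been finalized, since sve_state is only allocated
 * at that point.
 */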
481 static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
484 struct sve_state_reg_region region;
485 char __user *uptr = (char __user *)reg->addr;
487 /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
488 if (reg->id == KVM_REG_ARM64_SVE_VLS)
489 return get_sve_vls(vcpu, reg);
491 /* Try to interpret reg ID as an architectural SVE register... */
492 ret = sve_reg_to_region(&region, vcpu, reg);
496 if (!kvm_arm_vcpu_sve_finalized(vcpu))
499 if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
501 clear_user(uptr + region.klen, region.upad))
507 static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
510 struct sve_state_reg_region region;
511 const char __user *uptr = (const char __user *)reg->addr;
513 /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
514 if (reg->id == KVM_REG_ARM64_SVE_VLS)
515 return set_sve_vls(vcpu, reg);
517 /* Try to interpret reg ID as an architectural SVE register... */
518 ret = sve_reg_to_region(&region, vcpu, reg);
522 if (!kvm_arm_vcpu_sve_finalized(vcpu))
525 if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
532 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
537 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
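/*
 * Walk the core register IDs, attaching the correct KVM_REG_SIZE_* encoding
 * to each. A NULL uindices pointer means "count only", which is how
 * num_core_regs() reuses this walk.
 */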
542 static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
543 u64 __user *uindices)
548 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
549 u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
550 int size = core_reg_size_from_offset(vcpu, i);
557 reg |= KVM_REG_SIZE_U32;
561 reg |= KVM_REG_SIZE_U64;
564 case sizeof(__uint128_t):
565 reg |= KVM_REG_SIZE_U128;
574 if (put_user(reg, uindices))
585 static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
587 return copy_core_reg_indices(vcpu, NULL);
591 * ARM64 versions of the TIMER registers, always available on arm64
594 #define NUM_TIMER_REGS 3
596 static bool is_timer_reg(u64 index)
599 case KVM_REG_ARM_TIMER_CTL:
600 case KVM_REG_ARM_TIMER_CNT:
601 case KVM_REG_ARM_TIMER_CVAL:
607 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
609 if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
612 if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
615 if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
621 static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
623 void __user *uaddr = (void __user *)(long)reg->addr;
627 ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
631 return kvm_arm_timer_set_reg(vcpu, reg->id, val);
634 static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
636 void __user *uaddr = (void __user *)(long)reg->addr;
639 val = kvm_arm_timer_get_reg(vcpu, reg->id);
640 return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
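/*
 * Each register slice contributes SVE_NUM_ZREGS Z-registers, SVE_NUM_PREGS
 * P-registers and FFR; one extra entry covers the VLS pseudo-register.
 * Only a single slice exists today (see vcpu_sve_slices()).
 */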
643 static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
645 const unsigned int slices = vcpu_sve_slices(vcpu);
647 if (!vcpu_has_sve(vcpu))
650 /* Policed by KVM_GET_REG_LIST: */
651 WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
653 return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
654 + 1; /* KVM_REG_ARM64_SVE_VLS */
657 static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
658 u64 __user *uindices)
660 const unsigned int slices = vcpu_sve_slices(vcpu);
665 if (!vcpu_has_sve(vcpu))
668 /* Policed by KVM_GET_REG_LIST: */
669 WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
672 * Enumerate this first, so that userspace can save/restore in
673 * the order reported by KVM_GET_REG_LIST:
675 reg = KVM_REG_ARM64_SVE_VLS;
676 if (put_user(reg, uindices++))
680 for (i = 0; i < slices; i++) {
681 for (n = 0; n < SVE_NUM_ZREGS; n++) {
682 reg = KVM_REG_ARM64_SVE_ZREG(n, i);
683 if (put_user(reg, uindices++))
688 for (n = 0; n < SVE_NUM_PREGS; n++) {
689 reg = KVM_REG_ARM64_SVE_PREG(n, i);
690 if (put_user(reg, uindices++))
695 reg = KVM_REG_ARM64_SVE_FFR(i);
696 if (put_user(reg, uindices++))
705 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
707 * This is for all registers.
709 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
711 unsigned long res = 0;
713 res += num_core_regs(vcpu);
714 res += num_sve_regs(vcpu);
715 res += kvm_arm_num_sys_reg_descs(vcpu);
716 res += kvm_arm_get_fw_num_regs(vcpu);
717 res += NUM_TIMER_REGS;
723 * kvm_arm_copy_reg_indices - get indices of all registers.
725 * We do core registers right here, then we append system regs.
727 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
731 ret = copy_core_reg_indices(vcpu, uindices);
736 ret = copy_sve_reg_indices(vcpu, uindices);
741 ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
744 uindices += kvm_arm_get_fw_num_regs(vcpu);
746 ret = copy_timer_indices(vcpu, uindices);
749 uindices += NUM_TIMER_REGS;
751 return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
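/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG entry points: dispatch on the
 * coprocessor field of the register ID, with the timer registers and the
 * remaining system registers handled as fallbacks.
 */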
754 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
756 /* We currently use nothing arch-specific in upper 32 bits */
757 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
760 switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
761 case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
762 case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
763 case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
766 if (is_timer_reg(reg->id))
767 return get_timer_reg(vcpu, reg);
769 return kvm_arm_sys_reg_get_reg(vcpu, reg);
772 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
774 /* We currently use nothing arch-specific in upper 32 bits */
775 if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
778 switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
779 case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
780 case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
781 case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
784 if (is_timer_reg(reg->id))
785 return set_timer_reg(vcpu, reg);
787 return kvm_arm_sys_reg_set_reg(vcpu, reg);
790 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
791 struct kvm_sregs *sregs)
796 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
797 struct kvm_sregs *sregs)
802 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
803 struct kvm_vcpu_events *events)
805 events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
806 events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
808 if (events->exception.serror_pending && events->exception.serror_has_esr)
809 events->exception.serror_esr = vcpu_get_vsesr(vcpu);
812 * We never return a pending ext_dabt here because we deliver it to
813 * the virtual CPU directly when setting the event and it's no longer
814 * 'pending' at this point.
820 int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
821 struct kvm_vcpu_events *events)
823 bool serror_pending = events->exception.serror_pending;
824 bool has_esr = events->exception.serror_has_esr;
825 bool ext_dabt_pending = events->exception.ext_dabt_pending;
827 if (serror_pending && has_esr) {
828 if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
831 if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
832 kvm_set_sei_esr(vcpu, events->exception.serror_esr);
835 } else if (serror_pending) {
836 kvm_inject_vabt(vcpu);
839 if (ext_dabt_pending)
840 kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
845 int __attribute_const__ kvm_target_cpu(void)
847 unsigned long implementor = read_cpuid_implementor();
848 unsigned long part_number = read_cpuid_part_number();
850 switch (implementor) {
851 case ARM_CPU_IMP_ARM:
852 switch (part_number) {
853 case ARM_CPU_PART_AEM_V8:
854 return KVM_ARM_TARGET_AEM_V8;
855 case ARM_CPU_PART_FOUNDATION:
856 return KVM_ARM_TARGET_FOUNDATION_V8;
857 case ARM_CPU_PART_CORTEX_A53:
858 return KVM_ARM_TARGET_CORTEX_A53;
859 case ARM_CPU_PART_CORTEX_A57:
860 return KVM_ARM_TARGET_CORTEX_A57;
863 case ARM_CPU_IMP_APM:
864 switch (part_number) {
865 case APM_CPU_PART_POTENZA:
866 return KVM_ARM_TARGET_XGENE_POTENZA;
871 /* Return a default generic target */
872 return KVM_ARM_TARGET_GENERIC_V8;
875 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
877 int target = kvm_target_cpu();
882 memset(init, 0, sizeof(*init));
885 * For now, we don't return any features.
886 * In future, we might use features to return target
887 * specific features available for the preferred
890 init->target = (__u32)target;
895 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
900 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
905 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
906 struct kvm_translation *tr)
912 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
913 * @vcpu: the vcpu pointer
914 * @dbg: the ioctl data buffer
916 * This sets up and enables the vcpu for guest debugging. Userspace
917 * passes in a control flag to enable different debug types and
918 * potentially other architecture specific information in the rest of
921 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
922 struct kvm_guest_debug *dbg)
926 trace_kvm_set_guest_debug(vcpu, dbg->control);
928 if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
933 if (dbg->control & KVM_GUESTDBG_ENABLE) {
934 vcpu->guest_debug = dbg->control;
936 /* Hardware assisted Break and Watch points */
937 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
938 vcpu->arch.external_debug_state = dbg->arch;
942 /* If not enabled clear all flags */
943 vcpu->guest_debug = 0;
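/*
 * Per-vcpu device attributes: PMUv3, the architected timers and paravirtual
 * time each get their own attribute group, dispatched below for the
 * set/get/has ioctls.
 */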
950 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
951 struct kvm_device_attr *attr)
955 switch (attr->group) {
956 case KVM_ARM_VCPU_PMU_V3_CTRL:
957 ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
959 case KVM_ARM_VCPU_TIMER_CTRL:
960 ret = kvm_arm_timer_set_attr(vcpu, attr);
962 case KVM_ARM_VCPU_PVTIME_CTRL:
963 ret = kvm_arm_pvtime_set_attr(vcpu, attr);
973 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
974 struct kvm_device_attr *attr)
978 switch (attr->group) {
979 case KVM_ARM_VCPU_PMU_V3_CTRL:
980 ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
982 case KVM_ARM_VCPU_TIMER_CTRL:
983 ret = kvm_arm_timer_get_attr(vcpu, attr);
985 case KVM_ARM_VCPU_PVTIME_CTRL:
986 ret = kvm_arm_pvtime_get_attr(vcpu, attr);
996 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
997 struct kvm_device_attr *attr)
1001 switch (attr->group) {
1002 case KVM_ARM_VCPU_PMU_V3_CTRL:
1003 ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
1005 case KVM_ARM_VCPU_TIMER_CTRL:
1006 ret = kvm_arm_timer_has_attr(vcpu, attr);
1008 case KVM_ARM_VCPU_PVTIME_CTRL:
1009 ret = kvm_arm_pvtime_has_attr(vcpu, attr);
1019 long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
1020 struct kvm_arm_copy_mte_tags *copy_tags)
1022 gpa_t guest_ipa = copy_tags->guest_ipa;
1023 size_t length = copy_tags->length;
1024 void __user *tags = copy_tags->addr;
1026 bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
1029 if (!kvm_has_mte(kvm))
1032 if (copy_tags->reserved[0] || copy_tags->reserved[1])
1035 if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
1038 if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
1041 gfn = gpa_to_gfn(guest_ipa);
1043 mutex_lock(&kvm->slots_lock);
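	/*
	 * Copy tags one page at a time: each page carries
	 * MTE_GRANULES_PER_PAGE tag granules. If the copy fails part-way
	 * through, the number of bytes already handled is reported instead
	 * of an error.
	 */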
1045 while (length > 0) {
1046 kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
1048 unsigned long num_tags;
1051 if (is_error_noslot_pfn(pfn)) {
1056 page = pfn_to_online_page(pfn);
1058 /* Reject ZONE_DEVICE memory */
1062 maddr = page_address(page);
1065 if (test_bit(PG_mte_tagged, &page->flags))
1066 num_tags = mte_copy_tags_to_user(tags, maddr,
1067 MTE_GRANULES_PER_PAGE);
1069 /* No tags in memory, so write zeros */
1070 num_tags = MTE_GRANULES_PER_PAGE -
1071 clear_user(tags, MTE_GRANULES_PER_PAGE);
1072 kvm_release_pfn_clean(pfn);
1074 num_tags = mte_copy_tags_from_user(maddr, tags,
1075 MTE_GRANULES_PER_PAGE);
1078 * Set the flag after checking the write
1081 if (num_tags == MTE_GRANULES_PER_PAGE)
1082 set_bit(PG_mte_tagged, &page->flags);
1084 kvm_release_pfn_dirty(pfn);
1087 if (num_tags != MTE_GRANULES_PER_PAGE) {
1094 length -= PAGE_SIZE;
1098 mutex_unlock(&kvm->slots_lock);
1099 /* If some data has been copied, report the number of bytes copied */
1100 if (length != copy_tags->length)
1101 return copy_tags->length - length;