// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

static bool system_has_full_ptr_auth(void)
{
	return system_supports_address_auth() && system_supports_generic_auth();
}

/**
 * kvm_arch_vm_ioctl_check_extension
 *
 * We currently assume that the number of HW registers is uniform
 * across all CPUs (see cpuinfo_sanity_check).
 */
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_arm_support_pmu_v3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = kvm_ipa_limit;
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = system_has_full_ptr_auth();
		break;
	default:
		r = 0;
	}

	return r;
}
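
/*
 * Illustrative sketch, not part of the original file: userspace is expected
 * to probe the capabilities above before requesting the matching vcpu
 * features.  Assuming a vm_fd obtained from KVM_CREATE_VM:
 *
 *	int has_sve = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE);
 *	int ipa_bits = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
 *
 * Most capabilities report 0/1, but KVM_CAP_ARM_VM_IPA_SIZE returns
 * kvm_ipa_limit itself, in bits.
 */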

unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	/* Verify that KVM startup enforced this when SVE was detected: */
	if (WARN_ON(!has_vhe()))
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls().  Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}
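
/*
 * Illustrative sketch, not part of the original file: the userspace ordering
 * enforced above.  SVE is requested at KVM_ARM_VCPU_INIT time, the vector
 * length set may then be customized through KVM_REG_ARM64_SVE_VLS, and
 * KVM_ARM_VCPU_FINALIZE freezes the configuration.  Assuming a vcpu_fd from
 * KVM_CREATE_VCPU and an init struct filled via KVM_ARM_PREFERRED_TARGET:
 *
 *	int feature = KVM_ARM_VCPU_SVE;
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *	(optional KVM_SET_ONE_REG of KVM_REG_ARM64_SVE_VLS goes here)
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 *
 * A second KVM_ARM_VCPU_FINALIZE fails with EPERM, per the check above.
 */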

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by userspace together and that the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;

	return 0;
}
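
/*
 * Illustrative sketch, not part of the original file: because
 * kvm_vcpu_enable_ptrauth() accepts the two features only as a pair,
 * userspace must set both bits in the KVM_ARM_VCPU_INIT feature set:
 *
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * Requesting only one of the two makes the vcpu reset fail with EINVAL.
 */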

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code.  In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded.  Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function.  Otherwise we leave the state alone.  In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int ret = -EINVAL;
	bool loaded;
	u32 pstate;

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu))
			goto out;
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
				goto out;
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (vcpu->arch.reset_state.reset) {
		unsigned long target_pc = vcpu->arch.reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (vcpu->arch.reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);

		vcpu->arch.reset_state.reset = false;
	}

	/* Default workaround setup is enabled (if supported) */
	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int kvm_set_ipa_limit(void)
{
	unsigned int ipa_max, pa_max, va_max, parange, tgran_2;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (PAGE_SIZE) {
	default:
	case SZ_4K:
		tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
		break;
	case SZ_16K:
		tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
		break;
	case SZ_64K:
		tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
		break;
	}

	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
	default:
	case 1:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case 0:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case 2:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	}

	pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);

	/* Clamp the IPA limit to the PA size supported by the kernel */
	ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;

	/*
	 * Since our stage2 table is dependent on the stage1 page table code,
	 * we must always honor the following condition:
	 *
	 *  Number of levels in Stage1 >= Number of levels in Stage2.
	 *
	 * So clamp the ipa limit further down to limit the number of levels.
	 * Since we can concatenate up to 16 tables at the entry level, we
	 * could go up to 4 bits above the maximum VA addressable with the
	 * current number of levels.
	 */
	va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
	va_max += 4;

	if (va_max < ipa_max)
		ipa_max = va_max;

	/*
	 * If the final limit is lower than the real physical address
	 * limit of the CPUs, report the reason.
	 */
	if (ipa_max < pa_max)
		pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
			(va_max < pa_max) ? "Virtual" : "Physical");

	WARN(ipa_max < KVM_PHYS_SHIFT,
	     "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
	kvm_ipa_limit = ipa_max;
	kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);

	return 0;
}
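
/*
 * Illustrative sketch, not part of the original file: kvm_ipa_limit bounds
 * the IPA size userspace may encode in the type argument of KVM_CREATE_VM,
 * e.g. for a 40-bit guest physical address space (kvm_fd from /dev/kvm):
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(40));
 *
 * A type of 0 selects the default KVM_PHYS_SHIFT (40 bits); any request
 * above kvm_ipa_limit is rejected by kvm_arm_setup_stage2() below.
 */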

/*
 * Configure the VTCR_EL2 for this VM. The VTCR value is common
 * across all the physical CPUs on the system. We use system wide
 * sanitised values to fill in different fields, except for Hardware
 * Management of Access Flags. HA Flag is set unconditionally on
 * all CPUs, as it is safe to run with or without the feature and
 * the bit is RES0 on CPUs that don't support it.
 */
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
	u32 parange, phys_shift;
	u8 lvls;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_PARANGE_MAX)
		parange = ID_AA64MMFR0_PARANGE_MAX;
	vtcr |= parange << VTCR_EL2_PS_SHIFT;

	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. The feature is RES0 on CPUs without the support
	 * and must be ignored by them.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (kvm_get_vmid_bits() == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;
	kvm->arch.vtcr = vtcr;
	return 0;
}
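
/*
 * Illustrative worked example, not part of the original file, assuming 4K
 * pages: KVM_VM_TYPE_ARM_IPA_SIZE(40) gives phys_shift = 40, so T0SZ is
 * 64 - 40 = 24 and stage2_pgtable_levels(40) resolves to a 3-level table
 * (concatenating up to 16 tables at the entry level absorbs 4 bits), which
 * VTCR_EL2_LVLS_TO_SL0() then encodes into the SL0 field of VTCR_EL2.
 */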