Merge tag 'kvmarm-fixes-5.18-1' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Paolo Bonzini <pbonzini@redhat.com>
Fri, 8 Apr 2022 16:30:04 +0000 (12:30 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 8 Apr 2022 16:30:04 +0000 (12:30 -0400)
KVM/arm64 fixes for 5.18, take #1

- Some PSCI fixes after introducing PSCIv1.1 and SYSTEM_RESET2

- Fix the MMU write-lock not being taken on THP split

- Fix mixed-width VM handling

- Fix potential UAF when debugfs registration fails

- Various selftest updates for all of the above

14 files changed:
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/mmu.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-debug.c
arch/arm64/kvm/vgic/vgic-its.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/arch_timer.c
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
virt/kvm/kvm_main.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d62405c..7496dea 100644
@@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
        return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+                              &kvm->arch.flags));
+
+       return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
@@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }
 
-       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+       if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-       /*
-        * TID3: trap feature register accesses that we virtualise.
-        * For now this is conditional, since no AArch32 feature regs
-        * are currently virtualised.
-        */
-       if (!vcpu_el1_is_32bit(vcpu))
+       else
+               /*
+                * TID3: trap feature register accesses that we virtualise.
+                * For now this is conditional, since no AArch32 feature regs
+                * are currently virtualised.
+                */
                vcpu->arch.hcr_el2 |= HCR_TID3;
 
        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e3b25dc..94a27a7 100644
@@ -127,6 +127,16 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_MTE_ENABLED                      1
        /* At least one vCPU has run in the VM */
 #define KVM_ARCH_FLAG_HAS_RAN_ONCE                     2
+       /*
+        * The following two bits are used to indicate the guest's EL1
+        * register width configuration. The value of the KVM_ARCH_FLAG_EL1_32BIT
+        * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+        * Otherwise, the guest's EL1 register width has not yet been
+        * determined.
+        */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED             3
+#define KVM_ARCH_FLAG_EL1_32BIT                                4
+
        unsigned long flags;
 
        /*
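
For reference, here is a minimal sketch (not part of the patch) of how the two flags are meant to be read together; it mirrors the non-hypervisor vcpu_el1_is_32bit() helper added to kvm_emulate.h above, with a hypothetical helper name:

	/* Illustration only: EL1_32BIT is meaningful only once the width is configured. */
	static inline bool example_vm_el1_is_32bit(struct kvm *kvm)
	{
		if (!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags))
			return false;	/* width not decided yet */

		return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
	}
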
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0d19259..53ae2c0 100644
@@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
-       bool logging_perm_fault = false;
+       bool use_read_lock = false;
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
-               logging_perm_fault = (fault_status == FSC_PERM && write_fault);
+               use_read_lock = (fault_status == FSC_PERM && write_fault &&
+                                fault_granule == PAGE_SIZE);
        } else {
                vma_shift = get_vma_page_shift(vma, hva);
        }
@@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * To reduce MMU contentions and enhance concurrency during dirty
         * logging, only acquire read lock for permission relaxation.
         */
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_lock(&kvm->mmu_lock);
        else
                write_lock(&kvm->mmu_lock);
@@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        } else {
+               WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
+
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
                                             memcache);
@@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
 out_unlock:
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);
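
In short, after this fix the lock choice in user_mem_abort() reduces to the condition below; this is a sketch of the logic using the variables from the hunks above, not the literal kernel code:

	/*
	 * The read lock is only sufficient when the fault can be resolved by
	 * relaxing permissions on an existing PAGE_SIZE mapping. Anything that
	 * has to install a new mapping (including the THP-split path) must
	 * hold the write lock.
	 */
	use_read_lock = logging_active && write_fault &&
			fault_status == FSC_PERM &&
			fault_granule == PAGE_SIZE;

	if (use_read_lock)
		read_lock(&kvm->mmu_lock);
	else
		write_lock(&kvm->mmu_lock);
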
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 372da09..baac2b4 100644
@@ -215,15 +215,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
 {
-       switch(fn) {
-       case PSCI_0_2_FN64_CPU_SUSPEND:
-       case PSCI_0_2_FN64_CPU_ON:
-       case PSCI_0_2_FN64_AFFINITY_INFO:
-               /* Disallow these functions for 32bit guests */
-               if (vcpu_mode_is_32bit(vcpu))
-                       return PSCI_RET_NOT_SUPPORTED;
-               break;
-       }
+       /*
+        * Prevent 32 bit guests from calling 64 bit PSCI functions.
+        */
+       if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+               return PSCI_RET_NOT_SUPPORTED;
 
        return 0;
 }
@@ -235,10 +231,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
        unsigned long val;
        int ret = 1;
 
-       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
-       if (val)
-               goto out;
-
        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
@@ -306,7 +298,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                break;
        }
 
-out:
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
 }
@@ -318,9 +309,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
        unsigned long val;
        int ret = 1;
 
-       if (minor > 1)
-               return -EINVAL;
-
        switch(psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
@@ -426,6 +414,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
  */
 int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
+       u32 psci_fn = smccc_get_function(vcpu);
+       unsigned long val;
+
+       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+       if (val) {
+               smccc_set_retval(vcpu, val, 0, 0, 0);
+               return 1;
+       }
+
        switch (kvm_psci_version(vcpu)) {
        case KVM_ARM_PSCI_1_1:
                return kvm_psci_1_x_call(vcpu, 1);
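
The single-bit check above works because PSCI (SMCCC) function IDs encode the calling convention directly: the 64-bit variants are the 32-bit IDs with the PSCI_0_2_64BIT bit (bit 30) added, as defined in include/uapi/linux/psci.h. For example:

	#define PSCI_0_2_FN_BASE	0x84000000
	#define PSCI_0_2_64BIT		0x40000000
	#define PSCI_0_2_FN64_BASE	(PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)

	/* PSCI_0_2_FN_CPU_ON   == 0x84000003 (SMC32) */
	/* PSCI_0_2_FN64_CPU_ON == 0xc4000003 (SMC64) */

so testing fn & PSCI_0_2_64BIT rejects every 64-bit function for AArch32 guests in one place, instead of enumerating them case by case.
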
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index ecc40c8..6c70c6f 100644
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *tmp;
+       struct kvm *kvm = vcpu->kvm;
        bool is32bit;
-       unsigned long i;
 
        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+       lockdep_assert_held(&kvm->lock);
+
+       if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+               /*
+                * The guest's register width is already configured.
+                * Make sure that the vcpu is consistent with it.
+                */
+               if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+                       return 0;
+
+               return -EINVAL;
+       }
+
        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
-               return false;
+               return -EINVAL;
 
        /* MTE is incompatible with AArch32 */
-       if (kvm_has_mte(vcpu->kvm) && is32bit)
-               return false;
+       if (kvm_has_mte(kvm) && is32bit)
+               return -EINVAL;
 
-       /* Check that the vcpus are either all 32bit or all 64bit */
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-               if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
-                       return false;
-       }
+       if (is32bit)
+               set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
 
-       return true;
+       set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+       return 0;
 }
 
 /**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        u32 pstate;
 
        mutex_lock(&vcpu->kvm->lock);
-       reset_state = vcpu->arch.reset_state;
-       WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       ret = kvm_set_vm_width(vcpu);
+       if (!ret) {
+               reset_state = vcpu->arch.reset_state;
+               WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       }
        mutex_unlock(&vcpu->kvm->lock);
 
+       if (ret)
+               return ret;
+
        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);
 
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (!vcpu_allowed_register_width(vcpu)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        switch (vcpu->arch.target) {
        default:
-               if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+               if (vcpu_el1_is_32bit(vcpu)) {
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
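
From userspace, the effect is that the first vCPU to complete KVM_ARM_VCPU_INIT latches the VM-wide register width under kvm->lock, and any later vCPU requesting the other width gets -EINVAL. A rough sketch using the pre-existing selftest helpers (the init64/init32 variable names are illustrative, not from the patch):

	/* vCPU 0: a 64bit init succeeds and fixes the VM width. */
	vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, &init64);

	/* vCPU 1: requesting 32bit EL1 afterwards must now fail. */
	init32.features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
	ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, &init32);
	TEST_ASSERT(ret != 0, "Mixed-width configuration should be rejected");
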
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index f38c40a..78cde68 100644
@@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
 
 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        mutex_lock(&kvm->lock);
@@ -110,7 +110,7 @@ out:
 
 static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter = kvm->arch.vgic.iter;
 
        ++*pos;
@@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 
 static void vgic_debug_stop(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        /*
@@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 
 static int vgic_debug_show(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
-       struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+       struct kvm *kvm = s->private;
+       struct vgic_state_iter *iter = v;
        struct vgic_irq *irq;
        struct kvm_vcpu *vcpu = NULL;
        unsigned long flags;
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 089fc2f..2e13402 100644
@@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                                void *ptr, void *opaque)
 {
-       struct its_device *dev = (struct its_device *)opaque;
+       struct its_device *dev = opaque;
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        struct kvm_vcpu *vcpu = NULL;
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index d1e8f52..573d93a 100644
@@ -3,6 +3,7 @@
 /aarch64/debug-exceptions
 /aarch64/get-reg-list
 /aarch64/psci_cpu_on_test
+/aarch64/vcpu_width_config
 /aarch64/vgic_init
 /aarch64/vgic_irq
 /s390x/memop
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 21c2dbd..681b173 100644
@@ -106,6 +106,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
 TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
+TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
 TEST_GEN_PROGS_aarch64 += demand_paging_test
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index b08d30b..3b940a1 100644
@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
        pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
 }
 
+static int gic_fd;
+
 static struct kvm_vm *test_vm_create(void)
 {
        struct kvm_vm *vm;
        unsigned int i;
-       int ret;
        int nr_vcpus = test_args.nr_vcpus;
 
        vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
 
        ucall_init(vm, NULL);
        test_init_timer_irq(vm);
-       ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-       if (ret < 0) {
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+       if (gic_fd < 0) {
                print_skip("Failed to create vgic-v3");
                exit(KSFT_SKIP);
        }
@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
        return vm;
 }
 
+static void test_vm_cleanup(struct kvm_vm *vm)
+{
+       close(gic_fd);
+       kvm_vm_free(vm);
+}
+
 static void test_print_help(char *name)
 {
        pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
 
        vm = test_vm_create();
        test_run(vm);
-       kvm_vm_free(vm);
+       test_vm_cleanup(vm);
 
        return 0;
 }
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index f12147c..0b571f3 100644
@@ -503,8 +503,13 @@ static void run_test(struct vcpu_config *c)
                ++missing_regs;
 
        if (new_regs || missing_regs) {
+               n = 0;
+               for_each_reg_filtered(i)
+                       ++n;
+
                printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
-               printf("%s: Number registers:         %5lld\n", config_name(c), reg_list->n);
+               printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
+                      config_name(c), reg_list->n, reg_list->n - n);
        }
 
        if (new_regs) {
@@ -683,9 +688,10 @@ static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
-       KVM_REG_ARM_FW_REG(0),
-       KVM_REG_ARM_FW_REG(1),
-       KVM_REG_ARM_FW_REG(2),
+       KVM_REG_ARM_FW_REG(0),          /* KVM_REG_ARM_PSCI_VERSION */
+       KVM_REG_ARM_FW_REG(1),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
+       KVM_REG_ARM_FW_REG(2),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
+       KVM_REG_ARM_FW_REG(3),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
        ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
new file mode 100644
index 0000000..6e94026
--- /dev/null
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
+ *
+ * Copyright (c) 2022 Google LLC.
+ *
+ * This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
+ * or all 32bit vCPUs) can be configured, and that mixed-width vCPUs cannot
+ * be configured.
+ */
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+
+/*
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, and then
+ * add another vCPU, and run KVM_ARM_VCPU_INIT with @init2.
+ */
+static int add_init_2vcpus(struct kvm_vcpu_init *init1,
+                          struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       vm_vcpu_add(vm, 1);
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
+ * and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
+ */
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
+                                 struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       vm_vcpu_add(vm, 1);
+
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
+ * configured, and two mixed-width vCPUs cannot be configured.
+ * For each of those three cases, the vCPUs are configured in two different
+ * orders. One order runs KVM_CREATE_VCPU for both vCPUs and then runs
+ * KVM_ARM_VCPU_INIT for them. The other runs KVM_CREATE_VCPU and
+ * KVM_ARM_VCPU_INIT for one vCPU, and then runs those commands for the
+ * other vCPU.
+ */
+int main(void)
+{
+       struct kvm_vcpu_init init1, init2;
+       struct kvm_vm *vm;
+       int ret;
+
+       if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
+               print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
+               exit(KSFT_SKIP);
+       }
+
+       /* Get the preferred target type and copy that to init2 for later use */
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+       vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+       kvm_vm_free(vm);
+       init2 = init1;
+
+       /* Test with 64bit vCPUs */
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with 32bit vCPUs */
+       init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with mixed-width vCPUs  */
+       init1.features[0] = 0;
+       init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+
+       return 0;
+}
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index c9d9e51..7b47ae4 100644
 #include "test_util.h"
 #include "perf_test_util.h"
 #include "guest_modes.h"
+
 #ifdef __aarch64__
 #include "aarch64/vgic.h"
 
 #define GICD_BASE_GPA                  0x8000000ULL
 #define GICR_BASE_GPA                  0x80A0000ULL
+
+static int gic_fd;
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+       /*
+        * The test can still run even if hardware does not support GICv3, as it
+        * is only an optimization to reduce guest exits.
+        */
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+       if (gic_fd > 0)
+               close(gic_fd);
+}
+
+#else /* __aarch64__ */
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+}
+
 #endif
 
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
@@ -206,9 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                vm_enable_cap(vm, &cap);
        }
 
-#ifdef __aarch64__
-       vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-#endif
+       arch_setup_vm(vm, nr_vcpus);
 
        /* Start the iterations */
        iteration = 0;
@@ -302,6 +329,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        }
 
        free_bitmaps(bitmaps, p->slots);
+       arch_cleanup_vm(vm);
        perf_test_destroy_vm(vm);
 }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b22f380..dfb7dab 100644
@@ -932,7 +932,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
-       if (!kvm->debugfs_dentry)
+       if (IS_ERR(kvm->debugfs_dentry))
                return;
 
        debugfs_remove_recursive(kvm->debugfs_dentry);
@@ -955,6 +955,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
+       /*
+        * Force subsequent debugfs file creations to fail if the VM directory
+        * is not created.
+        */
+       kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
        if (!debugfs_initialized())
                return 0;
 
@@ -5479,7 +5485,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (kvm->debugfs_dentry) {
+       if (!IS_ERR(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {
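
The debugfs change follows a common sentinel pattern: initialise the dentry to an error pointer before anything can fail, so every later consumer only has to check IS_ERR() rather than guessing whether NULL means "not created yet" or "creation failed". A condensed sketch of the pattern (not the full kvm_main.c flow):

	kvm->debugfs_dentry = ERR_PTR(-ENOENT);		/* nothing created yet */

	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
	if (IS_ERR(dent))
		return 0;				/* debugfs is best-effort */
	kvm->debugfs_dentry = dent;

	/* ... later, on teardown or in the uevent path ... */
	if (!IS_ERR(kvm->debugfs_dentry))
		debugfs_remove_recursive(kvm->debugfs_dentry);
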