Merge branch kvm-arm64/smccc-filtering into kvmarm-master/next
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index efee032..bb21d0c 100644
@@ -128,6 +128,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
        int ret;
 
+       mutex_init(&kvm->arch.config_lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Clue in lockdep that the config_lock must be taken inside kvm->lock */
+       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
+       mutex_unlock(&kvm->arch.config_lock);
+       mutex_unlock(&kvm->lock);
+#endif
+
        ret = kvm_share_hyp(kvm, kvm + 1);
        if (ret)
                return ret;
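
Both CONFIG_LOCKDEP blocks added by this patch (the one above for kvm->lock vs. config_lock, and the one in kvm_arch_vcpu_create() below for vcpu->mutex vs. config_lock) rely on the same trick: take the locks once, in the intended order, at init time, so lockdep records that ordering and will flag any later inversion. A minimal sketch of the technique, using hypothetical locks rather than the kvm structures:

/*
 * Sketch only (hypothetical locks, not the kvm ones): prime lockdep with
 * the intended ordering by taking the locks once, in the right order, at
 * init time.  Any later attempt to take them in the opposite order will
 * produce a lockdep splat instead of a silent deadlock hazard.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(outer_lock);	/* plays the role of kvm->lock */
static DEFINE_MUTEX(inner_lock);	/* plays the role of config_lock */

static void prime_lock_ordering(void)
{
#ifdef CONFIG_LOCKDEP
	mutex_lock(&outer_lock);
	mutex_lock(&inner_lock);
	mutex_unlock(&inner_lock);
	mutex_unlock(&outer_lock);
#endif
}
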
@@ -148,6 +158,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_vgic_early_init(kvm);
 
+       kvm_timer_init_vm(kvm);
+
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->max_vcpus = kvm_arm_default_max_vcpus();
 
@@ -222,6 +234,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_VCPU_ATTRIBUTES:
        case KVM_CAP_PTP_KVM:
        case KVM_CAP_ARM_SYSTEM_SUSPEND:
+       case KVM_CAP_COUNTER_OFFSET:
                r = 1;
                break;
        case KVM_CAP_SET_GUEST_DEBUG2:
@@ -328,6 +341,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
        int err;
 
+       spin_lock_init(&vcpu->arch.mp_state_lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Inform lockdep that the config_lock is acquired after vcpu->mutex */
+       mutex_lock(&vcpu->mutex);
+       mutex_lock(&vcpu->kvm->arch.config_lock);
+       mutex_unlock(&vcpu->kvm->arch.config_lock);
+       mutex_unlock(&vcpu->mutex);
+#endif
+
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;
        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
@@ -445,34 +468,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        vcpu->cpu = -1;
 }
 
-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
+       WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
 }
 
+void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+       spin_lock(&vcpu->arch.mp_state_lock);
+       __kvm_arm_vcpu_power_off(vcpu);
+       spin_unlock(&vcpu->arch.mp_state_lock);
+}
+
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
+       return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
 }
 
 static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
+       WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
        kvm_make_request(KVM_REQ_SUSPEND, vcpu);
        kvm_vcpu_kick(vcpu);
 }
 
 static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
+       return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       *mp_state = vcpu->arch.mp_state;
+       *mp_state = READ_ONCE(vcpu->arch.mp_state);
 
        return 0;
 }
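
The mp_state conversions in this hunk follow a single pattern: every writer now holds mp_state_lock and publishes through WRITE_ONCE(), while readers that take no lock use READ_ONCE() to get a non-torn, non-reloaded snapshot. A reduced sketch of the pattern, using a hypothetical structure rather than the vcpu:

/*
 * Reduced sketch (hypothetical struct, not vcpu->arch): writers serialise
 * on a spinlock and publish with WRITE_ONCE(); lockless readers pair that
 * with READ_ONCE() so the compiler can neither tear nor re-load the value.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	spinlock_t	lock;
	u32		state;
};

static void foo_set_state(struct foo *f, u32 new_state)
{
	spin_lock(&f->lock);
	WRITE_ONCE(f->state, new_state);
	spin_unlock(&f->lock);
}

static u32 foo_get_state(struct foo *f)
{
	return READ_ONCE(f->state);	/* no lock needed for a single read */
}
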
@@ -482,12 +512,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
        int ret = 0;
 
+       spin_lock(&vcpu->arch.mp_state_lock);
+
        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
-               vcpu->arch.mp_state = *mp_state;
+               WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
                break;
        case KVM_MP_STATE_STOPPED:
-               kvm_arm_vcpu_power_off(vcpu);
+               __kvm_arm_vcpu_power_off(vcpu);
                break;
        case KVM_MP_STATE_SUSPENDED:
                kvm_arm_vcpu_suspend(vcpu);
@@ -496,6 +528,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                ret = -EINVAL;
        }
 
+       spin_unlock(&vcpu->arch.mp_state_lock);
+
        return ret;
 }
 
@@ -595,9 +629,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
        if (kvm_vm_is_protected(kvm))
                kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.config_lock);
        set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.config_lock);
 
        return ret;
 }
@@ -1212,10 +1246,14 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
        /*
         * Handle the "start in power-off" case.
         */
+       spin_lock(&vcpu->arch.mp_state_lock);
+
        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
-               kvm_arm_vcpu_power_off(vcpu);
+               __kvm_arm_vcpu_power_off(vcpu);
        else
-               vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+               WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+
+       spin_unlock(&vcpu->arch.mp_state_lock);
 
        return 0;
 }
@@ -1502,6 +1540,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        return -EFAULT;
                return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
        }
+       case KVM_ARM_SET_COUNTER_OFFSET: {
+               struct kvm_arm_counter_offset offset;
+
+               if (copy_from_user(&offset, argp, sizeof(offset)))
+                       return -EFAULT;
+               return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
+       }
        case KVM_HAS_DEVICE_ATTR: {
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        return -EFAULT;
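
For context, KVM_ARM_SET_COUNTER_OFFSET is a VM-scoped ioctl, and userspace is expected to check the KVM_CAP_COUNTER_OFFSET capability advertised above before calling it. A hedged userspace sketch, assuming the uapi struct layout introduced by this series (a __u64 counter_offset plus a reserved field):

/*
 * Userspace sketch, not part of this patch: program the VM-wide counter
 * offset through the new ioctl.  Assumes a <linux/kvm.h> that already
 * defines KVM_ARM_SET_COUNTER_OFFSET and struct kvm_arm_counter_offset.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int set_counter_offset(int vm_fd, __u64 offset)
{
	struct kvm_arm_counter_offset off = {
		.counter_offset	= offset,
		/* .reserved is implicitly zeroed and must stay zero */
	};

	if (ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off) < 0) {
		perror("KVM_ARM_SET_COUNTER_OFFSET");
		return -1;
	}

	return 0;
}
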
@@ -1519,6 +1564,49 @@ long kvm_arch_vm_ioctl(struct file *filp,
        }
 }
 
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+       struct kvm_vcpu *tmp_vcpu;
+
+       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+               mutex_unlock(&tmp_vcpu->mutex);
+       }
+}
+
+void unlock_all_vcpus(struct kvm *kvm)
+{
+       lockdep_assert_held(&kvm->lock);
+
+       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+bool lock_all_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *tmp_vcpu;
+       unsigned long c;
+
+       lockdep_assert_held(&kvm->lock);
+
+       /*
+        * Any time a vcpu is in an ioctl (including running), the
+        * core KVM code tries to grab the vcpu->mutex.
+        *
+        * By grabbing the vcpu->mutex of all VCPUs we ensure that no
+        * other VCPUs can fiddle with the state while we access it.
+        */
+       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+               if (!mutex_trylock(&tmp_vcpu->mutex)) {
+                       unlock_vcpus(kvm, c - 1);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
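
As the comment in lock_all_vcpus() explains, holding every vcpu->mutex excludes all vcpu ioctls (including KVM_RUN), so VM-wide state can be changed without any vcpu observing it mid-update. A hypothetical caller, not taken from this patch, would look like:

/*
 * Illustrative caller only, not from this patch: both helpers assert that
 * kvm->lock is held, so the kvm->lock -> all-vcpu-mutexes ordering is
 * fixed by construction.
 */
static int update_vm_wide_state(struct kvm *kvm)
{
	int ret = 0;

	mutex_lock(&kvm->lock);

	if (!lock_all_vcpus(kvm)) {
		/* At least one vcpu is busy in an ioctl; let the caller retry. */
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	/* ... mutate state that vcpus must not observe mid-update ... */

	unlock_all_vcpus(kvm);
	mutex_unlock(&kvm->lock);

	return ret;
}
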
 static unsigned long nvhe_percpu_size(void)
 {
        return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -