Merge branch kvm-arm64/timer-vm-offsets into kvmarm-master/next
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ad3655a..59f6245 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -158,6 +158,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        kvm_vgic_early_init(kvm);
 
+       kvm_timer_init_vm(kvm);
+
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->max_vcpus = kvm_arm_default_max_vcpus();
 
@@ -230,6 +232,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_VCPU_ATTRIBUTES:
        case KVM_CAP_PTP_KVM:
        case KVM_CAP_ARM_SYSTEM_SUSPEND:
+       case KVM_CAP_COUNTER_OFFSET:
                r = 1;
                break;
        case KVM_CAP_SET_GUEST_DEBUG2:
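
With the capability advertised above, userspace can probe whether the kernel
supports the VM-wide counter offset before relying on it. A minimal sketch,
assuming vm_fd is a VM file descriptor from KVM_CREATE_VM and that the
installed <linux/kvm.h> is recent enough to define KVM_CAP_COUNTER_OFFSET:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Returns non-zero if this VM supports KVM_ARM_SET_COUNTER_OFFSET. */
	static int has_counter_offset(int vm_fd)
	{
		return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COUNTER_OFFSET) > 0;
	}
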
@@ -1514,11 +1517,61 @@ long kvm_arch_vm_ioctl(struct file *filp,
                        return -EFAULT;
                return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
        }
+       case KVM_ARM_SET_COUNTER_OFFSET: {
+               struct kvm_arm_counter_offset offset;
+
+               if (copy_from_user(&offset, argp, sizeof(offset)))
+                       return -EFAULT;
+               return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
+       }
        default:
                return -EINVAL;
        }
 }
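
The handler above copies a struct kvm_arm_counter_offset from userspace and
hands it to the timer code. A sketch of the calling side; the counter_offset
field name and the zeroed reserved space are assumptions based on the UAPI
header accompanying this series, not something visible in this hunk:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Apply a VM-wide counter offset. The kernel grabs every vcpu->mutex
	 * first (see lock_all_vcpus() below), so this can fail with -EBUSY if
	 * a vCPU is concurrently inside an ioctl such as KVM_RUN. */
	static int set_counter_offset(int vm_fd, __u64 off)
	{
		struct kvm_arm_counter_offset offset;

		memset(&offset, 0, sizeof(offset)); /* keep reserved space zeroed */
		offset.counter_offset = off;        /* assumed field name */

		return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &offset);
	}
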
 
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+       struct kvm_vcpu *tmp_vcpu;
+
+       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+               mutex_unlock(&tmp_vcpu->mutex);
+       }
+}
+
+void unlock_all_vcpus(struct kvm *kvm)
+{
+       lockdep_assert_held(&kvm->lock);
+
+       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+bool lock_all_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *tmp_vcpu;
+       unsigned long c;
+
+       lockdep_assert_held(&kvm->lock);
+
+       /*
+        * Any time a vcpu is in an ioctl (including running), the
+        * core KVM code tries to grab the vcpu->mutex.
+        *
+        * By grabbing the vcpu->mutex of all VCPUs we ensure that no
+        * other VCPUs can fiddle with the state while we access it.
+        */
+       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+               if (!mutex_trylock(&tmp_vcpu->mutex)) {
+                       unlock_vcpus(kvm, c - 1);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
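
The comment in lock_all_vcpus() explains the scheme: since the core KVM code
takes vcpu->mutex around every vCPU ioctl, holding all of those mutexes
guarantees that no vCPU can run or touch shared state while a VM-wide update
is in flight. A sketch of the intended caller pattern, using a hypothetical
field in place of the real timer-offset state (which lives outside this file):

	/* Sketch only: update VM-wide state with every vCPU excluded. */
	static int example_set_vm_state(struct kvm *kvm, u64 val)
	{
		int ret = 0;

		mutex_lock(&kvm->lock);	/* lock_all_vcpus() asserts this is held */

		if (lock_all_vcpus(kvm)) {
			kvm->arch.example_state = val;	/* hypothetical field */
			unlock_all_vcpus(kvm);
		} else {
			ret = -EBUSY;	/* some vCPU is busy in an ioctl */
		}

		mutex_unlock(&kvm->lock);
		return ret;
	}
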
 static unsigned long nvhe_percpu_size(void)
 {
        return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -