Merge tag 'kvm-arm-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm...
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ca54b09..b4ab59d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Kernel-based Virtual Machine driver for Linux
  *
@@ -10,10 +11,6 @@
  * Authors:
  *   Avi Kivity   <avi@qumranet.com>
  *   Yaniv Kamay  <yaniv@qumranet.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
  */

 #include <kvm/iodev.h>
@@ -98,7 +95,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */

-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
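The rest of the patch follows from this change of lock type: every spin_lock(&kvm_lock)/spin_unlock(&kvm_lock) pair in the hunks below becomes mutex_lock()/mutex_unlock(). The practical difference is that a mutex holder may sleep, which a spinlock holder may not. As a rough, self-contained userspace analogue of the pattern (pthread_mutex_t standing in for the kernel's struct mutex, and a hand-rolled list standing in for vm_list; none of these names come from the patch):

        /*
         * Userspace sketch of the new kvm_lock pattern: a mutex guarding a
         * global list of VMs.  struct vm, vm_list, vm_register() and
         * vm_count() are illustrative names, not taken from the patch.
         */
        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct vm {
                int id;
                struct vm *next;
        };

        static pthread_mutex_t vm_list_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct vm *vm_list;

        static void vm_register(struct vm *vm)
        {
                pthread_mutex_lock(&vm_list_lock);   /* was spin_lock(&kvm_lock) */
                vm->next = vm_list;
                vm_list = vm;
                pthread_mutex_unlock(&vm_list_lock); /* was spin_unlock(&kvm_lock) */
        }

        static int vm_count(void)
        {
                int n = 0;
                struct vm *vm;

                pthread_mutex_lock(&vm_list_lock);
                for (vm = vm_list; vm; vm = vm->next)
                        n++;  /* a mutex holder may sleep here; a spinlock holder may not */
                pthread_mutex_unlock(&vm_list_lock);
                return n;
        }

        int main(void)
        {
                struct vm *vm = calloc(1, sizeof(*vm));

                vm->id = 1;
                vm_register(vm);
                printf("VMs: %d\n", vm_count());
                free(vm);
                return 0;
        }

Build with cc -pthread; the point is only the lock/unlock bracketing around the shared list, which is exactly what the remaining hunks rewrite.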
@@ -683,9 +680,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err;

-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);

 	preempt_notifier_inc();
@@ -731,9 +728,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
 	kvm_destroy_vm_debugfs(kvm);
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -1793,7 +1790,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	if (!map->hva)
 		return;

-	if (map->page)
+	if (map->page != KVM_UNMAPPED_PAGE)
 		kunmap(map->page);
 #ifdef CONFIG_HAS_IOMEM
 	else
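For context: KVM_UNMAPPED_PAGE is a non-NULL sentinel that kvm_vcpu_map() leaves in map->page when the mapping is not backed by a struct page, i.e. when the hva was obtained via memremap(). The old "if (map->page)" test therefore called kunmap() on the sentinel as well; comparing against the sentinel lets such mappings fall through to the CONFIG_HAS_IOMEM else branch instead.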
@@ -4034,13 +4031,13 @@ static int vm_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;

 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
@@ -4053,12 +4050,12 @@ static int vm_stat_clear(void *_offset, u64 val)
 	if (val)
 		return -EINVAL;

-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
@@ -4073,13 +4070,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;

 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
@@ -4092,12 +4089,12 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 	if (val)
 		return -EINVAL;

-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
@@ -4118,7 +4115,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	if (!kvm_dev.this_device || !kvm)
 		return;

-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	if (type == KVM_EVENT_CREATE_VM) {
 		kvm_createvm_count++;
 		kvm_active_vms++;
@@ -4127,7 +4124,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	}
 	created = kvm_createvm_count;
 	active = kvm_active_vms;
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);

 	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
 	if (!env)
@@ -4224,6 +4221,11 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 	kvm_arch_vcpu_put(vcpu);
 }

+static void check_processor_compat(void *rtn)
+{
+	*(int *)rtn = kvm_arch_check_processor_compat();
+}
+
 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	     struct module *module)
 {
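check_processor_compat() above is a thin wrapper: smp_call_function_single() only accepts a callback of type void (*)(void *), so the int returned by kvm_arch_check_processor_compat() has to be passed back through the void * argument, as the call site in the next hunk shows. A minimal userspace sketch of the same marshalling idiom (run_on_cpu() and check_compat() are made-up stand-ins, not kernel APIs):

        /*
         * Userspace sketch of returning an int from a fixed-signature
         * void (*)(void *) callback by writing it through the argument.
         */
        #include <stdio.h>

        static int check_compat(void)
        {
                return 0;       /* pretend the CPU is compatible */
        }

        /* Callback with the fixed signature the caller requires. */
        static void check_compat_fn(void *ret)
        {
                *(int *)ret = check_compat();
        }

        /* Hypothetical helper: invoke the callback as if on a remote CPU. */
        static void run_on_cpu(void (*fn)(void *), void *arg)
        {
                fn(arg);
        }

        int main(void)
        {
                int r = -1;

                run_on_cpu(check_compat_fn, &r);
                printf("compat check returned %d\n", r);
                return r;
        }

The out-parameter keeps the callback signature fixed while still letting each CPU report its own result.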
@@ -4255,9 +4257,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 		goto out_free_0a;

 	for_each_online_cpu(cpu) {
-		smp_call_function_single(cpu,
-				kvm_arch_check_processor_compat,
-				&r, 1);
+		smp_call_function_single(cpu, check_processor_compat, &r, 1);
 		if (r < 0)
 			goto out_free_1;
 	}