#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
+#include <linux/io.h>
#include <asm/processor.h>
-#include <asm/io.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
spin_unlock(&kvm->mmu_lock);
ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
- range->end, range->blockable);
+ range->end,
+ mmu_notifier_range_blockable(range));
srcu_read_unlock(&kvm->srcu, idx);
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
- * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
+ * kvm_get_dirty_log_protect - get a snapshot of dirty pages
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
- * @is_dirty: flag set if any page is dirty
+ * @flush: true if TLB flush is needed by caller
*
 * We need to keep in mind that VCPU threads can write to the bitmap
 * concurrently, so to avoid losing track of dirty pages we keep the
* and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address from which to fetch the bitmap of dirty pages
+ * @flush: true if TLB flush is needed by caller
*/
int kvm_clear_dirty_log_protect(struct kvm *kvm,
struct kvm_clear_dirty_log *log, bool *flush)
if (!dirty_bitmap)
return -ENOENT;
- n = kvm_dirty_bitmap_bytes(memslot);
+ n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
 	if (log->first_page > memslot->npages ||
-	    log->num_pages > memslot->npages - log->first_page)
+	    log->num_pages > memslot->npages - log->first_page ||
+	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
 		return -EINVAL;
spin_lock(&kvm->mmu_lock);
- for (offset = log->first_page,
- i = offset / BITS_PER_LONG, n = log->num_pages / BITS_PER_LONG; n--;
+ for (offset = log->first_page, i = offset / BITS_PER_LONG,
+ n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
i++, offset += BITS_PER_LONG) {
unsigned long mask = *dirty_bitmap_buffer++;
atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
}
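/*
 * Illustrative sketch, not part of this patch: how userspace might drive
 * KVM_CLEAR_DIRTY_LOG under the sizing rules above.  With BITS_PER_LONG
 * == 64, num_pages == 130 gives ALIGN(130, 64) / 8 == 24 bitmap bytes
 * copied in and DIV_ROUND_UP(130, 64) == 3 longs walked.  The ioctl and
 * struct come from <linux/kvm.h>; "vm_fd", the function name and the
 * caller-supplied bitmap are assumptions of this example.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int clear_dirty(int vm_fd, __u32 slot, __u64 first_page,
		       __u32 num_pages, void *bitmap)
{
	struct kvm_clear_dirty_log log = {
		.slot = slot,
		.first_page = first_page, /* must be a multiple of 64 */
		.num_pages = num_pages,   /* 64-aligned unless it runs to the slot end */
		.dirty_bitmap = bitmap,   /* one bit per page, bit 0 == first_page */
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &log);
}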
EXPORT_SYMBOL_GPL(gfn_to_page);
+static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct kvm_host_map *map)
+{
+ kvm_pfn_t pfn;
+ void *hva = NULL;
+ struct page *page = KVM_UNMAPPED_PAGE;
+
+ if (!map)
+ return -EINVAL;
+
+ pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (is_error_noslot_pfn(pfn))
+ return -EINVAL;
+
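+	/*
+	 * Ordinary RAM has a struct page and can be kmap'ed; PFNMAP memory
+	 * (e.g. device memory remapped into the guest) has no struct page
+	 * and is mapped through memremap() instead.
+	 */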
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ hva = kmap(page);
+ } else {
+ hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+ }
+
+ if (!hva)
+ return -EFAULT;
+
+ map->page = page;
+ map->hva = hva;
+ map->pfn = pfn;
+ map->gfn = gfn;
+
+ return 0;
+}
+
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+ return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ bool dirty)
+{
+ if (!map)
+ return;
+
+ if (!map->hva)
+ return;
+
+ if (map->page)
+ kunmap(map->page);
+ else
+ memunmap(map->hva);
+
+ if (dirty) {
+ kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+ kvm_release_pfn_dirty(map->pfn);
+ } else {
+ kvm_release_pfn_clean(map->pfn);
+ }
+
+ map->hva = NULL;
+ map->page = NULL;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
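/*
 * Illustrative sketch, not part of this patch: a typical consumer of the
 * new mapping API.  Unlike kvm_vcpu_gfn_to_page(), kvm_vcpu_map() also
 * handles PFNMAP memory without a struct page via the memremap() path
 * above.  The function name and the memset are assumptions of this
 * example.
 */
static int zero_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gfn, &map))
		return -EFAULT;

	memset(map.hva, 0, PAGE_SIZE);	/* map.hva is a kernel pointer */

	/* dirty == true marks the gfn dirty before the pfn is released */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}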
+
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
kvm_pfn_t pfn;
u64 block_ns;
start = cur = ktime_get();
- if (vcpu->halt_poll_ns) {
+ if (vcpu->halt_poll_ns && !kvm_arch_no_poll(vcpu)) {
ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
++vcpu->stat.halt_attempted_poll;
}
#endif
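/*
 * Illustrative sketch, not part of this patch: kvm_arch_no_poll() lets an
 * architecture veto halt polling for a vcpu; the generic code pairs the
 * call above with a __weak default that returns false.  The override
 * below and its policy are purely hypothetical.
 */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* Hypothetical: skip polling once vcpus oversubscribe host cpus. */
	return vcpu->kvm->created_vcpus > num_online_cpus();
}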
+static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct kvm_device *dev = filp->private_data;
+
+ if (dev->ops->mmap)
+ return dev->ops->mmap(dev, vma);
+
+ return -ENODEV;
+}
+
static int kvm_device_ioctl_attr(struct kvm_device *dev,
int (*accessor)(struct kvm_device *dev,
struct kvm_device_attr *attr),
struct kvm_device *dev = filp->private_data;
struct kvm *kvm = dev->kvm;
+ if (dev->ops->release) {
+ mutex_lock(&kvm->lock);
+ list_del(&dev->vm_node);
+ dev->ops->release(dev);
+ mutex_unlock(&kvm->lock);
+ }
+
kvm_put_kvm(kvm);
return 0;
}
.unlocked_ioctl = kvm_device_ioctl,
.release = kvm_device_release,
KVM_COMPAT(kvm_device_ioctl),
+ .mmap = kvm_device_mmap,
};
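/*
 * Illustrative sketch, not part of this patch: a hypothetical device
 * backend wiring up the two new hooks.  ->mmap lets userspace map
 * device-owned memory; ->release, when set, is called instead of
 * ->destroy when the device fd goes away, under kvm->lock and after the
 * device has been unlinked from the VM's device list, and must free the
 * device itself.  The "demo" device and its vmalloc'ed buffer in
 * dev->private are assumptions of this example.
 */
static int demo_dev_mmap(struct kvm_device *dev, struct vm_area_struct *vma)
{
	/* Hypothetical backing store placed in dev->private by ->create. */
	return remap_vmalloc_range(vma, dev->private, vma->vm_pgoff);
}

static void demo_dev_release(struct kvm_device *dev)
{
	vfree(dev->private);
	kfree(dev);
}

static struct kvm_device_ops demo_dev_ops = {
	.name = "demo",
	.mmap = demo_dev_mmap,
	.release = demo_dev_release,
};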
struct kvm_device *kvm_device_from_filp(struct file *filp)
case KVM_CAP_CHECK_EXTENSION_VM:
case KVM_CAP_ENABLE_CAP_VM:
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
- case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
#endif
return 1;
#ifdef CONFIG_KVM_MMIO
#endif
case KVM_CAP_MAX_VCPU_ID:
return KVM_MAX_VCPU_ID;
+ case KVM_CAP_NR_MEMSLOTS:
+ return KVM_USER_MEM_SLOTS;
default:
break;
}
{
switch (cap->cap) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
- case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
if (cap->flags || (cap->args[0] & ~1))
return -EINVAL;
kvm->manual_dirty_log_protect = cap->args[0];
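/*
 * Illustrative sketch, not part of this patch: userspace opting in to
 * manual dirty-log reprotection, now advertised as
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2.  args[0] is checked above to be 0
 * or 1.  Headers as in the clear_dirty() sketch above; "vm_fd" and the
 * function name are assumptions of this example.
 */
static int enable_manual_protect(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args = { 1 },	/* KVM_GET_DIRTY_LOG stops write-protecting pages */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}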