kvm: use more precise cast and do not drop __user
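The compat ioctl path passes sigmask_arg->sigset to get_compat_sigset() through a (void *) cast, which both discards the __user address-space annotation and is less precise than it could be; casting to compat_sigset_t __user * matches the parameter type of get_compat_sigset() and keeps sparse's user/kernel pointer checking intact. Note that this gitweb blob-to-blob rendering covers more than the titular commit: the same diff also shows the asm/pgtable.h include drop, the conversion to the coarse-grained mmap locking API (mmap_read_lock()/mmap_read_unlock()), and the move from __get_user_pages_fast() to the get_user_pages_fast_only() helpers, all from the v5.8 development cycle.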
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7b6013f..0a68c9d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -55,7 +55,6 @@
 #include <asm/processor.h>
 #include <asm/ioctl.h>
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 
 #include "coalesced_mmio.h"
 #include "async_pf.h"
@@ -1638,7 +1637,7 @@ unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
        if (kvm_is_error_hva(addr))
                return PAGE_SIZE;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma(current->mm, addr);
        if (!vma)
                goto out;
@@ -1646,7 +1645,7 @@ unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
        size = vma_kernel_pagesize(vma);
 
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        return size;
 }
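
The two hunks above convert kvm_host_page_size() from raw down_read()/up_read() on current->mm->mmap_sem to the coarse-grained mmap locking API added in v5.8. A minimal sketch of the wrappers, approximating what include/linux/mmap_lock.h provides (the underlying member was renamed from mmap_sem to mmap_lock in the same series):

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}

Routing every caller through these helpers means later changes to the lock's name or implementation no longer have to touch each call site, which is why hva_to_pfn() below gets the same mechanical treatment.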
@@ -1746,7 +1745,6 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                            bool *writable, kvm_pfn_t *pfn)
 {
        struct page *page[1];
-       int npages;
 
        /*
         * Fast pin a writable pfn only if it is a write fault request
@@ -1756,8 +1754,7 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
        if (!(write_fault || writable))
                return false;
 
-       npages = __get_user_pages_fast(addr, 1, 1, page);
-       if (npages == 1) {
+       if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
                *pfn = page_to_pfn(page[0]);
 
                if (writable)
@@ -1797,7 +1794,7 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
        if (unlikely(!write_fault) && writable) {
                struct page *wpage;
 
-               if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
+               if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
                        *writable = true;
                        put_page(page);
                        page = wpage;
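
Both hva_to_pfn_fast() and hva_to_pfn_slow() used to call __get_user_pages_fast(addr, 1, 1, page) and test the result against 1. The replacement, get_user_page_fast_only(), is a single-page helper that returns bool and takes explicit FOLL_* flags rather than a bare write integer, which is why the npages local and the == 1 comparisons disappear. Roughly, assuming the v5.8 include/linux/mm.h definition:

	/*
	 * Pin exactly one page without sleeping; true on success.
	 * Thin wrapper over the multi-page fast-only variant.
	 */
	static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
	{
		return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
	}

The multi-page call in gfn_to_page_many_atomic() below makes the same transition directly to get_user_pages_fast_only(), with the old write flag of 1 becoming an explicit FOLL_WRITE.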
@@ -1901,7 +1898,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
        if (npages == 1)
                return pfn;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        if (npages == -EHWPOISON ||
              (!async && check_user_page_hwpoison(addr))) {
                pfn = KVM_PFN_ERR_HWPOISON;
@@ -1925,7 +1922,7 @@ retry:
                pfn = KVM_PFN_ERR_FAULT;
        }
 exit:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return pfn;
 }
 
@@ -2009,7 +2006,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
        if (entry < nr_pages)
                return 0;
 
-       return __get_user_pages_fast(addr, nr_pages, 1, pages);
+       return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
 }
 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
@@ -3353,7 +3350,8 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
                        if (kvm_sigmask.len != sizeof(compat_sigset_t))
                                goto out;
                        r = -EFAULT;
-                       if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+                       if (get_compat_sigset(&sigset,
+                                             (compat_sigset_t __user *)sigmask_arg->sigset))
                                goto out;
                        r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
                } else
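
Casting sigmask_arg->sigset to (void *) silently stripped the __user annotation, defeating sparse's address-space checking (under __CHECKER__, __user marks a pointer as noderef and in a separate address space). The new cast names the exact type the callee expects; for reference, the prototype in include/linux/compat.h is:

	int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);

So the pointer stays a user pointer end to end, and sparse can still warn if it is ever dereferenced directly or mixed with kernel pointers.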