IB/{core,hw,umem}: set FOLL_PIN via pin_user_pages*(), fix up ODP
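
Not part of the patch itself: a minimal sketch of the pin_user_pages_fast()/unpin_user_pages()
pairing that ib_umem_get() adopts below. pin_user_pages*() sets FOLL_PIN internally, and pages
obtained this way must be released with unpin_user_page()/unpin_user_pages() rather than
put_page(). The helper name pin_user_chunk() is made up for illustration.

    /*
     * Illustrative only -- not part of this patch.  Shows the pin/unpin
     * pairing used by ib_umem_get() after the conversion; pin_user_chunk()
     * is a hypothetical helper.
     */
    #include <linux/mm.h>

    static int pin_user_chunk(unsigned long uaddr, int nr_pages,
                              struct page **page_list)
    {
            int pinned;

            /* FOLL_LONGTERM: pages may stay pinned indefinitely for DMA. */
            pinned = pin_user_pages_fast(uaddr, nr_pages,
                                         FOLL_WRITE | FOLL_LONGTERM,
                                         page_list);
            if (pinned < 0)
                    return pinned;

            /*
             * ... DMA-map and use the pages ...  On teardown, pages pinned
             * via pin_user_pages*() must be released with
             * unpin_user_pages(), never put_page()/release_pages().
             */
            unpin_user_pages(page_list, pinned);
            return pinned;
    }
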
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 7a3b995..aae5bfe 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -181,15 +181,14 @@ EXPORT_SYMBOL(ib_umem_find_best_pgsz);
 /**
  * ib_umem_get - Pin and DMA map userspace memory.
  *
- * @udata: userspace context to pin memory for
+ * @device: IB device to connect UMEM
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
  */
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
                            size_t size, int access)
 {
-       struct ib_ucontext *context;
        struct ib_umem *umem;
        struct page **page_list;
        unsigned long lock_limit;
@@ -201,14 +200,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
        struct scatterlist *sg;
        unsigned int gup_flags = FOLL_WRITE;
 
-       if (!udata)
-               return ERR_PTR(-EIO);
-
-       context = container_of(udata, struct uverbs_attr_bundle, driver_udata)
-                         ->context;
-       if (!context)
-               return ERR_PTR(-EIO);
-
        /*
         * If the combination of the addr and size requested for this memory
         * region causes an integer overflow, return error.
@@ -226,7 +217,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);
-       umem->ibdev = context->device;
+       umem->ibdev      = device;
        umem->length     = size;
        umem->address    = addr;
        umem->writable   = ib_access_writable(access);
@@ -266,33 +257,28 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
        sg = umem->sg_head.sgl;
 
        while (npages) {
-               down_read(&mm->mmap_sem);
-               ret = get_user_pages(cur_base,
-                                    min_t(unsigned long, npages,
-                                          PAGE_SIZE / sizeof (struct page *)),
-                                    gup_flags | FOLL_LONGTERM,
-                                    page_list, NULL);
-               if (ret < 0) {
-                       up_read(&mm->mmap_sem);
+               ret = pin_user_pages_fast(cur_base,
+                                         min_t(unsigned long, npages,
+                                               PAGE_SIZE /
+                                               sizeof(struct page *)),
+                                         gup_flags | FOLL_LONGTERM, page_list);
+               if (ret < 0)
                        goto umem_release;
-               }
 
                cur_base += ret * PAGE_SIZE;
                npages   -= ret;
 
                sg = ib_umem_add_sg_table(sg, page_list, ret,
-                       dma_get_max_seg_size(context->device->dma_device),
+                       dma_get_max_seg_size(device->dma_device),
                        &umem->sg_nents);
-
-               up_read(&mm->mmap_sem);
        }
 
        sg_mark_end(sg);
 
-       umem->nmap = ib_dma_map_sg(context->device,
-                                 umem->sg_head.sgl,
-                                 umem->sg_nents,
-                                 DMA_BIDIRECTIONAL);
+       umem->nmap = ib_dma_map_sg(device,
+                                  umem->sg_head.sgl,
+                                  umem->sg_nents,
+                                  DMA_BIDIRECTIONAL);
 
        if (!umem->nmap) {
                ret = -ENOMEM;
@@ -303,7 +289,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
        goto out;
 
 umem_release:
-       __ib_umem_release(context->device, umem, 0);
+       __ib_umem_release(device, umem, 0);
 vma:
        atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:
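
For context, a hypothetical caller showing the reworked ib_umem_get() signature above: the
struct ib_device is passed directly instead of being derived from struct ib_udata. The function
map_user_buf() and its access flags are illustrative only, not taken from any driver in this
series.

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/ib_umem.h>

    /* Hypothetical caller -- sketch of the new ib_umem_get() calling convention. */
    static struct ib_umem *map_user_buf(struct ib_device *ibdev,
                                        unsigned long addr, size_t len)
    {
            struct ib_umem *umem;

            /* The device comes straight from the caller; no udata lookup. */
            umem = ib_umem_get(ibdev, addr, len, IB_ACCESS_LOCAL_WRITE);
            if (IS_ERR(umem))
                    return umem;

            /*
             * umem->sg_head.sgl is now DMA-mapped; release the pin with
             * ib_umem_release(umem) when the region is destroyed.
             */
            return umem;
    }
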