IB/core: Fix ODP with IB_ACCESS_HUGETLB handling
authorYishai Hadas <yishaih@mellanox.com>
Sun, 22 Dec 2019 12:46:49 +0000 (14:46 +0200)
committerJason Gunthorpe <jgg@mellanox.com>
Fri, 3 Jan 2020 21:00:14 +0000 (17:00 -0400)
VMAs for a given range might not be available as part of the
registration phase in ODP, so the page shift can no longer be derived
from the VMA at that point; use HPAGE_SHIFT directly when
IB_ACCESS_HUGETLB is requested.

ib_init_umem_odp() considers the expected page shift value that was
previously set and initializes its internals accordingly.

If memory isn't backed by physically contiguous pages aligned to a
hugepage boundary, an error will be set as part of the page fault flow
and come back to the user as some failed RDMA operation.

Fixes: 0008b84ea9af ("IB/umem: Add support to huge ODP")
Link: https://lore.kernel.org/r/20191222124649.52300-4-leon@kernel.org
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/core/umem_odp.c

index 2e9ee7a..f42fa31 100644 (file)
@@ -241,22 +241,10 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
        umem_odp->umem.owning_mm = mm = current->mm;
        umem_odp->notifier.ops = ops;
 
-       umem_odp->page_shift = PAGE_SHIFT;
-       if (access & IB_ACCESS_HUGETLB) {
-               struct vm_area_struct *vma;
-               struct hstate *h;
-
-               down_read(&mm->mmap_sem);
-               vma = find_vma(mm, ib_umem_start(umem_odp));
-               if (!vma || !is_vm_hugetlb_page(vma)) {
-                       up_read(&mm->mmap_sem);
-                       ret = -EINVAL;
-                       goto err_free;
-               }
-               h = hstate_vma(vma);
-               umem_odp->page_shift = huge_page_shift(h);
-               up_read(&mm->mmap_sem);
-       }
+       if (access & IB_ACCESS_HUGETLB)
+               umem_odp->page_shift = HPAGE_SHIFT;
+       else
+               umem_odp->page_shift = PAGE_SHIFT;
 
        umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
        ret = ib_init_umem_odp(umem_odp, ops);
@@ -266,7 +254,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
 
 err_put_pid:
        put_pid(umem_odp->tgid);
-err_free:
        kfree(umem_odp);
        return ERR_PTR(ret);
 }