RDMA/uverbs: Check ODP in ib_check_mr_access() as well
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 9e3d8b8..819c142 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -93,9 +93,6 @@ struct devx_async_event_file {
 struct devx_umem {
        struct mlx5_core_dev            *mdev;
        struct ib_umem                  *umem;
-       u32                             page_offset;
-       int                             page_shift;
-       int                             ncont;
        u32                             dinlen;
        u32                             dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
 };
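
With the cached page layout removed from struct devx_umem, page_offset, page_shift and ncont are recomputed from the umem wherever they are needed. A minimal sketch of the ib_umem accessors the patch switches to (illustrative, not the verbatim kernel source):

	/* number of page_size-sized DMA blocks covering the umem's range */
	size_t nblocks = ib_umem_num_dma_blocks(umem, page_size);

	/* offset of the umem start within the first block */
	unsigned long offset = ib_umem_dma_offset(umem, page_size);
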
@@ -1311,7 +1308,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
        else
                ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
                                    obj->dinlen, out, sizeof(out));
-       if (ib_is_destroy_retryable(ret, why, uobject))
+       if (ret)
                return ret;
 
        devx_event_table = &dev->devx_event_table;
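
This hunk is part of removing the "retryable destroy" concept from uverbs: object cleanup is no longer re-attempted, so the handler simply propagates any firmware error. The helper being dropped looked roughly like this:

	static inline bool ib_is_destroy_retryable(int ret,
						   enum rdma_remove_reason why,
						   struct ib_uobject *uobj)
	{
		return ret && (why == RDMA_REMOVE_DESTROY ||
			       uobj->context->cleanup_retryable);
	}
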
@@ -2057,9 +2054,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
        u64 addr;
        size_t size;
        u32 access;
-       int npages;
        int err;
-       u32 page_mask;
 
        if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
            uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
@@ -2073,57 +2068,62 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
        if (err)
                return err;
 
-       err = ib_check_mr_access(access);
+       err = ib_check_mr_access(&dev->ib_dev, access);
        if (err)
                return err;
 
        obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
        if (IS_ERR(obj->umem))
                return PTR_ERR(obj->umem);
-
-       mlx5_ib_cont_pages(obj->umem, obj->umem->address,
-                          MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
-                          &obj->page_shift, &obj->ncont, NULL);
-
-       if (!npages) {
-               ib_umem_release(obj->umem);
-               return -EINVAL;
-       }
-
-       page_mask = (1 << obj->page_shift) - 1;
-       obj->page_offset = obj->umem->address & page_mask;
-
        return 0;
 }
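
Passing the ib_device into ib_check_mr_access() is the subject of this patch: the access-flag check can now reject IB_ACCESS_ON_DEMAND on devices without ODP support, instead of every caller open-coding that test. A sketch of the post-patch helper (approximate; see include/rdma/ib_verbs.h for the authoritative version):

	static inline int ib_check_mr_access(struct ib_device *ib_dev,
					     unsigned int flags)
	{
		/* Remote write/atomic access requires local write permission */
		if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
		    !(flags & IB_ACCESS_LOCAL_WRITE))
			return -EINVAL;

		if (flags & ~IB_ACCESS_SUPPORTED)
			return -EINVAL;

		/* New: ODP access flags require device ODP support */
		if (flags & IB_ACCESS_ON_DEMAND &&
		    !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
			return -EOPNOTSUPP;

		return 0;
	}
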
 
-static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
+static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
+                                  struct uverbs_attr_bundle *attrs,
                                   struct devx_umem *obj,
                                   struct devx_umem_reg_cmd *cmd)
 {
+       unsigned int page_size;
+       __be64 *mtt;
+       void *umem;
+
+       /*
+        * We don't know what the user intends to use this umem for, but the HW
+        * restrictions must be met. MR, doorbell records, QP, WQ and CQ all
+        * have different requirements. Since we have no idea how to sort this
+        * out, only support PAGE_SIZE with the expectation that userspace will
+        * provide the necessary alignments inside the known PAGE_SIZE and that
+        * FW will check everything.
+        */
+       page_size = ib_umem_find_best_pgoff(
+               obj->umem, PAGE_SIZE,
+               __mlx5_page_offset_to_bitmask(__mlx5_bit_sz(umem, page_offset),
+                                             0));
+       if (!page_size)
+               return -EINVAL;
+
        cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
-                   (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
+                    (MLX5_ST_SZ_BYTES(mtt) *
+                     ib_umem_num_dma_blocks(obj->umem, page_size));
        cmd->in = uverbs_zalloc(attrs, cmd->inlen);
-       return PTR_ERR_OR_ZERO(cmd->in);
-}
-
-static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
-                                   struct devx_umem *obj,
-                                   struct devx_umem_reg_cmd *cmd)
-{
-       void *umem;
-       __be64 *mtt;
+       if (IS_ERR(cmd->in))
+               return PTR_ERR(cmd->in);
 
        umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
        mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
 
        MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
-       MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
-       MLX5_SET(umem, umem, log_page_size, obj->page_shift -
-                                           MLX5_ADAPTER_PAGE_SHIFT);
-       MLX5_SET(umem, umem, page_offset, obj->page_offset);
-       mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
+       MLX5_SET64(umem, umem, num_of_mtt,
+                  ib_umem_num_dma_blocks(obj->umem, page_size));
+       MLX5_SET(umem, umem, log_page_size,
+                order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET(umem, umem, page_offset,
+                ib_umem_dma_offset(obj->umem, page_size));
+
+       mlx5_ib_populate_pas(obj->umem, page_size, mtt,
                             (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
-                            MLX5_IB_MTT_READ);
+                                    MLX5_IB_MTT_READ);
+       return 0;
 }
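
With the PAGE_SIZE-only policy described in the comment above, the PRM field arithmetic is straightforward. A hypothetical umem starting 0x800 bytes into a 4K page with length 0x2000, assuming the DMA mapping preserves that offset, would program:

	ib_umem_num_dma_blocks(umem, 4096)           /* 3: blocks cover [0x800, 0x2800) */
	ib_umem_dma_offset(umem, 4096)               /* 0x800 */
	order_base_2(4096) - MLX5_ADAPTER_PAGE_SHIFT /* 12 - 12 = 0, i.e. 4K pages */
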
 
 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
@@ -2150,12 +2150,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
        if (err)
                goto err_obj_free;
 
-       err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
+       err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
        if (err)
                goto err_umem_release;
 
-       devx_umem_reg_cmd_build(dev, obj, &cmd);
-
        MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
        err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
                            sizeof(cmd.out));
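
Folding devx_umem_reg_cmd_build() into devx_umem_reg_cmd_alloc() means the page-size choice, the inlen computation and the MTT population all happen in one place and cannot drift apart; the handler now makes a single call and then executes the firmware command.
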
@@ -2187,7 +2185,7 @@ static int devx_umem_cleanup(struct ib_uobject *uobject,
        int err;
 
        err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
-       if (ib_is_destroy_retryable(err, why, uobject))
+       if (err)
                return err;
 
        ib_umem_release(obj->umem);
@@ -2600,8 +2598,8 @@ static const struct file_operations devx_async_event_fops = {
        .llseek  = no_llseek,
 };
 
-static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
-                                            enum rdma_remove_reason why)
+static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+                                             enum rdma_remove_reason why)
 {
        struct devx_async_cmd_event_file *comp_ev_file =
                container_of(uobj, struct devx_async_cmd_event_file,
@@ -2623,11 +2621,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
                kvfree(entry);
        }
        spin_unlock_irq(&comp_ev_file->ev_queue.lock);
-       return 0;
 };
 
-static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
-                                        enum rdma_remove_reason why)
+static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+                                         enum rdma_remove_reason why)
 {
        struct devx_async_event_file *ev_file =
                container_of(uobj, struct devx_async_event_file,
@@ -2671,7 +2668,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
        mutex_unlock(&dev->devx_event_table.event_xa_lock);
 
        put_device(&dev->ib_dev.dev);
-       return 0;
 };
 
 DECLARE_UVERBS_NAMED_METHOD(
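
The switch from int to void for the FD-object destroy callbacks reflects the same cleanup-contract change as above: teardown of these event files cannot fail, so the always-zero return values (and the callers' handling of them) are removed.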