Merge branch 'mlx5_memic_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/mellan...
author Jason Gunthorpe <jgg@nvidia.com>
Tue, 13 Apr 2021 22:37:17 +0000 (19:37 -0300)
committer Jason Gunthorpe <jgg@nvidia.com>
Tue, 13 Apr 2021 22:37:17 +0000 (19:37 -0300)
Maor Gottlieb says:
====================
This series from Maor extends MEMIC to support atomic operations from the
host, in addition to the already supported regular read/write access.
====================
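
To make the quoted summary concrete: the series adds MLX5_IB_METHOD_DM_MAP_OP_ADDR,
which hands userspace a separate mmap-able BAR address dedicated to one MEMIC
operation (e.g. an atomic) on an already allocated device-memory region, plus
MLX5_IB_METHOD_DM_QUERY to read back the allocation parameters. The sketch below
is illustrative only; it assumes the rdma-core counterparts (ibv_alloc_dm() and an
mlx5dv_dm_map_op_addr() helper added alongside this series), and the alignment and
opcode values are arbitrary examples, not part of this merge.

/* Hedged userspace sketch (C): allocate device memory and map an address
 * dedicated to one MEMIC operation.  Assumes rdma-core provides
 * mlx5dv_dm_map_op_addr(); verify against your rdma-core version.
 */
#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static void *map_memic_op(struct ibv_context *ctx, size_t len, uint8_t op,
			  struct ibv_dm **dm_out)
{
	struct ibv_alloc_dm_attr attr = {
		.length = len,
		.log_align_req = 6,	/* 64-byte alignment; arbitrary example */
	};
	struct ibv_dm *dm;
	void *op_addr;

	dm = ibv_alloc_dm(ctx, &attr);	/* MEMIC-backed on mlx5 devices */
	if (!dm)
		return NULL;

	/*
	 * Request a dedicated mapping of 'op' over this DM region.  Under the
	 * hood this reaches MLX5_IB_METHOD_DM_MAP_OP_ADDR, which issues
	 * MODIFY_MEMIC and returns a page index/offset that gets mmap-ed.
	 */
	op_addr = mlx5dv_dm_map_op_addr(dm, op);
	if (!op_addr) {
		ibv_free_dm(dm);
		return NULL;
	}

	*dm_out = dm;		/* caller keeps the DM handle for cleanup */
	return op_addr;		/* host accesses here trigger 'op' on the DM */
}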

* 'memic_ops':
  RDMA/mlx5: Expose UAPI to query DM
  RDMA/mlx5: Add support in MEMIC operations
  RDMA/mlx5: Add support to MODIFY_MEMIC command
  RDMA/mlx5: Re-organize the DM code
  RDMA/mlx5: Move all DM logic to separate file
  RDMA/uverbs: Make UVERBS_OBJECT_METHODS to consider line number
  net/mlx5: Add MEMIC operations related bits

drivers/infiniband/hw/mlx5/Makefile
drivers/infiniband/hw/mlx5/cmd.c
drivers/infiniband/hw/mlx5/cmd.h
drivers/infiniband/hw/mlx5/dm.c [new file with mode: 0644]
drivers/infiniband/hw/mlx5/dm.h [new file with mode: 0644]
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
include/linux/mlx5/mlx5_ifc.h
include/rdma/uverbs_named_ioctl.h
include/uapi/rdma/mlx5_user_ioctl_cmds.h

diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index b4c009b..f433801 100644
@@ -6,6 +6,7 @@ mlx5_ib-y := ah.o \
             cong.o \
             counters.o \
             cq.o \
+            dm.o \
             doorbell.o \
             gsi.o \
             ib_virt.o \
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 234f299..a8db8a0 100644
@@ -47,107 +47,6 @@ int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
        return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
 }
 
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
-                        u64 length, u32 alignment)
-{
-       struct mlx5_core_dev *dev = dm->dev;
-       u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
-                                       >> PAGE_SHIFT;
-       u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
-       u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
-       u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
-       u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
-       u32 mlx5_alignment;
-       u64 page_idx = 0;
-       int ret = 0;
-
-       if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
-               return -EINVAL;
-
-       /* mlx5 device sets alignment as 64*2^driver_value
-        * so normalizing is needed.
-        */
-       mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
-                        alignment - MLX5_MEMIC_BASE_ALIGN;
-       if (mlx5_alignment > max_alignment)
-               return -EINVAL;
-
-       MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
-       MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
-       MLX5_SET(alloc_memic_in, in, memic_size, length);
-       MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
-                mlx5_alignment);
-
-       while (page_idx < num_memic_hw_pages) {
-               spin_lock(&dm->lock);
-               page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
-                                                     num_memic_hw_pages,
-                                                     page_idx,
-                                                     num_pages, 0);
-
-               if (page_idx < num_memic_hw_pages)
-                       bitmap_set(dm->memic_alloc_pages,
-                                  page_idx, num_pages);
-
-               spin_unlock(&dm->lock);
-
-               if (page_idx >= num_memic_hw_pages)
-                       break;
-
-               MLX5_SET64(alloc_memic_in, in, range_start_addr,
-                          hw_start_addr + (page_idx * PAGE_SIZE));
-
-               ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
-               if (ret) {
-                       spin_lock(&dm->lock);
-                       bitmap_clear(dm->memic_alloc_pages,
-                                    page_idx, num_pages);
-                       spin_unlock(&dm->lock);
-
-                       if (ret == -EAGAIN) {
-                               page_idx++;
-                               continue;
-                       }
-
-                       return ret;
-               }
-
-               *addr = dev->bar_addr +
-                       MLX5_GET64(alloc_memic_out, out, memic_start_addr);
-
-               return 0;
-       }
-
-       return -ENOMEM;
-}
-
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
-{
-       struct mlx5_core_dev *dev = dm->dev;
-       u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
-       u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
-       u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
-       u64 start_page_idx;
-       int err;
-
-       addr -= dev->bar_addr;
-       start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
-
-       MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
-       MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
-       MLX5_SET(dealloc_memic_in, in, memic_size, length);
-
-       err =  mlx5_cmd_exec_in(dev, dealloc_memic, in);
-       if (err)
-               return;
-
-       spin_lock(&dm->lock);
-       bitmap_clear(dm->memic_alloc_pages,
-                    start_page_idx, num_pages);
-       spin_unlock(&dm->lock);
-}
-
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
 {
        u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 88ea6ef..66c9629 100644
@@ -41,9 +41,6 @@ int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
                               void *out);
-int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
-                        u64 length, u32 alignment);
-void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
 int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
new file mode 100644
index 0000000..235aad6
--- /dev/null
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "dm.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+                               u64 length, u32 alignment)
+{
+       struct mlx5_core_dev *dev = dm->dev;
+       u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
+                                       >> PAGE_SHIFT;
+       u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+       u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
+       u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+       u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
+       u32 mlx5_alignment;
+       u64 page_idx = 0;
+       int ret = 0;
+
+       if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
+               return -EINVAL;
+
+       /* mlx5 device sets alignment as 64*2^driver_value
+        * so normalizing is needed.
+        */
+       mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
+                        alignment - MLX5_MEMIC_BASE_ALIGN;
+       if (mlx5_alignment > max_alignment)
+               return -EINVAL;
+
+       MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
+       MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
+       MLX5_SET(alloc_memic_in, in, memic_size, length);
+       MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
+                mlx5_alignment);
+
+       while (page_idx < num_memic_hw_pages) {
+               spin_lock(&dm->lock);
+               page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
+                                                     num_memic_hw_pages,
+                                                     page_idx,
+                                                     num_pages, 0);
+
+               if (page_idx < num_memic_hw_pages)
+                       bitmap_set(dm->memic_alloc_pages,
+                                  page_idx, num_pages);
+
+               spin_unlock(&dm->lock);
+
+               if (page_idx >= num_memic_hw_pages)
+                       break;
+
+               MLX5_SET64(alloc_memic_in, in, range_start_addr,
+                          hw_start_addr + (page_idx * PAGE_SIZE));
+
+               ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
+               if (ret) {
+                       spin_lock(&dm->lock);
+                       bitmap_clear(dm->memic_alloc_pages,
+                                    page_idx, num_pages);
+                       spin_unlock(&dm->lock);
+
+                       if (ret == -EAGAIN) {
+                               page_idx++;
+                               continue;
+                       }
+
+                       return ret;
+               }
+
+               *addr = dev->bar_addr +
+                       MLX5_GET64(alloc_memic_out, out, memic_start_addr);
+
+               return 0;
+       }
+
+       return -ENOMEM;
+}
+
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+                           u64 length)
+{
+       struct mlx5_core_dev *dev = dm->dev;
+       u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+       u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+       u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
+       u64 start_page_idx;
+       int err;
+
+       addr -= dev->bar_addr;
+       start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
+
+       MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
+       MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
+       MLX5_SET(dealloc_memic_in, in, memic_size, length);
+
+       err =  mlx5_cmd_exec_in(dev, dealloc_memic, in);
+       if (err)
+               return;
+
+       spin_lock(&dm->lock);
+       bitmap_clear(dm->memic_alloc_pages,
+                    start_page_idx, num_pages);
+       spin_unlock(&dm->lock);
+}
+
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+                              u8 operation)
+{
+       u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+       struct mlx5_core_dev *dev = dm->dev;
+
+       MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+       MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
+       MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+       MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+       mlx5_cmd_exec_in(dev, modify_memic, in);
+}
+
+static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+                                  u8 operation, phys_addr_t *op_addr)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+       struct mlx5_core_dev *dev = dm->dev;
+       int err;
+
+       MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+       MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
+       MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+       MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+       err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
+       if (err)
+               return err;
+
+       *op_addr = dev->bar_addr +
+                  MLX5_GET64(modify_memic_out, out, memic_operation_addr);
+       return 0;
+}
+
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+                            struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
+                            size_t size, u64 address)
+{
+       mentry->mmap_flag = mmap_flag;
+       mentry->address = address;
+
+       return rdma_user_mmap_entry_insert_range(
+               context, &mentry->rdma_entry, size,
+               MLX5_IB_MMAP_DEVICE_MEM << 16,
+               (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static void mlx5_ib_dm_memic_free(struct kref *kref)
+{
+       struct mlx5_ib_dm_memic *dm =
+               container_of(kref, struct mlx5_ib_dm_memic, ref);
+       struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
+
+       mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
+       kfree(dm);
+}
+
+static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
+                          struct uverbs_attr_bundle *attrs)
+{
+       u64 start_offset;
+       u16 page_idx;
+       int err;
+
+       page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
+       start_offset = op_entry->op_addr & ~PAGE_MASK;
+       err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+                            &page_idx, sizeof(page_idx));
+       if (err)
+               return err;
+
+       return uverbs_copy_to(attrs,
+                             MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+                             &start_offset, sizeof(start_offset));
+}
+
+static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
+                          struct uverbs_attr_bundle *attrs)
+{
+       struct mlx5_ib_dm_op_entry *op_entry;
+
+       op_entry = xa_load(&dm->ops, op);
+       if (!op_entry)
+               return -ENOENT;
+
+       return copy_op_to_user(op_entry, attrs);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
+       struct uverbs_attr_bundle *attrs)
+{
+       struct ib_uobject *uobj = uverbs_attr_get_uobject(
+               attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
+       struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+       struct ib_dm *ibdm = uobj->object;
+       struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
+       struct mlx5_ib_dm_op_entry *op_entry;
+       int err;
+       u8 op;
+
+       err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
+       if (err)
+               return err;
+
+       if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&dm->ops_xa_lock);
+       err = map_existing_op(dm, op, attrs);
+       if (!err || err != -ENOENT)
+               goto err_unlock;
+
+       op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
+       if (!op_entry)
+               goto err_unlock;
+
+       err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
+                                     &op_entry->op_addr);
+       if (err) {
+               kfree(op_entry);
+               goto err_unlock;
+       }
+       op_entry->op = op;
+       op_entry->dm = dm;
+
+       err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
+                               MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
+                               op_entry->op_addr & PAGE_MASK);
+       if (err) {
+               mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
+               kfree(op_entry);
+               goto err_unlock;
+       }
+       /* From this point, entry will be freed by mmap_free */
+       kref_get(&dm->ref);
+
+       err = copy_op_to_user(op_entry, attrs);
+       if (err)
+               goto err_remove;
+
+       err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
+       if (err)
+               goto err_remove;
+       mutex_unlock(&dm->ops_xa_lock);
+
+       return 0;
+
+err_remove:
+       rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
+err_unlock:
+       mutex_unlock(&dm->ops_xa_lock);
+
+       return err;
+}
+
+static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+                                          struct ib_dm_alloc_attr *attr,
+                                          struct uverbs_attr_bundle *attrs)
+{
+       struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+       struct mlx5_ib_dm_memic *dm;
+       u64 start_offset;
+       u16 page_idx;
+       int err;
+       u64 address;
+
+       if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return ERR_PTR(-ENOMEM);
+
+       dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
+       dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
+       dm->base.ibdm.device = ctx->device;
+
+       kref_init(&dm->ref);
+       xa_init(&dm->ops);
+       mutex_init(&dm->ops_xa_lock);
+       dm->req_length = attr->length;
+
+       err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
+                                  dm->base.size, attr->alignment);
+       if (err) {
+               kfree(dm);
+               return ERR_PTR(err);
+       }
+
+       address = dm->base.dev_addr & PAGE_MASK;
+       err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
+                               dm->base.size, address);
+       if (err) {
+               mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
+               kfree(dm);
+               return ERR_PTR(err);
+       }
+
+       page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+       err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                            &page_idx, sizeof(page_idx));
+       if (err)
+               goto err_copy;
+
+       start_offset = dm->base.dev_addr & ~PAGE_MASK;
+       err = uverbs_copy_to(attrs,
+                            MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                            &start_offset, sizeof(start_offset));
+       if (err)
+               goto err_copy;
+
+       return &dm->base.ibdm;
+
+err_copy:
+       rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+       return ERR_PTR(err);
+}
+
+static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+                                           struct ib_dm_alloc_attr *attr,
+                                           struct uverbs_attr_bundle *attrs,
+                                           int type)
+{
+       struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
+       struct mlx5_ib_dm_icm *dm;
+       u64 act_size;
+       int err;
+
+       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return ERR_PTR(-ENOMEM);
+
+       dm->base.type = type;
+       dm->base.ibdm.device = ctx->device;
+
+       if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW)) {
+               err = -EPERM;
+               goto free;
+       }
+
+       if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+             MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+             MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+             MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))) {
+               err = -EOPNOTSUPP;
+               goto free;
+       }
+
+       /* Allocation size must be a multiple of the basic block size
+        * and a power of 2.
+        */
+       act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+       act_size = roundup_pow_of_two(act_size);
+
+       dm->base.size = act_size;
+       err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
+                                  to_mucontext(ctx)->devx_uid,
+                                  &dm->base.dev_addr, &dm->obj_id);
+       if (err)
+               goto free;
+
+       err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                            &dm->base.dev_addr, sizeof(dm->base.dev_addr));
+       if (err) {
+               mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size,
+                                      to_mucontext(ctx)->devx_uid,
+                                      dm->base.dev_addr, dm->obj_id);
+               goto free;
+       }
+       return &dm->base.ibdm;
+free:
+       kfree(dm);
+       return ERR_PTR(err);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+                              struct ib_ucontext *context,
+                              struct ib_dm_alloc_attr *attr,
+                              struct uverbs_attr_bundle *attrs)
+{
+       enum mlx5_ib_uapi_dm_type type;
+       int err;
+
+       err = uverbs_get_const_default(&type, attrs,
+                                      MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+                                      MLX5_IB_UAPI_DM_TYPE_MEMIC);
+       if (err)
+               return ERR_PTR(err);
+
+       mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+                   type, attr->length, attr->alignment);
+
+       switch (type) {
+       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+               return handle_alloc_dm_memic(context, attr, attrs);
+       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+               return handle_alloc_dm_sw_icm(context, attr, attrs,
+                                            MLX5_SW_ICM_TYPE_STEERING);
+       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+               return handle_alloc_dm_sw_icm(context, attr, attrs,
+                                             MLX5_SW_ICM_TYPE_HEADER_MODIFY);
+       default:
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+}
+
+static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
+{
+       struct mlx5_ib_dm_op_entry *entry;
+       unsigned long idx;
+
+       mutex_lock(&dm->ops_xa_lock);
+       xa_for_each(&dm->ops, idx, entry) {
+               xa_erase(&dm->ops, idx);
+               rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
+       }
+       mutex_unlock(&dm->ops_xa_lock);
+}
+
+static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
+{
+       dm_memic_remove_ops(dm);
+       rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+}
+
+static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
+                              struct mlx5_ib_dm_icm *dm)
+{
+       enum mlx5_sw_icm_type type =
+               dm->base.type == MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM ?
+                       MLX5_SW_ICM_TYPE_STEERING :
+                       MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+       struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
+       int err;
+
+       err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
+                                    dm->base.dev_addr, dm->obj_id);
+       if (!err)
+               kfree(dm);
+       return 0;
+}
+
+static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
+                             struct uverbs_attr_bundle *attrs)
+{
+       struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+               &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+       struct mlx5_ib_dm *dm = to_mdm(ibdm);
+
+       switch (dm->type) {
+       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+               mlx5_dm_memic_dealloc(to_memic(ibdm));
+               return 0;
+       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+               return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
+       struct uverbs_attr_bundle *attrs)
+{
+       struct ib_dm *ibdm =
+               uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
+       struct mlx5_ib_dm *dm = to_mdm(ibdm);
+       struct mlx5_ib_dm_memic *memic;
+       u64 start_offset;
+       u16 page_idx;
+       int err;
+
+       if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
+               return -EOPNOTSUPP;
+
+       memic = to_memic(ibdm);
+       page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
+       err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+                            &page_idx, sizeof(page_idx));
+       if (err)
+               return err;
+
+       start_offset = memic->base.dev_addr & ~PAGE_MASK;
+       err =  uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+                             &start_offset, sizeof(start_offset));
+       if (err)
+               return err;
+
+       return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+                             &memic->req_length,
+                             sizeof(memic->req_length));
+}
+
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+                         struct mlx5_user_mmap_entry *mentry)
+{
+       struct mlx5_ib_dm_op_entry *op_entry;
+       struct mlx5_ib_dm_memic *mdm;
+
+       switch (mentry->mmap_flag) {
+       case MLX5_IB_MMAP_TYPE_MEMIC:
+               mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
+               kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+               break;
+       case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+               op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
+                                       mentry);
+               mdm = op_entry->dm;
+               mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
+                                         op_entry->op);
+               kfree(op_entry);
+               kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+               break;
+       default:
+               WARN_ON(true);
+       }
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+       MLX5_IB_METHOD_DM_QUERY,
+       UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
+                       UVERBS_ACCESS_READ, UA_MANDATORY),
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+                           UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+                           UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+                           UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+       mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                           UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                           UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
+       UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+                            enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+       MLX5_IB_METHOD_DM_MAP_OP_ADDR,
+       UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
+                       UVERBS_OBJECT_DM,
+                       UVERBS_ACCESS_READ,
+                       UA_MANDATORY),
+       UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+                          UVERBS_ATTR_TYPE(u8),
+                          UA_MANDATORY),
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+                           UVERBS_ATTR_TYPE(u64),
+                           UA_MANDATORY),
+       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+                           UVERBS_ATTR_TYPE(u16),
+                           UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
+                             &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
+                             &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
+
+const struct uapi_definition mlx5_ib_dm_defs[] = {
+       UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+       UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
+       {},
+};
+
+const struct ib_device_ops mlx5_ib_dev_dm_ops = {
+       .alloc_dm = mlx5_ib_alloc_dm,
+       .dealloc_dm = mlx5_ib_dealloc_dm,
+       .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
diff --git a/drivers/infiniband/hw/mlx5/dm.h b/drivers/infiniband/hw/mlx5/dm.h
new file mode 100644
index 0000000..9674a80
--- /dev/null
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DM_H
+#define _MLX5_IB_DM_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dm_ops;
+extern const struct uapi_definition mlx5_ib_dm_defs[];
+
+struct mlx5_ib_dm {
+       struct ib_dm            ibdm;
+       u32                     type;
+       phys_addr_t             dev_addr;
+       size_t                  size;
+};
+
+struct mlx5_ib_dm_op_entry {
+       struct mlx5_user_mmap_entry     mentry;
+       phys_addr_t                     op_addr;
+       struct mlx5_ib_dm_memic         *dm;
+       u8                              op;
+};
+
+struct mlx5_ib_dm_memic {
+       struct mlx5_ib_dm           base;
+       struct mlx5_user_mmap_entry mentry;
+       struct xarray               ops;
+       struct mutex                ops_xa_lock;
+       struct kref                 ref;
+       size_t                      req_length;
+};
+
+struct mlx5_ib_dm_icm {
+       struct mlx5_ib_dm      base;
+       u32                    obj_id;
+};
+
+static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
+{
+       return container_of(ibdm, struct mlx5_ib_dm, ibdm);
+}
+
+static inline struct mlx5_ib_dm_memic *to_memic(struct ib_dm *ibdm)
+{
+       return container_of(ibdm, struct mlx5_ib_dm_memic, base.ibdm);
+}
+
+static inline struct mlx5_ib_dm_icm *to_icm(struct ib_dm *ibdm)
+{
+       return container_of(ibdm, struct mlx5_ib_dm_icm, base.ibdm);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+                              struct ib_ucontext *context,
+                              struct ib_dm_alloc_attr *attr,
+                              struct uverbs_attr_bundle *attrs);
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+                         struct mlx5_user_mmap_entry *mentry);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+                           u64 length);
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+                              u8 operation);
+
+#endif /* _MLX5_IB_DM_H */
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ee623a0..7ea6137 100644
@@ -34,6 +34,7 @@
 #include "ib_rep.h"
 #include "cmd.h"
 #include "devx.h"
+#include "dm.h"
 #include "fs.h"
 #include "srq.h"
 #include "qp.h"
@@ -2088,14 +2089,11 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
        struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
        struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
        struct mlx5_var_table *var_table = &dev->var_table;
-       struct mlx5_ib_dm *mdm;
 
        switch (mentry->mmap_flag) {
        case MLX5_IB_MMAP_TYPE_MEMIC:
-               mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
-               mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
-                                      mdm->size);
-               kfree(mdm);
+       case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+               mlx5_ib_dm_mmap_free(dev, mentry);
                break;
        case MLX5_IB_MMAP_TYPE_VAR:
                mutex_lock(&var_table->bitmap_lock);
@@ -2220,19 +2218,6 @@ free_bfreg:
        return err;
 }
 
-static int add_dm_mmap_entry(struct ib_ucontext *context,
-                            struct mlx5_ib_dm *mdm,
-                            u64 address)
-{
-       mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
-       mdm->mentry.address = address;
-       return rdma_user_mmap_entry_insert_range(
-                       context, &mdm->mentry.rdma_entry,
-                       mdm->size,
-                       MLX5_IB_MMAP_DEVICE_MEM << 16,
-                       (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
-}
-
 static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
 {
        unsigned long idx;
@@ -2334,206 +2319,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
        return 0;
 }
 
-static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
-                                       u32 type)
-{
-       switch (type) {
-       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-               if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
-                       return -EOPNOTSUPP;
-               break;
-       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-               if (!capable(CAP_SYS_RAWIO) ||
-                   !capable(CAP_NET_RAW))
-                       return -EPERM;
-
-               if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
-                     MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner) ||
-                     MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2) ||
-                     MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner_v2)))
-                       return -EOPNOTSUPP;
-               break;
-       }
-
-       return 0;
-}
-
-static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
-                                struct mlx5_ib_dm *dm,
-                                struct ib_dm_alloc_attr *attr,
-                                struct uverbs_attr_bundle *attrs)
-{
-       struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
-       u64 start_offset;
-       u16 page_idx;
-       int err;
-       u64 address;
-
-       dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-
-       err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
-                                  dm->size, attr->alignment);
-       if (err)
-               return err;
-
-       address = dm->dev_addr & PAGE_MASK;
-       err = add_dm_mmap_entry(ctx, dm, address);
-       if (err)
-               goto err_dealloc;
-
-       page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
-       err = uverbs_copy_to(attrs,
-                            MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                            &page_idx,
-                            sizeof(page_idx));
-       if (err)
-               goto err_copy;
-
-       start_offset = dm->dev_addr & ~PAGE_MASK;
-       err = uverbs_copy_to(attrs,
-                            MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-                            &start_offset, sizeof(start_offset));
-       if (err)
-               goto err_copy;
-
-       return 0;
-
-err_copy:
-       rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-err_dealloc:
-       mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-
-       return err;
-}
-
-static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
-                                 struct mlx5_ib_dm *dm,
-                                 struct ib_dm_alloc_attr *attr,
-                                 struct uverbs_attr_bundle *attrs,
-                                 int type)
-{
-       struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
-       u64 act_size;
-       int err;
-
-       /* Allocation size must a multiple of the basic block size
-        * and a power of 2.
-        */
-       act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
-       act_size = roundup_pow_of_two(act_size);
-
-       dm->size = act_size;
-       err = mlx5_dm_sw_icm_alloc(dev, type, act_size, attr->alignment,
-                                  to_mucontext(ctx)->devx_uid, &dm->dev_addr,
-                                  &dm->icm_dm.obj_id);
-       if (err)
-               return err;
-
-       err = uverbs_copy_to(attrs,
-                            MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-                            &dm->dev_addr, sizeof(dm->dev_addr));
-       if (err)
-               mlx5_dm_sw_icm_dealloc(dev, type, dm->size,
-                                      to_mucontext(ctx)->devx_uid, dm->dev_addr,
-                                      dm->icm_dm.obj_id);
-
-       return err;
-}
-
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
-                              struct ib_ucontext *context,
-                              struct ib_dm_alloc_attr *attr,
-                              struct uverbs_attr_bundle *attrs)
-{
-       struct mlx5_ib_dm *dm;
-       enum mlx5_ib_uapi_dm_type type;
-       int err;
-
-       err = uverbs_get_const_default(&type, attrs,
-                                      MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
-                                      MLX5_IB_UAPI_DM_TYPE_MEMIC);
-       if (err)
-               return ERR_PTR(err);
-
-       mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
-                   type, attr->length, attr->alignment);
-
-       err = check_dm_type_support(to_mdev(ibdev), type);
-       if (err)
-               return ERR_PTR(err);
-
-       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
-       if (!dm)
-               return ERR_PTR(-ENOMEM);
-
-       dm->type = type;
-
-       switch (type) {
-       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-               err = handle_alloc_dm_memic(context, dm,
-                                           attr,
-                                           attrs);
-               break;
-       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-               err = handle_alloc_dm_sw_icm(context, dm,
-                                            attr, attrs,
-                                            MLX5_SW_ICM_TYPE_STEERING);
-               break;
-       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-               err = handle_alloc_dm_sw_icm(context, dm,
-                                            attr, attrs,
-                                            MLX5_SW_ICM_TYPE_HEADER_MODIFY);
-               break;
-       default:
-               err = -EOPNOTSUPP;
-       }
-
-       if (err)
-               goto err_free;
-
-       return &dm->ibdm;
-
-err_free:
-       kfree(dm);
-       return ERR_PTR(err);
-}
-
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
-{
-       struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
-               &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
-       struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-       struct mlx5_ib_dm *dm = to_mdm(ibdm);
-       int ret;
-
-       switch (dm->type) {
-       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-               rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
-               return 0;
-       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
-               ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
-                                            dm->size, ctx->devx_uid, dm->dev_addr,
-                                            dm->icm_dm.obj_id);
-               if (ret)
-                       return ret;
-               break;
-       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
-               ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_HEADER_MODIFY,
-                                            dm->size, ctx->devx_uid, dm->dev_addr,
-                                            dm->icm_dm.obj_id);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       kfree(dm);
-
-       return 0;
-}
-
 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 {
        struct mlx5_ib_pd *pd = to_mpd(ibpd);
@@ -3817,20 +3602,6 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
                            &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
                            &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
 
-ADD_UVERBS_ATTRIBUTES_SIMPLE(
-       mlx5_ib_dm,
-       UVERBS_OBJECT_DM,
-       UVERBS_METHOD_DM_ALLOC,
-       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
-                           UVERBS_ATTR_TYPE(u64),
-                           UA_MANDATORY),
-       UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                           UVERBS_ATTR_TYPE(u16),
-                           UA_OPTIONAL),
-       UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
-                            enum mlx5_ib_uapi_dm_type,
-                            UA_OPTIONAL));
-
 ADD_UVERBS_ATTRIBUTES_SIMPLE(
        mlx5_ib_flow_action,
        UVERBS_OBJECT_FLOW_ACTION,
@@ -3853,10 +3624,10 @@ static const struct uapi_definition mlx5_ib_defs[] = {
        UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
        UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
        UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+       UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
 
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
                                &mlx5_ib_flow_action),
-       UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
                                UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
@@ -4032,12 +3803,6 @@ static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
        INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
 };
 
-static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
-       .alloc_dm = mlx5_ib_alloc_dm,
-       .dealloc_dm = mlx5_ib_dealloc_dm,
-       .reg_dm_mr = mlx5_ib_reg_dm_mr,
-};
-
 static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
 {
        struct mlx5_core_dev *mdev = dev->mdev;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 82849d6..e9a3f34 100644
@@ -166,6 +166,7 @@ enum mlx5_ib_mmap_type {
        MLX5_IB_MMAP_TYPE_VAR = 2,
        MLX5_IB_MMAP_TYPE_UAR_WC = 3,
        MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+       MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
 };
 
 struct mlx5_bfreg_info {
@@ -618,20 +619,6 @@ struct mlx5_user_mmap_entry {
        u32 page_idx;
 };
 
-struct mlx5_ib_dm {
-       struct ib_dm            ibdm;
-       phys_addr_t             dev_addr;
-       u32                     type;
-       size_t                  size;
-       union {
-               struct {
-                       u32     obj_id;
-               } icm_dm;
-               /* other dm types specific params should be added here */
-       };
-       struct mlx5_user_mmap_entry mentry;
-};
-
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
 
 #define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
@@ -1188,11 +1175,6 @@ static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
        return container_of(msrq, struct mlx5_ib_srq, msrq);
 }
 
-static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
-{
-       return container_of(ibdm, struct mlx5_ib_dm, ibdm);
-}
-
 static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
 {
        return container_of(ibmr, struct mlx5_ib_mr, ibmr);
@@ -1347,11 +1329,6 @@ int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
                                 struct ib_rwq_ind_table_init_attr *init_attr,
                                 struct ib_udata *udata);
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
-struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
-                              struct ib_ucontext *context,
-                              struct ib_dm_alloc_attr *attr,
-                              struct uverbs_attr_bundle *attrs);
-int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs);
 struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
                                struct ib_dm_mr_attr *attr,
                                struct uverbs_attr_bundle *attrs);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ab5bffb..4388afe 100644
@@ -42,6 +42,7 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_umem_odp.h>
 #include <rdma/ib_verbs.h>
+#include "dm.h"
 #include "mlx5_ib.h"
 
 /*
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 432290b..47241eb 100644
@@ -133,6 +133,7 @@ enum {
        MLX5_CMD_OP_PAGE_FAULT_RESUME             = 0x204,
        MLX5_CMD_OP_ALLOC_MEMIC                   = 0x205,
        MLX5_CMD_OP_DEALLOC_MEMIC                 = 0x206,
+       MLX5_CMD_OP_MODIFY_MEMIC                  = 0x207,
        MLX5_CMD_OP_CREATE_EQ                     = 0x301,
        MLX5_CMD_OP_DESTROY_EQ                    = 0x302,
        MLX5_CMD_OP_QUERY_EQ                      = 0x303,
@@ -1017,7 +1018,11 @@ struct mlx5_ifc_device_mem_cap_bits {
 
        u8         header_modify_sw_icm_start_address[0x40];
 
-       u8         reserved_at_180[0x680];
+       u8         reserved_at_180[0x80];
+
+       u8         memic_operations[0x20];
+
+       u8         reserved_at_220[0x5e0];
 };
 
 struct mlx5_ifc_device_event_cap_bits {
@@ -10417,6 +10422,41 @@ struct mlx5_ifc_destroy_vport_lag_in_bits {
        u8         reserved_at_40[0x40];
 };
 
+enum {
+       MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+       MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+};
+
+struct mlx5_ifc_modify_memic_in_bits {
+       u8         opcode[0x10];
+       u8         uid[0x10];
+
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_at_40[0x20];
+
+       u8         reserved_at_60[0x18];
+       u8         memic_operation_type[0x8];
+
+       u8         memic_start_addr[0x40];
+
+       u8         reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_modify_memic_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_at_40[0x40];
+
+       u8         memic_operation_addr[0x40];
+
+       u8         reserved_at_c0[0x140];
+};
+
 struct mlx5_ifc_alloc_memic_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index f04f512..ee7873f 100644
@@ -20,7 +20,7 @@
 
 /* These are static so they do not need to be qualified */
 #define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id
-#define UVERBS_OBJECT_METHODS(object_id) _object_methods_##object_id
+#define UVERBS_OBJECT_METHODS(object_id) _UVERBS_NAME(_object_methods_##object_id, __LINE__)
 
 #define DECLARE_UVERBS_NAMED_METHOD(_method_id, ...)                           \
        static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS(        \
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 3f0bc75..3798cbc 100644
@@ -41,6 +41,25 @@ enum mlx5_ib_create_flow_action_attrs {
        MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
 };
 
+enum mlx5_ib_dm_methods {
+       MLX5_IB_METHOD_DM_MAP_OP_ADDR  = (1U << UVERBS_ID_NS_SHIFT),
+       MLX5_IB_METHOD_DM_QUERY,
+};
+
+enum mlx5_ib_dm_map_op_addr_attrs {
+       MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+       MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+       MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+       MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+};
+
+enum mlx5_ib_query_dm_attrs {
+       MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+       MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+       MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+       MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+};
+
 enum mlx5_ib_alloc_dm_attrs {
        MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
        MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,