IB/mlx5: Fix MR registration flow to use UMR properly
author		Guy Levi <guyle@mellanox.com>
		Wed, 31 Jul 2019 08:19:29 +0000 (11:19 +0300)
committer	Doug Ledford <dledford@redhat.com>
		Thu, 1 Aug 2019 15:49:51 +0000 (11:49 -0400)
Driver shouldn't allow use of UMR to register an MR when
umr_modify_atomic_disabled is set. Otherwise, registration will always end
up failing in the post send flow, where the UMR WQE has to set the MR's
atomic access rights. UMR remains usable when the device has no atomic
support at all, since in that case there are no atomic access rights to
modify.
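
Expressed as code, the capability gate this patch introduces reads as the
helper below. This is an illustrative sketch only: the helper name
can_use_umr_for_reg() is hypothetical, while MLX5_CAP_GEN() and the three
capability bits are exactly the ones used in the hunk that follows.

	/* Allow UMR-based registration only if the device can modify the
	 * entity size via UMR, and can either modify atomic access via UMR
	 * or has no atomic support at all (so there are no atomic access
	 * rights to modify in the post send flow).
	 */
	static bool can_use_umr_for_reg(struct mlx5_ib_dev *dev)
	{
		return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
		       (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
			!MLX5_CAP_GEN(dev->mdev, atomic));
	}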

Fixes: c8d75a980fab ("IB/mlx5: Respect new UMR capabilities")
Signed-off-by: Guy Levi <guyle@mellanox.com>
Reviewed-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/mlx5/mr.c

index 2c77456..b74fad0 100644
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-       return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
        return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-       return order <= mr_cache_max_order(dev) &&
-               umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1271,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
-       bool populate_mtts = false;
+       bool use_umr;
        struct ib_umem *umem;
        int page_shift;
        int npages;
@@ -1303,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (err < 0)
                return ERR_PTR(err);
 
-       if (use_umr(dev, order)) {
+       use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+                 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+                  !MLX5_CAP_GEN(dev->mdev, atomic));
+
+       if (order <= mr_cache_max_order(dev) && use_umr) {
                mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
                                         page_shift, order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
                        mr = NULL;
                }
-               populate_mtts = false;
        } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
                if (access_flags & IB_ACCESS_ON_DEMAND) {
                        err = -EINVAL;
                        pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
                        goto error;
                }
-               populate_mtts = true;
+               use_umr = false;
        }
 
        if (!mr) {
-               if (!umr_can_modify_entity_size(dev))
-                       populate_mtts = true;
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-                               page_shift, access_flags, populate_mtts);
+                               page_shift, access_flags, !use_umr);
                mutex_unlock(&dev->slow_path_mutex);
        }
 
@@ -1341,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
        update_odp_mr(mr);
 
-       if (!populate_mtts) {
+       if (use_umr) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
                if (access_flags & IB_ACCESS_ON_DEMAND)
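
Taken together, the hunks leave mlx5_ib_reg_user_mr() with the decision
flow condensed below. This is a paraphrase of the diff for readability,
not the literal driver code; error paths and unrelated details are
omitted.

	/*
	 * use_umr = entity size modifiable via UMR &&
	 *           (atomic access modifiable via UMR || no atomic support);
	 *
	 * if (order <= mr_cache_max_order(dev) && use_umr)
	 *	mr = alloc_mr_from_cache(...);	// fast path; MTTs are
	 *					// populated later via UMR
	 * else if (!umr_extended_translation_offset)	// e.g. Connect-IB
	 *	use_umr = false;		// MTTs must be populated
	 *					// at creation time
	 *
	 * if (!mr)				// cache miss or slow path
	 *	mr = reg_create(..., populate = !use_umr);
	 *
	 * if (use_umr)
	 *	update the MTTs with MLX5_IB_UPD_XLT_ENABLE; this posts the
	 *	UMR WQE that previously failed when the device had
	 *	umr_modify_atomic_disabled set.
	 */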