RDMA/mlx5: Implement mlx5_ib_map_mr_sg_pi and mlx5_ib_alloc_mr_integrity
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index abac70a..23fedff 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -52,6 +52,7 @@
 #include <linux/mlx5/port.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/list.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
@@ -3252,11 +3253,14 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
        int max_table_size;
        int num_entries;
        int num_groups;
+       bool esw_encap;
        u32 flags = 0;
        int priority;
 
        max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                                       log_max_ft_size));
+       esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+               DEVLINK_ESWITCH_ENCAP_MODE_NONE;
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                enum mlx5_flow_namespace_type fn_type;
 
@@ -3269,10 +3273,10 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
                if (ft_type == MLX5_IB_FT_RX) {
                        fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
                        prio = &dev->flow_db->prios[priority];
-                       if (!dev->is_rep &&
+                       if (!dev->is_rep && !esw_encap &&
                            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
-                       if (!dev->is_rep &&
+                       if (!dev->is_rep && !esw_encap &&
                            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                        reformat_l3_tunnel_to_l2))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
@@ -3282,7 +3286,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
                                                              log_max_ft_size));
                        fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
                        prio = &dev->flow_db->egress_prios[priority];
-                       if (!dev->is_rep &&
+                       if (!dev->is_rep && !esw_encap &&
                            MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
                }
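
mlx5_eswitch_get_encap_mode() comes from the newly included <linux/mlx5/eswitch.h> and reports the devlink eswitch encapsulation mode. When that mode is anything other than DEVLINK_ESWITCH_ENCAP_MODE_NONE, tunnel offload is meant to belong to the eswitch FDB rather than to the NIC tables, so the NIC RX/TX bypass tables above stop requesting the decap/reformat flags. A condensed sketch of the resulting gating (illustrative only, mirroring the NIC RX case above):

	bool esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
			 DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	/* NIC RX bypass: claim decap only if this is not a representor,
	 * eswitch encap is off, and the HW capability bit is set.
	 */
	if (!dev->is_rep && !esw_encap &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
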
@@ -3892,6 +3896,7 @@ _get_flow_table(struct mlx5_ib_dev *dev,
        struct mlx5_flow_namespace *ns = NULL;
        struct mlx5_ib_flow_prio *prio = NULL;
        int max_table_size = 0;
+       bool esw_encap;
        u32 flags = 0;
        int priority;
 
@@ -3900,22 +3905,30 @@ _get_flow_table(struct mlx5_ib_dev *dev,
        else
                priority = ib_prio_to_core_prio(fs_matcher->priority, false);
 
+       esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+               DEVLINK_ESWITCH_ENCAP_MODE_NONE;
        if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
                max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                        log_max_ft_size));
-               if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+               if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
                        flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
                if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
-                                             reformat_l3_tunnel_to_l2))
+                                             reformat_l3_tunnel_to_l2) &&
+                   !esw_encap)
                        flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
        } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
                max_table_size = BIT(
                        MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
-               if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+               if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
                        flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
        } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
                max_table_size = BIT(
                        MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+               if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
+                       flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+               if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
+                   esw_encap)
+                       flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
                priority = FDB_BYPASS_PATH;
        }
 
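The FDB branch is the complementary case: the decap/reformat table flags are requested there only when the eswitch encapsulation mode is enabled (and the corresponding ESW flow-table capability bits are set). Roughly, the selection after this change reads as below; fdb_flags and nic_flags are hypothetical names used only to contrast the two paths:

	if (esw_encap) {
		/* Eswitch encap on: tunnel offload flags go to the FDB table. */
		fdb_flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP |
			     MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	} else {
		/* Otherwise the NIC RX/TX bypass tables may claim them,
		 * still subject to the capability checks above.
		 */
		nic_flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP |
			     MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	}
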
@@ -4891,18 +4904,19 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        if (ret)
                goto error0;
 
-       devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
-       if (IS_ERR(devr->c0)) {
-               ret = PTR_ERR(devr->c0);
+       devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
+       if (!devr->c0) {
+               ret = -ENOMEM;
                goto error1;
        }
-       devr->c0->device        = &dev->ib_dev;
-       devr->c0->uobject       = NULL;
-       devr->c0->comp_handler  = NULL;
-       devr->c0->event_handler = NULL;
-       devr->c0->cq_context    = NULL;
+
+       devr->c0->device = &dev->ib_dev;
        atomic_set(&devr->c0->usecnt, 0);
 
+       ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
+       if (ret)
+               goto err_create_cq;
+
        devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
        if (IS_ERR(devr->x0)) {
                ret = PTR_ERR(devr->x0);
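
mlx5_ib_create_cq() now initializes a caller-allocated CQ in place and returns an int, instead of allocating and returning a new ib_cq. The container is obtained with rdma_zalloc_drv_obj(), which sizes the allocation from the INIT_RDMA_OBJ_SIZE(ib_cq, ...) entry added further down, and the err_create_cq label added below frees it if initialization fails. A minimal sketch of the setup/teardown pairing this implies (abbreviated; ibdev, cq_attr and ret as in the function above):

	struct ib_cq *cq;

	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);	/* zeroed, driver-sized object */
	if (!cq)
		return -ENOMEM;
	cq->device = ibdev;

	ret = mlx5_ib_create_cq(cq, &cq_attr, NULL);
	if (ret) {
		kfree(cq);			/* nothing else to undo yet */
		return ret;
	}

	/* ... later, teardown mirrors the two steps ... */
	mlx5_ib_destroy_cq(cq, NULL);
	kfree(cq);
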
@@ -4994,6 +5008,8 @@ error3:
        mlx5_ib_dealloc_xrcd(devr->x0, NULL);
 error2:
        mlx5_ib_destroy_cq(devr->c0, NULL);
+err_create_cq:
+       kfree(devr->c0);
 error1:
        mlx5_ib_dealloc_pd(devr->p0, NULL);
 error0:
@@ -5012,6 +5028,7 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
        mlx5_ib_dealloc_xrcd(devr->x0, NULL);
        mlx5_ib_dealloc_xrcd(devr->x1, NULL);
        mlx5_ib_destroy_cq(devr->c0, NULL);
+       kfree(devr->c0);
        mlx5_ib_dealloc_pd(devr->p0, NULL);
        kfree(devr->p0);
 
@@ -6044,7 +6061,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
        if (mlx5_use_mad_ifc(dev))
                get_ext_port_caps(dev);
 
-       dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
        dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
        dev->ib_dev.phys_port_cnt       = dev->num_ports;
@@ -6124,8 +6140,13 @@ static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
 }
 
 static const struct ib_device_ops mlx5_ib_dev_ops = {
+       .owner = THIS_MODULE,
+       .driver_id = RDMA_DRIVER_MLX5,
+       .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
+
        .add_gid = mlx5_ib_add_gid,
        .alloc_mr = mlx5_ib_alloc_mr,
+       .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
        .alloc_pd = mlx5_ib_alloc_pd,
        .alloc_ucontext = mlx5_ib_alloc_ucontext,
        .attach_mcast = mlx5_ib_mcg_attach,
@@ -6155,6 +6176,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
        .get_dma_mr = mlx5_ib_get_dma_mr,
        .get_link_layer = mlx5_ib_port_link_layer,
        .map_mr_sg = mlx5_ib_map_mr_sg,
+       .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
        .mmap = mlx5_ib_mmap,
        .modify_cq = mlx5_ib_modify_cq,
        .modify_device = mlx5_ib_modify_device,
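
map_mr_sg_pi and alloc_mr_integrity are the two new driver ops named in the commit subject. They back the core ib_alloc_mr_integrity() and ib_map_mr_sg_pi() verbs, which let a ULP register a single MR spanning both a data scatterlist and a metadata (protection-information) scatterlist. A rough ULP-side sketch, assuming the 5.3-era core wrappers and with illustrative variable names:

	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr_integrity(pd, max_data_sge, max_meta_sge);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Map the data and PI scatterlists into the one MR; a short count
	 * means the mapping did not cover every entry.
	 */
	n = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
			    meta_sg, meta_nents, NULL, PAGE_SIZE);
	if (n != data_nents + meta_nents) {
		ib_dereg_mr(mr);
		return -EINVAL;		/* illustrative error handling */
	}
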
@@ -6179,6 +6201,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
        .resize_cq = mlx5_ib_resize_cq,
 
        INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
+       INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
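
Besides the new MR ops, the table now carries the module owner, driver id and uverbs ABI version, replacing the direct ib_dev field assignments removed in the hunks below. INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq) records how large the driver's CQ container is and that its ib_cq handle is the ibcq member, which is what lets rdma_zalloc_drv_obj() in create_dev_resources() allocate it on the driver's behalf. The driver then recovers its private structure with the usual container_of() pattern, as mlx5's existing to_mcq() helper does:

	static struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
	{
		return container_of(ibcq, struct mlx5_ib_cq, ibcq);
	}
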
@@ -6221,7 +6244,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
        struct mlx5_core_dev *mdev = dev->mdev;
        int err;
 
-       dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask     =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
@@ -6290,7 +6312,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
        if (mlx5_accel_ipsec_device_caps(dev->mdev) &
            MLX5_ACCEL_IPSEC_CAP_DEVICE)
                ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
-       dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
        ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
 
        if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))