net/mlx5: Lag, expose number of lag ports
author: Mark Bloch <mbloch@nvidia.com>
Tue, 1 Mar 2022 15:42:01 +0000 (15:42 +0000)
committer: Saeed Mahameed <saeedm@nvidia.com>
Tue, 10 May 2022 05:54:00 +0000 (22:54 -0700)
Downstream patches will add support for hardware lag with
more than 2 ports. Add a way for users to query the number of lag ports.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/infiniband/hw/mlx5/gsi.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
include/linux/mlx5/driver.h

index 3ad8f63..b804f2d 100644 (file)
@@ -100,7 +100,7 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
                                 port_type) == MLX5_CAP_PORT_TYPE_IB)
                        num_qps = pd->device->attrs.max_pkeys;
                else if (dev->lag_active)
-                       num_qps = MLX5_MAX_PORTS;
+                       num_qps = dev->lag_ports;
        }
 
        gsi = &mqp->gsi;
index 61aa196..61a3b76 100644 (file)
@@ -2991,6 +2991,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
        }
 
        dev->flow_db->lag_demux_ft = ft;
+       dev->lag_ports = mlx5_lag_get_num_ports(mdev);
        dev->lag_active = true;
        return 0;
 
index 4f04bb5..8b3c83c 100644 (file)
@@ -1131,6 +1131,7 @@ struct mlx5_ib_dev {
        struct xarray sig_mrs;
        struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
        u16 pkey_table_len;
+       u8 lag_ports;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
index 3f46755..fb8669c 100644 (file)
@@ -3907,7 +3907,7 @@ static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
                tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
 
        return (unsigned int)atomic_add_return(1, tx_port_affinity) %
-               MLX5_MAX_PORTS + 1;
+               (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
 }
 
 static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
index 6cad3b7..fe34cce 100644 (file)
@@ -1185,6 +1185,12 @@ unlock:
 }
 EXPORT_SYMBOL(mlx5_lag_get_slave_port);
 
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
+{
+       return MLX5_MAX_PORTS;
+}
+EXPORT_SYMBOL(mlx5_lag_get_num_ports);
+
 struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_dev *peer_dev = NULL;
index f327d05..62ea112 100644 (file)
@@ -1142,6 +1142,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
                                 int num_counters,
                                 size_t *offsets);
 struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev);
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,