net/mlx5e: Refactor build channel params
[linux-2.6-microblaze.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_main.c
index c6b8304..11997c2 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/bpf.h>
 #include <linux/if_bridge.h>
 #include <net/page_pool.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include "eswitch.h"
 #include "en.h"
 #include "en/txrx.h"
@@ -66,7 +66,6 @@
 #include "en/devlink.h"
 #include "lib/mlx5.h"
 
-
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
        bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -233,7 +232,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
        cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                      ds_cnt);
        cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
-       cseg->imm       = rq->mkey_be;
+       cseg->umr_mkey  = rq->mkey_be;
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
        ucseg->xlt_octowords =
@@ -374,7 +373,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        struct mlx5_core_dev *mdev = c->mdev;
        void *rqc = rqp->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
-       u32 num_xsk_frames = 0;
        u32 rq_xdp_ix;
        u32 pool_size;
        int wq_sz;
@@ -414,7 +412,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
        rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
        rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
-       rq->buff.umem_headroom = xsk ? xsk->headroom : 0;
        pool_size = 1 << params->log_rq_mtu_frames;
 
        switch (rq->wq_type) {
@@ -428,10 +425,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
                wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 
-               if (xsk)
-                       num_xsk_frames = wq_sz <<
-                               mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
-
                pool_size = MLX5_MPWRQ_PAGES_PER_WQE <<
                        mlx5e_mpwqe_get_log_rq_size(params, xsk);
 
@@ -462,6 +455,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                rq->mpwqe.num_strides =
                        BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
 
+               rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
+
                err = mlx5e_create_rq_umr_mkey(mdev, rq);
                if (err)
                        goto err_rq_wq_destroy;
@@ -481,10 +476,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 
                wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
 
-               if (xsk)
-                       num_xsk_frames = wq_sz << rq->wqe.info.log_num_frags;
-
                rq->wqe.info = rqp->frags_info;
+               rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+
                rq->wqe.frags =
                        kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
                                        (wq_sz << rq->wqe.info.log_num_frags)),
@@ -522,17 +516,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        }
 
        if (xsk) {
-               err = mlx5e_xsk_resize_reuseq(umem, num_xsk_frames);
-               if (unlikely(err)) {
-                       mlx5_core_err(mdev, "Unable to allocate the Reuse Ring for %u frames\n",
-                                     num_xsk_frames);
-                       goto err_free;
-               }
-
-               rq->zca.free = mlx5e_xsk_zca_free;
                err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
-                                                MEM_TYPE_ZERO_COPY,
-                                                &rq->zca);
+                                                MEM_TYPE_XSK_BUFF_POOL, NULL);
+               xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
        } else {
                /* Create a page_pool and register it with rxq */
                pp_params.order     = 0;
@@ -721,7 +707,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);
 
-       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in);
 
        kvfree(in);
 
@@ -752,7 +738,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
        MLX5_SET(rqc, rqc, scatter_fcs, enable);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
 
-       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in);
 
        kvfree(in);
 
@@ -781,7 +767,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
 
-       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in);
 
        kvfree(in);
 
@@ -1027,17 +1013,17 @@ static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
 
 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
 {
-       kvfree(sq->db.ico_wqe);
+       kvfree(sq->db.wqe_info);
 }
 
 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
 {
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+       size_t size;
 
-       sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
-                                                 sizeof(*sq->db.ico_wqe)),
-                                      GFP_KERNEL, numa);
-       if (!sq->db.ico_wqe)
+       size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
+       sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
+       if (!sq->db.wqe_info)
                return -ENOMEM;
 
        return 0;
@@ -1116,6 +1102,22 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
        return 0;
 }
 
+static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
+{
+       int sq_size = 1 << log_sq_size;
+
+       sq->stop_room  = mlx5e_tls_get_stop_room(sq);
+       sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+       if (WARN_ON(sq->stop_room >= sq_size)) {
+               netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
+                          sq->stop_room, sq_size);
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
 static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int txq_ix,
@@ -1140,20 +1142,16 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->min_inline_mode = params->tx_min_inline_mode;
        sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
-       sq->stop_room = MLX5E_SQ_STOP_ROOM;
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
        if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
                set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
-#ifdef CONFIG_MLX5_EN_TLS
-       if (mlx5_accel_is_tls_device(c->priv->mdev)) {
+       if (mlx5_accel_is_tls_device(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
-               sq->stop_room += MLX5E_SQ_TLS_ROOM +
-                       mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
-                                                   TLS_MAX_PAYLOAD_SIZE);
-       }
-#endif
+       err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
+       if (err)
+               return err;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1259,7 +1257,7 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
                MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
        }
 
-       err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
+       err = mlx5_core_modify_sq(mdev, sqn, in);
 
        kvfree(in);
 
@@ -1364,13 +1362,12 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
                u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-               struct mlx5e_tx_wqe_info *wi;
                struct mlx5e_tx_wqe *nop;
 
-               wi = &sq->db.wqe_info[pi];
+               sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
+                       .num_wqebbs = 1,
+               };
 
-               memset(wi, 0, sizeof(*wi));
-               wi->num_wqebbs = 1;
                nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
@@ -1482,20 +1479,21 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
 
                /* Pre initialize fixed WQE fields */
                for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
-                       struct mlx5e_xdp_wqe_info *wi  = &sq->db.wqe_info[i];
                        struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
                        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
                        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
                        struct mlx5_wqe_data_seg *dseg;
 
+                       sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
+                               .num_wqebbs = 1,
+                               .num_pkts   = 1,
+                       };
+
                        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
                        eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
 
                        dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
                        dseg->lkey = sq->mkey_be;
-
-                       wi->num_wqebbs = 1;
-                       wi->num_pkts   = 1;
                }
        }
 
@@ -1677,7 +1675,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
 
        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, params->tx_cq_moderation,
-                                   &cparam->tx_cq, &c->sq[tc].cq);
+                                   &cparam->txq_sq.cqp, &c->sq[tc].cq);
                if (err)
                        goto err_close_tx_cqs;
        }
@@ -1709,7 +1707,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
                int txq_ix = c->ix + tc * params->num_channels;
 
                err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
-                                      params, &cparam->sq, &c->sq[tc], tc);
+                                      params, &cparam->txq_sq, &c->sq[tc], tc);
                if (err)
                        goto err_close_sqs;
        }
@@ -1819,34 +1817,43 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        struct dim_cq_moder icocq_moder = {0, 0};
        int err;
 
-       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
+       err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->async_icosq.cq);
        if (err)
                return err;
 
+       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->icosq.cq);
+       if (err)
+               goto err_close_async_icosq_cq;
+
        err = mlx5e_open_tx_cqs(c, params, cparam);
        if (err)
                goto err_close_icosq_cq;
 
-       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
+       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq);
        if (err)
                goto err_close_tx_cqs;
 
-       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq);
        if (err)
                goto err_close_xdp_tx_cqs;
 
-       /* XDP SQ CQ params are same as normal TXQ sq CQ params */
        err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
-                                    &cparam->tx_cq, &c->rq_xdpsq.cq) : 0;
+                                    &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0;
        if (err)
                goto err_close_rx_cq;
 
        napi_enable(&c->napi);
 
-       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+       spin_lock_init(&c->async_icosq_lock);
+
+       err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
        if (err)
                goto err_disable_napi;
 
+       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+       if (err)
+               goto err_close_async_icosq;
+
        err = mlx5e_open_sqs(c, params, cparam);
        if (err)
                goto err_close_icosq;
@@ -1881,6 +1888,9 @@ err_close_sqs:
 err_close_icosq:
        mlx5e_close_icosq(&c->icosq);
 
+err_close_async_icosq:
+       mlx5e_close_icosq(&c->async_icosq);
+
 err_disable_napi:
        napi_disable(&c->napi);
 
@@ -1899,6 +1909,9 @@ err_close_tx_cqs:
 err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);
 
+err_close_async_icosq_cq:
+       mlx5e_close_cq(&c->async_icosq.cq);
+
        return err;
 }
 
@@ -1910,6 +1923,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
+       mlx5e_close_icosq(&c->async_icosq);
        napi_disable(&c->napi);
        if (c->xdp)
                mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -1917,6 +1931,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
        mlx5e_close_cq(&c->xdpsq.cq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
+       mlx5e_close_cq(&c->async_icosq.cq);
 }
 
 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
@@ -1997,6 +2012,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_icosq(&c->icosq);
+       mlx5e_activate_icosq(&c->async_icosq);
        mlx5e_activate_rq(&c->rq);
 
        if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
@@ -2011,6 +2027,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
                mlx5e_deactivate_xsk(c);
 
        mlx5e_deactivate_rq(&c->rq);
+       mlx5e_deactivate_icosq(&c->async_icosq);
        mlx5e_deactivate_icosq(&c->icosq);
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_deactivate_txqsq(&c->sq[tc]);
@@ -2140,6 +2157,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
 
        param->wq.buf_numa_node = dev_to_node(mdev->device);
+       mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
 }
 
 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -2182,6 +2200,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+       mlx5e_build_tx_cq_param(priv, params, &param->cqp);
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2258,6 +2277,7 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
 
        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+       mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
@@ -2270,6 +2290,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+       mlx5e_build_tx_cq_param(priv, params, &param->cqp);
 }
 
 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
@@ -2288,18 +2309,17 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_params *params,
                                      struct mlx5e_channel_param *cparam)
 {
-       u8 icosq_log_wq_sz;
+       u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
 
        mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
 
        icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+       async_icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 
-       mlx5e_build_sq_param(priv, params, &cparam->sq);
+       mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-       mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq);
-       mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
-       mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
+       mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,
@@ -2698,7 +2718,7 @@ static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
        ttconfig->rx_hash_fields = rx_hash_fields;
 }
 
-void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in)
 {
        void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
        struct mlx5e_rss_params *rss = &priv->rss_params;
@@ -2714,7 +2734,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
                mlx5e_update_rx_hash_fields(&ttconfig, tt,
                                            rss->rx_hash_fields[tt]);
                mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
-               mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+               mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
        }
 
        /* Verify inner tirs resources allocated */
@@ -2726,8 +2746,7 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
                mlx5e_update_rx_hash_fields(&ttconfig, tt,
                                            rss->rx_hash_fields[tt]);
                mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
-               mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
-                                    inlen);
+               mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
        }
 }
 
@@ -2753,15 +2772,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
        mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
 
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
-               err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
-                                          inlen);
+               err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
                if (err)
                        goto free_in;
        }
 
        for (ix = 0; ix < priv->max_nch; ix++) {
-               err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
-                                          in, inlen);
+               err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in);
                if (err)
                        goto free_in;
        }
@@ -2840,11 +2857,8 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
                                ETH_MAX_MTU);
 }
 
-static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       int nch = priv->channels.params.num_channels;
-       int ntc = priv->channels.params.num_tc;
        int tc;
 
        netdev_reset_tc(netdev);
@@ -2861,15 +2875,47 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
                netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
-static void mlx5e_update_netdev_queues(struct mlx5e_priv *priv, u16 count)
+static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
-       int num_txqs = count * priv->channels.params.num_tc;
-       int num_rxqs = count * priv->profile->rq_groups;
        struct net_device *netdev = priv->netdev;
+       int num_txqs, num_rxqs, nch, ntc;
+       int old_num_txqs, old_ntc;
+       int err;
+
+       old_num_txqs = netdev->real_num_tx_queues;
+       old_ntc = netdev->num_tc ? : 1;
+
+       nch = priv->channels.params.num_channels;
+       ntc = priv->channels.params.num_tc;
+       num_txqs = nch * ntc;
+       num_rxqs = nch * priv->profile->rq_groups;
+
+       mlx5e_netdev_set_tcs(netdev, nch, ntc);
+
+       err = netif_set_real_num_tx_queues(netdev, num_txqs);
+       if (err) {
+               netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+               goto err_tcs;
+       }
+       err = netif_set_real_num_rx_queues(netdev, num_rxqs);
+       if (err) {
+               netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
+               goto err_txqs;
+       }
 
-       mlx5e_netdev_set_tcs(netdev);
-       netif_set_real_num_tx_queues(netdev, num_txqs);
-       netif_set_real_num_rx_queues(netdev, num_rxqs);
+       return 0;
+
+err_txqs:
+       /* netif_set_real_num_rx_queues could fail only when nch increased. Only
+        * one of nch and ntc is changed in this function. That means, the call
+        * to netif_set_real_num_tx_queues below should not fail, because it
+        * decreases the number of TX queues.
+        */
+       WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
+
+err_tcs:
+       mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc);
+       return err;
 }
 
 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
@@ -2896,8 +2942,12 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
 {
        u16 count = priv->channels.params.num_channels;
+       int err;
+
+       err = mlx5e_update_netdev_queues(priv);
+       if (err)
+               return err;
 
-       mlx5e_update_netdev_queues(priv, count);
        mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
 
        if (!netif_is_rxfh_configured(priv->netdev))
@@ -3215,7 +3265,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
        if (mlx5_lag_is_lacp_owner(mdev))
                MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
 
-       return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
+       return mlx5_core_create_tis(mdev, in, tisn);
 }
 
 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
@@ -3333,7 +3383,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
                tir = &priv->indir_tir[tt];
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
                mlx5e_build_indir_tir_ctx(priv, tt, tirc);
-               err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+               err = mlx5e_create_tir(priv->mdev, tir, in);
                if (err) {
                        mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
                        goto err_destroy_inner_tirs;
@@ -3348,7 +3398,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
                tir = &priv->inner_indir_tir[i];
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
                mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
-               err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+               err = mlx5e_create_tir(priv->mdev, tir, in);
                if (err) {
                        mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
                        goto err_destroy_inner_tirs;
@@ -3391,7 +3441,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
                tir = &tirs[ix];
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
                mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc);
-               err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+               err = mlx5e_create_tir(priv->mdev, tir, in);
                if (unlikely(err))
                        goto err_destroy_ch_tirs;
        }
@@ -3494,41 +3544,6 @@ out:
        return err;
 }
 
-#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
-                                    struct flow_cls_offload *cls_flower,
-                                    unsigned long flags)
-{
-       switch (cls_flower->command) {
-       case FLOW_CLS_REPLACE:
-               return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
-                                             flags);
-       case FLOW_CLS_DESTROY:
-               return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
-                                          flags);
-       case FLOW_CLS_STATS:
-               return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
-                                         flags);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                                  void *cb_priv)
-{
-       unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
-       struct mlx5e_priv *priv = cb_priv;
-
-       switch (type) {
-       case TC_SETUP_CLSFLOWER:
-               return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-#endif
-
 static LIST_HEAD(mlx5e_block_cb_list);
 
 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -3537,7 +3552,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
        struct mlx5e_priv *priv = netdev_priv(dev);
 
        switch (type) {
-#ifdef CONFIG_MLX5_ESWITCH
        case TC_SETUP_BLOCK: {
                struct flow_block_offload *f = type_data;
 
@@ -3547,7 +3561,6 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
                                                  mlx5e_setup_tc_block_cb,
                                                  priv, priv, true);
        }
-#endif
        case TC_SETUP_QDISC_MQPRIO:
                return mlx5e_setup_tc_mqprio(priv, type_data);
        default:
@@ -3720,7 +3733,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
        return 0;
 }
 
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3831,7 +3844,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                                    set_feature_cvlan_filter);
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
 #endif
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
@@ -4716,7 +4729,7 @@ static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
                DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 }
 
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
 {
        if (params->tx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4725,13 +4738,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
        } else {
                params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
        }
-
-       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
-                       params->tx_cq_moderation.cq_period_mode ==
-                               MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
 }
 
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
 {
        if (params->rx_dim_enabled) {
                u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
@@ -4740,7 +4749,19 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
        } else {
                params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
        }
+}
 
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+       mlx5e_reset_tx_moderation(params, cq_period_mode);
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+                       params->tx_cq_moderation.cq_period_mode ==
+                               MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+       mlx5e_reset_rx_moderation(params, cq_period_mode);
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
                        params->rx_cq_moderation.cq_period_mode ==
                                MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -4881,10 +4902,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
        netdev->netdev_ops = &mlx5e_netdev_ops;
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-       if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
-               netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-#endif
+       mlx5e_dcbnl_build_netdev(netdev);
 
        netdev->watchdog_timeo    = 15 * HZ;
 
@@ -5004,29 +5022,40 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
 void mlx5e_create_q_counters(struct mlx5e_priv *priv)
 {
+       u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
-       err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
-       if (err) {
-               mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
-               priv->q_counter = 0;
-       }
+       MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+       err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+       if (!err)
+               priv->q_counter =
+                       MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 
-       err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
-       if (err) {
-               mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
-               priv->drop_rq_q_counter = 0;
-       }
+       err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
+       if (!err)
+               priv->drop_rq_q_counter =
+                       MLX5_GET(alloc_q_counter_out, out, counter_set_id);
 }
 
 void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
 {
-       if (priv->q_counter)
-               mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+       u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
 
-       if (priv->drop_rq_q_counter)
-               mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
+       MLX5_SET(dealloc_q_counter_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+       if (priv->q_counter) {
+               MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+                        priv->q_counter);
+               mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+       }
+
+       if (priv->drop_rq_q_counter) {
+               MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+                        priv->drop_rq_q_counter);
+               mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+       }
 }
 
 static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
@@ -5161,9 +5190,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
                return err;
        }
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_initialize(priv);
-#endif
        return 0;
 }
 
@@ -5190,9 +5217,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        mlx5e_hv_vhca_stats_create(priv);
        if (netdev->reg_state != NETREG_REGISTERED)
                return;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_init_app(priv);
-#endif
 
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
@@ -5207,10 +5232,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->netdev->reg_state == NETREG_REGISTERED)
                mlx5e_dcbnl_delete_app(priv);
-#endif
 
        rtnl_lock();
        if (netif_running(priv->netdev))
@@ -5230,7 +5253,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
 int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
 {
-       return mlx5e_refresh_tirs(priv, false);
+       return mlx5e_refresh_tirs(priv, false, false);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -5365,9 +5388,11 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
         */
        if (take_rtnl)
                rtnl_lock();
-       mlx5e_num_channels_changed(priv);
+       err = mlx5e_num_channels_changed(priv);
        if (take_rtnl)
                rtnl_unlock();
+       if (err)
+               goto out;
 
        err = profile->init_tx(priv);
        if (err)
@@ -5505,9 +5530,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 
        mlx5e_devlink_port_type_eth_set(priv);
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_init_app(priv);
-#endif
        return priv;
 
 err_devlink_port_unregister:
@@ -5530,9 +5553,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
        }
 #endif
        priv = vpriv;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_delete_app(priv);
-#endif
        unregister_netdev(priv->netdev);
        mlx5e_devlink_port_unregister(priv);
        mlx5e_detach(mdev, vpriv);