Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a9d824a..aad3887 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -65,6 +65,8 @@
 #include "en/devlink.h"
 #include "lib/mlx5.h"
 #include "en/ptp.h"
+#include "qos.h"
+#include "en/trap.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ -211,6 +213,33 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
        mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
 }
 
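+/* Handler for the blocking (sleepable) notifier chain; currently only
+ * MLX5_DRIVER_EVENT_TYPE_TRAP is expected here.
+ */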
+static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+       struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+       int err;
+
+       switch (event) {
+       case MLX5_DRIVER_EVENT_TYPE_TRAP:
+               err = mlx5e_handle_trap_event(priv, data);
+               break;
+       default:
+               netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
+               err = -EINVAL;
+       }
+       return err;
+}
+
+static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
+{
+       priv->blocking_events_nb.notifier_call = blocking_event;
+       mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
+}
+
+static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
+{
+       mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
+}
+
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
                                       struct mlx5e_icosq *sq,
                                       struct mlx5e_umr_wqe *wqe)
@@ -342,13 +371,11 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
                prev->last_in_page = true;
 }
 
-static int mlx5e_init_di_list(struct mlx5e_rq *rq,
-                             int wq_sz, int cpu)
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
 {
        int len = wq_sz << rq->wqe.info.log_num_frags;
 
-       rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
-                                  GFP_KERNEL, cpu_to_node(cpu));
+       rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
        if (!rq->wqe.di)
                return -ENOMEM;
 
@@ -357,7 +384,7 @@ static int mlx5e_init_di_list(struct mlx5e_rq *rq,
        return 0;
 }
 
-static void mlx5e_free_di_list(struct mlx5e_rq *rq)
+void mlx5e_free_di_list(struct mlx5e_rq *rq)
 {
        kvfree(rq->wqe.di);
 }
@@ -499,7 +526,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                        goto err_rq_wq_destroy;
                }
 
-               err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
+               err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
                if (err)
                        goto err_rq_frags;
 
@@ -650,8 +677,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
        mlx5_wq_destroy(&rq->wq_ctrl);
 }
 
-static int mlx5e_create_rq(struct mlx5e_rq *rq,
-                          struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 {
        struct mlx5_core_dev *mdev = rq->mdev;
 
@@ -774,7 +800,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
        return err;
 }
 
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+void mlx5e_destroy_rq(struct mlx5e_rq *rq)
 {
        mlx5_core_destroy_rq(rq->mdev, rq->rqn);
 }
@@ -1143,7 +1169,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
        sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
-       sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
        if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
                set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1233,6 +1258,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
 int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
                    struct mlx5e_modify_sq_param *p)
 {
+       u64 bitmask = 0;
        void *in;
        void *sqc;
        int inlen;
@@ -1248,9 +1274,14 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
        MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
        MLX5_SET(sqc, sqc, state, p->next_state);
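+       /* modify_bitmask: bit 0 selects the packet pacing rate limit index
+        * update, bit 2 selects the QoS queue group id update.
+        */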
        if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
-               MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
-               MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
+               bitmask |= 1;
+               MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+       }
+       if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
+               bitmask |= 1 << 2;
+               MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
        }
+       MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
 
        err = mlx5_core_modify_sq(mdev, sqn, in);
 
@@ -1267,6 +1298,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
                        struct mlx5e_sq_param *param,
                        struct mlx5e_create_sq_param *csp,
+                       u16 qos_queue_group_id,
                        u32 *sqn)
 {
        struct mlx5e_modify_sq_param msp = {0};
@@ -1278,6 +1310,10 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
 
        msp.curr_state = MLX5_SQC_STATE_RST;
        msp.next_state = MLX5_SQC_STATE_RDY;
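+       /* qos_queue_group_id == 0 means the SQ is not attached to an HTB
+        * QoS group (the default for regular, ICO and XDP SQs).
+        */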
+       if (qos_queue_group_id) {
+               msp.qos_update = true;
+               msp.qos_queue_group_id = qos_queue_group_id;
+       }
        err = mlx5e_modify_sq(mdev, *sqn, &msp);
        if (err)
                mlx5e_destroy_sq(mdev, *sqn);
@@ -1288,13 +1324,9 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
 static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_txqsq *sq, u32 rate);
 
-static int mlx5e_open_txqsq(struct mlx5e_channel *c,
-                           u32 tisn,
-                           int txq_ix,
-                           struct mlx5e_params *params,
-                           struct mlx5e_sq_param *param,
-                           struct mlx5e_txqsq *sq,
-                           int tc)
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+                    struct mlx5e_params *params, struct mlx5e_sq_param *param,
+                    struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
 {
        struct mlx5e_create_sq_param csp = {};
        u32 tx_rate;
@@ -1304,12 +1336,17 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
        if (err)
                return err;
 
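+       /* HTB QoS SQs use dedicated per-queue stats; regular SQs keep the
+        * per-channel, per-TC stats previously set in mlx5e_alloc_txqsq.
+        */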
+       if (qos_queue_group_id)
+               sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
+       else
+               sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+
        csp.tisn            = tisn;
        csp.tis_lst_sz      = 1;
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
-       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
        if (err)
                goto err_free_txqsq;
 
@@ -1366,7 +1403,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        }
 }
 
-static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
 {
        struct mlx5_core_dev *mdev = sq->mdev;
        struct mlx5_rate_limit rl = {0};
@@ -1403,7 +1440,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
        csp.cqn             = sq->cq.mcq.cqn;
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = params->tx_min_inline_mode;
-       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
        if (err)
                goto err_free_icosq;
 
@@ -1452,7 +1489,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
        csp.wq_ctrl         = &sq->wq_ctrl;
        csp.min_inline_mode = sq->min_inline_mode;
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+       err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
        if (err)
                goto err_free_xdpsq;
 
@@ -1703,7 +1740,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
                int txq_ix = c->ix + tc * params->num_channels;
 
                err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
-                                      params, &cparam->txq_sq, &c->sq[tc], tc);
+                                      params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
                if (err)
                        goto err_close_sqs;
        }
@@ -2044,6 +2081,8 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
        mlx5e_deactivate_icosq(&c->icosq);
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_deactivate_txqsq(&c->sq[tc]);
+
+       mlx5e_qos_deactivate_queues(c);
 }
 
 static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2051,6 +2090,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
        if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
                mlx5e_close_xsk(c);
        mlx5e_close_queues(c);
+       mlx5e_qos_close_queues(c);
        netif_napi_del(&c->napi);
 
        kvfree(c);
@@ -2068,10 +2108,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
        u32 buf_size = 0;
        int i;
 
-#ifdef CONFIG_MLX5_EN_IPSEC
        if (MLX5_IPSEC_DEV(mdev))
                byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
 
        if (mlx5e_rx_is_linear_skb(params, xsk)) {
                int frag_stride;
@@ -2200,9 +2238,8 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
 }
 
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
-                                struct mlx5e_params *params,
-                                struct mlx5e_sq_param *param)
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+                         struct mlx5e_sq_param *param)
 {
        void *sqc = param->sqc;
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2381,10 +2418,18 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
                        goto err_close_channels;
        }
 
+       err = mlx5e_qos_open_queues(priv, chs);
+       if (err)
+               goto err_close_ptp;
+
        mlx5e_health_channels_update(priv);
        kvfree(cparam);
        return 0;
 
+err_close_ptp:
+       if (chs->port_ptp)
+               mlx5e_port_ptp_close(chs->port_ptp);
+
 err_close_channels:
        for (i--; i >= 0; i--)
                mlx5e_close_channel(chs->c[i]);
@@ -2917,11 +2962,31 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
                netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
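+/* Set the real number of TX queues: one per channel and TC, plus the
+ * active HTB QoS leaf queues, plus per-TC PTP queues when TX port
+ * timestamping is enabled.
+ */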
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
+{
+       int qos_queues, nch, ntc, num_txqs, err;
+
+       qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+
+       nch = priv->channels.params.num_channels;
+       ntc = priv->channels.params.num_tc;
+       num_txqs = nch * ntc + qos_queues;
+       if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
+               num_txqs += ntc;
+
+       mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
+       err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
+       if (err)
+               netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+
+       return err;
+}
+
 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 {
        struct net_device *netdev = priv->netdev;
-       int num_txqs, num_rxqs, nch, ntc;
        int old_num_txqs, old_ntc;
+       int num_rxqs, nch, ntc;
        int err;
 
        old_num_txqs = netdev->real_num_tx_queues;
@@ -2929,18 +2994,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 
        nch = priv->channels.params.num_channels;
        ntc = priv->channels.params.num_tc;
-       num_txqs = nch * ntc;
-       if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
-               num_txqs += ntc;
        num_rxqs = nch * priv->profile->rq_groups;
 
        mlx5e_netdev_set_tcs(netdev, nch, ntc);
 
-       err = netif_set_real_num_tx_queues(netdev, num_txqs);
-       if (err) {
-               netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+       err = mlx5e_update_tx_netdev_queues(priv);
+       if (err)
                goto err_tcs;
-       }
        err = netif_set_real_num_rx_queues(netdev, num_rxqs);
        if (err) {
                netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
@@ -3044,6 +3104,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        mlx5e_update_num_tc_x_num_ch(priv);
        mlx5e_build_txq_maps(priv);
        mlx5e_activate_channels(&priv->channels);
+       mlx5e_qos_activate_queues(priv);
        mlx5e_xdp_tx_enable(priv);
        netif_tx_start_all_queues(priv->netdev);
 
@@ -3186,6 +3247,7 @@ int mlx5e_open_locked(struct net_device *netdev)
 
        priv->profile->update_rx(priv);
        mlx5e_activate_priv_channels(priv);
+       mlx5e_apply_traps(priv, true);
        if (priv->profile->update_carrier)
                priv->profile->update_carrier(priv);
 
@@ -3221,6 +3283,7 @@ int mlx5e_close_locked(struct net_device *netdev)
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
+       mlx5e_apply_traps(priv, false);
        clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
        netif_carrier_off(priv->netdev);
@@ -3610,6 +3673,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 
        mutex_lock(&priv->state_lock);
 
+       /* MQPRIO is another toplevel qdisc that can't be attached
+        * simultaneously with the offloaded HTB.
+        */
+       if (WARN_ON(priv->htb.maj_id)) {
+               err = -EINVAL;
+               goto out;
+       }
+
        new_channels.params = priv->channels.params;
        new_channels.params.num_tc = tc ? tc : 1;
 
@@ -3637,12 +3708,55 @@ out:
        return err;
 }
 
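+/* Dispatch HTB offload commands from the qdisc to the driver's QoS
+ * implementation; LEAF_ALLOC_QUEUE and LEAF_QUERY_QUEUE report the
+ * selected txq back through htb->qid.
+ */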
+static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
+{
+       int res;
+
+       switch (htb->command) {
+       case TC_HTB_CREATE:
+               return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
+                                         htb->extack);
+       case TC_HTB_DESTROY:
+               return mlx5e_htb_root_del(priv);
+       case TC_HTB_LEAF_ALLOC_QUEUE:
+               res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
+                                                htb->rate, htb->ceil, htb->extack);
+               if (res < 0)
+                       return res;
+               htb->qid = res;
+               return 0;
+       case TC_HTB_LEAF_TO_INNER:
+               return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
+                                              htb->rate, htb->ceil, htb->extack);
+       case TC_HTB_LEAF_DEL:
+               return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
+                                         htb->extack);
+       case TC_HTB_LEAF_DEL_LAST:
+       case TC_HTB_LEAF_DEL_LAST_FORCE:
+               return mlx5e_htb_leaf_del_last(priv, htb->classid,
+                                              htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+                                              htb->extack);
+       case TC_HTB_NODE_MODIFY:
+               return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
+                                            htb->extack);
+       case TC_HTB_LEAF_QUERY_QUEUE:
+               res = mlx5e_get_txq_by_classid(priv, htb->classid);
+               if (res < 0)
+                       return res;
+               htb->qid = res;
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static LIST_HEAD(mlx5e_block_cb_list);
 
 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
                          void *type_data)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
+       int err;
 
        switch (type) {
        case TC_SETUP_BLOCK: {
@@ -3656,6 +3770,11 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
        }
        case TC_SETUP_QDISC_MQPRIO:
                return mlx5e_setup_tc_mqprio(priv, type_data);
+       case TC_SETUP_QDISC_HTB:
+               mutex_lock(&priv->state_lock);
+               err = mlx5e_setup_tc_htb(priv, type_data);
+               mutex_unlock(&priv->state_lock);
+               return err;
        default:
                return -EOPNOTSUPP;
        }
@@ -3825,20 +3944,25 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
        return 0;
 }
 
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+static int set_feature_hw_tc(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
        if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
                netdev_err(netdev,
                           "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                return -EINVAL;
        }
+#endif
+
+       if (!enable && priv->htb.maj_id) {
+               netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
+               return -EINVAL;
+       }
 
        return 0;
 }
-#endif
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)
 {
@@ -3936,9 +4060,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                                    set_feature_cvlan_filter);
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-       err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
-#endif
+       err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -4395,10 +4517,8 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        features = vlan_features_check(skb, features);
        features = vxlan_features_check(skb, features);
 
-#ifdef CONFIG_MLX5_EN_IPSEC
        if (mlx5e_ipsec_feature_check(skb, netdev, features))
                return features;
-#endif
 
        /* Validate if the tunneled packet is being offloaded by HW */
        if (skb->encapsulation &&
@@ -4641,8 +4761,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
        .ndo_change_mtu          = mlx5e_change_nic_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
        .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
-       .ndo_udp_tunnel_add      = udp_tunnel_nic_add_port,
-       .ndo_udp_tunnel_del      = udp_tunnel_nic_del_port,
        .ndo_features_check      = mlx5e_features_check,
        .ndo_tx_timeout          = mlx5e_tx_timeout,
        .ndo_bpf                 = mlx5e_xdp,
@@ -5053,6 +5171,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
                netdev->hw_features      |= NETIF_F_NTUPLE;
 #endif
        }
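+       /* HTB offload support is advertised through NETIF_F_HW_TC. */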
+       if (mlx5_qos_is_supported(mdev))
+               netdev->features |= NETIF_F_HW_TC;
 
        netdev->features         |= NETIF_F_HIGHDMA;
        netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -5270,6 +5390,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        mlx5_lag_add(mdev, netdev);
 
        mlx5e_enable_async_events(priv);
+       mlx5e_enable_blocking_events(priv);
        if (mlx5e_monitor_counter_supported(priv))
                mlx5e_monitor_counter_init(priv);
 
@@ -5307,6 +5428,12 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
        if (mlx5e_monitor_counter_supported(priv))
                mlx5e_monitor_counter_cleanup(priv);
 
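+       /* Stop receiving blocking events before tearing down the trap RQ. */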
+       mlx5e_disable_blocking_events(priv);
+       if (priv->en_trap) {
+               mlx5e_deactivate_trap(priv);
+               mlx5e_close_trap(priv->en_trap);
+               priv->en_trap = NULL;
+       }
        mlx5e_disable_async_events(priv);
        mlx5_lag_remove(mdev);
        mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -5358,6 +5485,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
                return -ENOMEM;
 
        mutex_init(&priv->state_lock);
+       hash_init(priv->htb.qos_tc2node);
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5380,8 +5508,14 @@ err_free_cpumask:
 
 void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
 {
+       int i;
+
        destroy_workqueue(priv->wq);
        free_cpumask_var(priv->scratchpad.cpumask);
+
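+       /* Free the per-QoS-SQ stats and the array that holds them. */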
+       for (i = 0; i < priv->htb.max_qos_sqs; i++)
+               kfree(priv->htb.qos_sq_stats[i]);
+       kvfree(priv->htb.qos_sq_stats);
 }
 
 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
@@ -5391,13 +5525,17 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 {
        struct net_device *netdev;
        unsigned int ptp_txqs = 0;
+       int qos_sqs = 0;
        int err;
 
        if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
                ptp_txqs = profile->max_tc;
 
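+       /* alloc_etherdev_mqs() fixes the upper bound on TX queues, so
+        * reserve room for the maximum possible number of QoS leaf nodes.
+        */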
+       if (mlx5_qos_is_supported(mdev))
+               qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
+
        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
-                                   nch * profile->max_tc + ptp_txqs,
+                                   nch * profile->max_tc + ptp_txqs + qos_sqs,
                                    nch * profile->rq_groups);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");