net/mlx5e: TX, Generalise code and usage of error CQE dump
author Tariq Toukan <tariqt@mellanox.com>
Sun, 9 Feb 2020 15:06:49 +0000 (17:06 +0200)
committer Saeed Mahameed <saeedm@mellanox.com>
Thu, 30 Apr 2020 17:10:44 +0000 (10:10 -0700)
An error CQE was dumped only for TXQ SQs.
Generalise the function and use it for error completions
on ICO SQs and XDP SQs as well.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Aya Levin <ayal@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
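
For illustration, a minimal sketch of what the generalisation enables
(my_sq_report_err_cqe is hypothetical, not part of the driver): any
completion path that owns a CQ and knows its SQ number can now report a
bad CQE, whatever the SQ type:

/* Hypothetical caller, assuming en/txrx.h is included: the helper now
 * takes the CQ and SQN explicitly instead of a struct mlx5e_txqsq, so
 * TXQ, ICO and XDP SQs can all share it.
 */
static void my_sq_report_err_cqe(struct mlx5e_cq *cq, u32 sqn,
                                 struct mlx5_cqe64 *cqe)
{
        if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
                mlx5e_dump_error_cqe(cq, sqn, (struct mlx5_err_cqe *)cqe);
}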

drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 9f6967d..c0249fc 100644
@@ -189,6 +189,22 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
        }
 }
 
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+                                       struct mlx5_err_cqe *err_cqe)
+{
+       struct mlx5_cqwq *wq = &cq->wq;
+       u32 ci;
+
+       ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
+
+       netdev_err(cq->channel->netdev,
+                  "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+                  cq->mcq.cqn, ci, sqn,
+                  get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+                  err_cqe->syndrome, err_cqe->vendor_err_synd);
+       mlx5_dump_err_cqe(cq->mdev, err_cqe);
+}
+
 /* SW parser related functions */
 
 struct mlx5e_swp_spec {
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index f049e0a..f9dad26 100644
@@ -415,11 +415,6 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 
                wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-               if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
-                       netdev_WARN_ONCE(sq->channel->netdev,
-                                        "Bad OP in XDPSQ CQE: 0x%x\n",
-                                        get_cqe_opcode(cqe));
-
                do {
                        struct mlx5e_xdp_wqe_info *wi;
                        u16 ci;
@@ -432,6 +427,14 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 
                        mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
                } while (!last_wqe);
+
+               if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+                       netdev_WARN_ONCE(sq->channel->netdev,
+                                        "Bad OP in XDPSQ CQE: 0x%x\n",
+                                        get_cqe_opcode(cqe));
+                       mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+                                            (struct mlx5_err_cqe *)cqe);
+               }
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
        if (xsk_frames)
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e2beb89..4db1c92 100644
@@ -631,6 +631,8 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                                netdev_WARN_ONCE(cq->channel->netdev,
                                                 "Bad OP in ICOSQ CQE: 0x%x\n",
                                                 get_cqe_opcode(cqe));
+                               mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
+                                                    (struct mlx5_err_cqe *)cqe);
                                if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
                                        queue_work(cq->channel->priv->wq, &sq->recover_work);
                                break;
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fd6b2a1..1679557 100644
@@ -399,22 +399,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
 }
 
-static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
-                                struct mlx5_err_cqe *err_cqe)
-{
-       struct mlx5_cqwq *wq = &sq->cq.wq;
-       u32 ci;
-
-       ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
-
-       netdev_err(sq->channel->netdev,
-                  "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
-                  sq->cq.mcq.cqn, ci, sq->sqn,
-                  get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
-                  err_cqe->syndrome, err_cqe->vendor_err_synd);
-       mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
-}
-
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq_stats *stats;
@@ -501,7 +485,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
                                              &sq->state)) {
-                               mlx5e_dump_error_cqe(sq,
+                               mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
                                                     (struct mlx5_err_cqe *)cqe);
                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
                                queue_work(cq->channel->priv->wq,