1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 Mellanox Technologies. */
/* Upper bound for the error strings this reporter formats into
 * on-stack buffers before handing them to devlink health.
 */
#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256

/* Recovery context passed (as the opaque devlink context pointer) from
 * the error-detection path to the reporter's ->recover() callback.
 * @recover: per-error recovery routine to invoke on @sq.
 * @sq:      the TX send queue being recovered.
 */
struct mlx5e_tx_err_ctx {
	int (*recover)(struct mlx5e_txqsq *sq);
	struct mlx5e_txqsq *sq;
};
14 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
16 unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
18 while (time_before(jiffies, exp_time)) {
25 netdev_err(sq->channel->netdev,
26 "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
27 sq->sqn, sq->cc, sq->pc);
32 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
34 WARN_ONCE(sq->cc != sq->pc,
35 "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
36 sq->sqn, sq->cc, sq->pc);
42 static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
44 struct mlx5_core_dev *mdev = sq->channel->mdev;
45 struct net_device *dev = sq->channel->netdev;
46 struct mlx5e_modify_sq_param msp = {0};
49 msp.curr_state = curr_state;
50 msp.next_state = MLX5_SQC_STATE_RST;
52 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
54 netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
58 memset(&msp, 0, sizeof(msp));
59 msp.curr_state = MLX5_SQC_STATE_RST;
60 msp.next_state = MLX5_SQC_STATE_RDY;
62 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
64 netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
71 static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
73 struct mlx5_core_dev *mdev = sq->channel->mdev;
74 struct net_device *dev = sq->channel->netdev;
78 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
80 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
85 if (state != MLX5_SQC_STATE_ERR)
88 mlx5e_tx_disable_queue(sq->txq);
90 err = mlx5e_wait_for_sq_flush(sq);
94 /* At this point, no new packets will arrive from the stack as TXQ is
95 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
96 * pending WQEs. SQ can safely reset the SQ.
99 err = mlx5e_sq_to_ready(sq, state);
103 mlx5e_reset_txqsq_cc_pc(sq);
104 sq->stats->recover++;
105 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
106 mlx5e_activate_txqsq(sq);
110 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
114 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
116 struct mlx5e_tx_err_ctx *err_ctx)
119 netdev_err(err_ctx->sq->channel->netdev, err_str);
120 return err_ctx->recover(err_ctx->sq);
123 return devlink_health_report(tx_reporter, err_str, err_ctx);
126 void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
128 char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
129 struct mlx5e_tx_err_ctx err_ctx = {0};
132 err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
133 sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
135 mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
139 static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
141 struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
144 netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
145 eq->core.eqn, eq->core.cons_index, eq->core.irqn);
147 eqe_count = mlx5_eq_poll_irq_disabled(eq);
149 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
153 netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
154 eqe_count, eq->core.eqn);
155 sq->channel->stats->eq_rearm++;
159 int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
161 char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
162 struct mlx5e_tx_err_ctx err_ctx;
165 err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
167 "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
168 sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
169 jiffies_to_usecs(jiffies - sq->txq->trans_start));
171 return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
175 /* state lock cannot be grabbed within this function.
176 * It can cause a dead lock or a read-after-free.
178 static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
180 return err_ctx->recover(err_ctx->sq);
183 static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
188 mutex_lock(&priv->state_lock);
190 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
193 err = mlx5e_safe_reopen_channels(priv);
196 mutex_unlock(&priv->state_lock);
/* devlink health ->recover() callback. With a context (triggered by a
 * specific TX error report) run the targeted per-SQ recovery; without
 * one (user-initiated `devlink health recover`) recover everything.
 */
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_tx_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
			 mlx5e_tx_reporter_recover_all(priv);
}
213 mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
214 u32 sqn, u8 state, bool stopped)
218 err = devlink_fmsg_obj_nest_start(fmsg);
222 err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn);
226 err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
230 err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
234 err = devlink_fmsg_obj_nest_end(fmsg);
241 static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
242 struct devlink_fmsg *fmsg)
244 struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
247 mutex_lock(&priv->state_lock);
249 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
252 err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
256 for (i = 0; i < priv->channels.num * priv->channels.params.num_tc;
258 struct mlx5e_txqsq *sq = priv->txq2sq[i];
261 err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
265 err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
267 netif_xmit_stopped(sq->txq));
271 err = devlink_fmsg_arr_pair_nest_end(fmsg);
276 mutex_unlock(&priv->state_lock);
280 static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
282 .recover = mlx5e_tx_reporter_recover,
283 .diagnose = mlx5e_tx_reporter_diagnose,
286 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
288 int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
290 struct devlink_health_reporter *reporter;
291 struct mlx5_core_dev *mdev = priv->mdev;
292 struct devlink *devlink = priv_to_devlink(mdev);
295 devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
296 MLX5_REPORTER_TX_GRACEFUL_PERIOD,
298 if (IS_ERR(reporter)) {
299 netdev_warn(priv->netdev,
300 "Failed to create tx reporter, err = %ld\n",
302 return PTR_ERR(reporter);
304 priv->tx_reporter = reporter;
308 void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
310 if (!priv->tx_reporter)
313 devlink_health_reporter_destroy(priv->tx_reporter);