1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2019 Mellanox Technologies.
11 /* Keep this string array consistent with the MLX5E_RQ_STATE_* enums in en.h */
12 static const char * const rq_sw_state_type_name[] = {
13 [MLX5E_RQ_STATE_ENABLED] = "enabled",
14 [MLX5E_RQ_STATE_RECOVERING] = "recovering",
15 [MLX5E_RQ_STATE_DIM] = "dim",
16 [MLX5E_RQ_STATE_NO_CSUM_COMPLETE] = "no_csum_complete",
17 [MLX5E_RQ_STATE_CSUM_FULL] = "csum_full",
18 [MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX] = "mini_cqe_hw_stridx",
19 [MLX5E_RQ_STATE_SHAMPO] = "shampo",
20 [MLX5E_RQ_STATE_MINI_CQE_ENHANCED] = "mini_cqe_enhanced",
21 [MLX5E_RQ_STATE_XSK] = "xsk",
24 static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
26 int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
31 out = kvzalloc(outlen, GFP_KERNEL);
35 err = mlx5_core_query_rq(dev, rqn, out);
39 rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
40 *state = MLX5_GET(rqc, rqc, state);
47 static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
49 struct mlx5_core_dev *dev = icosq->channel->mdev;
50 unsigned long exp_time;
52 exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
54 while (time_before(jiffies, exp_time)) {
55 if (icosq->cc == icosq->pc)
61 netdev_err(icosq->channel->netdev,
62 "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n",
63 icosq->sqn, icosq->cc, icosq->pc);
68 static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
70 WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n",
71 icosq->sqn, icosq->cc, icosq->pc);
76 static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
78 struct mlx5e_rq *xskrq = NULL;
79 struct mlx5_core_dev *mdev;
80 struct mlx5e_icosq *icosq;
81 struct net_device *dev;
88 mutex_lock(&icosq->channel->icosq_recovery_lock);
90 /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
91 rq = &icosq->channel->rq;
92 if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
93 xskrq = &icosq->channel->xskrq;
94 mdev = icosq->channel->mdev;
95 dev = icosq->channel->netdev;
96 err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
98 netdev_err(dev, "Failed to query ICOSQ 0x%x state. err = %d\n",
103 if (state != MLX5_SQC_STATE_ERR)
106 mlx5e_deactivate_rq(rq);
108 mlx5e_deactivate_rq(xskrq);
110 err = mlx5e_wait_for_icosq_flush(icosq);
114 mlx5e_deactivate_icosq(icosq);
116 /* At this point, both the rq and the icosq are disabled */
118 err = mlx5e_health_sq_to_ready(mdev, dev, icosq->sqn);
122 mlx5e_reset_icosq_cc_pc(icosq);
124 mlx5e_free_rx_missing_descs(rq);
126 mlx5e_free_rx_missing_descs(xskrq);
128 clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
129 mlx5e_activate_icosq(icosq);
131 mlx5e_activate_rq(rq);
132 rq->stats->recover++;
135 mlx5e_activate_rq(xskrq);
136 xskrq->stats->recover++;
139 mlx5e_trigger_napi_icosq(icosq->channel);
141 mutex_unlock(&icosq->channel->icosq_recovery_lock);
145 clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
146 mutex_unlock(&icosq->channel->icosq_recovery_lock);
150 static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
152 struct mlx5e_rq *rq = ctx;
155 mlx5e_deactivate_rq(rq);
156 err = mlx5e_flush_rq(rq, MLX5_RQC_STATE_ERR);
157 clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
161 mlx5e_activate_rq(rq);
162 rq->stats->recover++;
164 mlx5e_trigger_napi_icosq(rq->channel);
166 mlx5e_trigger_napi_sched(rq->cq.napi);
170 static int mlx5e_rx_reporter_timeout_recover(void *ctx)
172 struct mlx5_eq_comp *eq;
179 err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats);
180 if (err && rq->icosq)
181 clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state);
186 static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
188 return err_ctx->recover(err_ctx->ctx);
/* devlink health .recover callback: with a specific error context, run its
 * targeted recovery; without one (user-triggered recover), recover all
 * channels.
 */
static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
				     void *context,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_rx_reporter_recover_from_ctx(err_ctx) :
			 mlx5e_health_recover_channels(priv);
}
202 static void mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
203 struct devlink_fmsg *fmsg)
205 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
206 devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
207 devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
208 devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
209 devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
210 devlink_fmsg_u32_pair_put(fmsg, "WQE size", mlx5_wq_cyc_get_size(&icosq->wq));
212 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
213 devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
214 devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
215 devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
216 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
218 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
221 static void mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
225 BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES,
226 "rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h");
227 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
229 for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i)
230 devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i],
231 test_bit(i, &rq->state));
233 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
237 mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
238 struct devlink_fmsg *fmsg)
246 err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state);
250 wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
251 wq_head = mlx5e_rqwq_get_head(rq);
252 wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
254 devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
255 devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
256 devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
257 devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
258 devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
259 mlx5e_health_rq_put_sw_state(fmsg, rq);
260 mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
261 mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
264 struct mlx5e_icosq *icosq = rq->icosq;
268 err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
272 mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
278 static void mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
279 struct devlink_fmsg *fmsg)
281 devlink_fmsg_obj_nest_start(fmsg);
282 devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
283 mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
284 devlink_fmsg_obj_nest_end(fmsg);
287 static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
288 struct devlink_fmsg *fmsg)
290 struct mlx5e_priv *priv = rq->priv;
291 struct mlx5e_params *params;
292 u32 rq_stride, rq_sz;
295 params = &priv->channels.params;
296 rq_sz = mlx5e_rqwq_get_size(rq);
297 real_time = mlx5_is_real_time_rq(priv->mdev);
298 rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
300 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
301 devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
302 devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
303 devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
304 devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
305 mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
306 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
310 mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch,
311 struct devlink_fmsg *fmsg)
313 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
314 devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
315 mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
316 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
320 mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
321 struct devlink_fmsg *fmsg)
323 struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
324 struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
325 struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
327 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
328 mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
329 if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
330 mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
331 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
/* Wrap the common RQ diagnostics in a fmsg object tagged "ptp" instead of a
 * numeric channel index.
 */
static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
							   struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
	mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
	devlink_fmsg_obj_nest_end(fmsg);
}
343 static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
344 struct devlink_fmsg *fmsg,
345 struct netlink_ext_ack *extack)
347 struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
348 struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
351 mutex_lock(&priv->state_lock);
353 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
356 mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
357 devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
359 for (i = 0; i < priv->channels.num; i++) {
360 struct mlx5e_channel *c = priv->channels.c[i];
363 rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
366 mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
368 if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
369 mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
370 devlink_fmsg_arr_pair_nest_end(fmsg);
372 mutex_unlock(&priv->state_lock);
376 static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
379 struct mlx5e_txqsq *icosq = ctx;
380 struct mlx5_rsc_key key = {};
382 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
385 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
386 key.size = PAGE_SIZE;
387 key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
388 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
389 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
391 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
393 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
394 key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
395 key.index1 = icosq->sqn;
397 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
398 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
400 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
401 key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
402 key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
403 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
404 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
406 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
411 static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
414 struct mlx5_rsc_key key = {};
415 struct mlx5e_rq *rq = ctx;
417 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
420 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
421 key.size = PAGE_SIZE;
422 key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
423 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
424 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
426 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
428 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
429 key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
430 key.index1 = rq->rqn;
432 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
433 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
435 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
436 key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
437 key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
438 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
439 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
441 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
446 static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
447 struct devlink_fmsg *fmsg)
449 struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
450 struct mlx5_rsc_key key = {};
452 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
455 mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
456 key.size = PAGE_SIZE;
457 key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
458 mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
459 mlx5e_health_fmsg_named_obj_nest_end(fmsg);
460 devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
462 for (int i = 0; i < priv->channels.num; i++) {
463 struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
465 mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
468 if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
469 mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
471 devlink_fmsg_arr_pair_nest_end(fmsg);
475 static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
476 struct mlx5e_err_ctx *err_ctx,
477 struct devlink_fmsg *fmsg)
479 return err_ctx->dump(priv, fmsg, err_ctx->ctx);
/* devlink health .dump callback: with a specific error context, dump the
 * failing object; without one, dump all RQs.
 */
static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
				  struct devlink_fmsg *fmsg, void *context,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
	struct mlx5e_err_ctx *err_ctx = context;

	return err_ctx ? mlx5e_rx_reporter_dump_from_ctx(priv, err_ctx, fmsg) :
			 mlx5e_rx_reporter_dump_all_rqs(priv, fmsg);
}
493 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
495 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
496 struct mlx5e_icosq *icosq = rq->icosq;
497 struct mlx5e_priv *priv = rq->priv;
498 struct mlx5e_err_ctx err_ctx = {};
499 char icosq_str[32] = {};
502 err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
503 err_ctx.dump = mlx5e_rx_reporter_dump_rq;
506 snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn);
507 snprintf(err_str, sizeof(err_str),
508 "RX timeout on channel: %d, %s RQ: 0x%x, CQ: 0x%x",
509 rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);
511 mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
514 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
516 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
517 struct mlx5e_priv *priv = rq->priv;
518 struct mlx5e_err_ctx err_ctx = {};
521 err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
522 err_ctx.dump = mlx5e_rx_reporter_dump_rq;
523 snprintf(err_str, sizeof(err_str), "ERR CQE on RQ: 0x%x", rq->rqn);
525 mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
528 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
530 struct mlx5e_priv *priv = icosq->channel->priv;
531 char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
532 struct mlx5e_err_ctx err_ctx = {};
535 err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
536 err_ctx.dump = mlx5e_rx_reporter_dump_icosq;
537 snprintf(err_str, sizeof(err_str), "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
539 mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
542 void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
544 mutex_lock(&c->icosq_recovery_lock);
547 void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
549 mutex_unlock(&c->icosq_recovery_lock);
552 static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
554 .recover = mlx5e_rx_reporter_recover,
555 .diagnose = mlx5e_rx_reporter_diagnose,
556 .dump = mlx5e_rx_reporter_dump,
559 #define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
561 void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
563 struct devlink_health_reporter *reporter;
565 reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
566 &mlx5_rx_reporter_ops,
567 MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
568 if (IS_ERR(reporter)) {
569 netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
573 priv->rx_reporter = reporter;
576 void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
578 if (!priv->rx_reporter)
581 devlink_health_reporter_destroy(priv->rx_reporter);
582 priv->rx_reporter = NULL;