1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
4 #include <linux/device.h>
6 #include "hclge_debugfs.h"
8 #include "hclge_main.h"
/* human-readable strings for a boolean on/off state */
12 static const char * const state_str[] = { "off", "on" };
/* names for MAC address table entry states; index order matches the state enum */
13 static const char * const hclge_mac_state_str[] = {
14 "TO_ADD", "TO_DEL", "ACTIVE"
/*
 * Map each debugfs register-dump command to its DFX field-name array
 * (dfx_msg), the number of fields, the offset used to query the BD count,
 * and the firmware opcode that reads the registers.  A command may appear
 * several times (e.g. SSU/RPU) when its registers span multiple opcodes.
 */
17 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
18 { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
19 .dfx_msg = &hclge_dbg_bios_common_reg[0],
20 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
21 .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
22 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
23 { .cmd = HNAE3_DBG_CMD_REG_SSU,
24 .dfx_msg = &hclge_dbg_ssu_reg_0[0],
25 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
26 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
27 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
28 { .cmd = HNAE3_DBG_CMD_REG_SSU,
29 .dfx_msg = &hclge_dbg_ssu_reg_1[0],
30 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
31 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
32 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
33 { .cmd = HNAE3_DBG_CMD_REG_SSU,
34 .dfx_msg = &hclge_dbg_ssu_reg_2[0],
35 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
36 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
37 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
38 { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
39 .dfx_msg = &hclge_dbg_igu_egu_reg[0],
40 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
41 .offset = HCLGE_DBG_DFX_IGU_OFFSET,
42 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
43 { .cmd = HNAE3_DBG_CMD_REG_RPU,
44 .dfx_msg = &hclge_dbg_rpu_reg_0[0],
45 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
46 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
47 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
48 { .cmd = HNAE3_DBG_CMD_REG_RPU,
49 .dfx_msg = &hclge_dbg_rpu_reg_1[0],
50 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
51 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
52 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
53 { .cmd = HNAE3_DBG_CMD_REG_NCSI,
54 .dfx_msg = &hclge_dbg_ncsi_reg[0],
55 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
56 .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
57 .cmd = HCLGE_OPC_DFX_NCSI_REG } },
58 { .cmd = HNAE3_DBG_CMD_REG_RTC,
59 .dfx_msg = &hclge_dbg_rtc_reg[0],
60 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
61 .offset = HCLGE_DBG_DFX_RTC_OFFSET,
62 .cmd = HCLGE_OPC_DFX_RTC_REG } },
63 { .cmd = HNAE3_DBG_CMD_REG_PPP,
64 .dfx_msg = &hclge_dbg_ppp_reg[0],
65 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
66 .offset = HCLGE_DBG_DFX_PPP_OFFSET,
67 .cmd = HCLGE_OPC_DFX_PPP_REG } },
68 { .cmd = HNAE3_DBG_CMD_REG_RCB,
69 .dfx_msg = &hclge_dbg_rcb_reg[0],
70 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
71 .offset = HCLGE_DBG_DFX_RCB_OFFSET,
72 .cmd = HCLGE_OPC_DFX_RCB_REG } },
73 { .cmd = HNAE3_DBG_CMD_REG_TQP,
74 .dfx_msg = &hclge_dbg_tqp_reg[0],
75 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
76 .offset = HCLGE_DBG_DFX_TQP_OFFSET,
77 .cmd = HCLGE_OPC_DFX_TQP_REG } },
/*
 * hclge_dbg_fill_content - compose one fixed-width text row into @content.
 * The row is pre-filled with spaces; each column's text is copied in
 * without a NUL terminator (deliberate: the space padding separates the
 * columns).  The two strncpy calls appear to be alternative branches:
 * values from @result when provided, otherwise the item names (header row)
 * — TODO confirm, the branch lines are missing from this extract.
 * NOTE(review): this extract is missing lines (pos initialization, braces,
 * terminating newline/NUL) — compare against the full source.
 */
80 static void hclge_dbg_fill_content(char *content, u16 len,
81 const struct hclge_dbg_item *items,
82 const char **result, u16 size)
87 memset(content, ' ', len);
88 for (i = 0; i < size; i++) {
90 strncpy(pos, result[i], strlen(result[i]));
92 strncpy(pos, items[i].name, strlen(items[i].name));
/* each column is name-width plus a per-item spacing interval */
93 pos += strlen(items[i].name) + items[i].interval;
/*
 * hclge_dbg_get_func_id_str - format function id @id as text into @buf.
 * Non-zero ids print as "vf<id-1>"; id 0 is presumably the PF case handled
 * on lines missing from this extract — verify against the full source.
 */
99 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
102 sprintf(buf, "vf%u", id - 1);
/*
 * hclge_dbg_get_dfx_bd_num - query how many buffer descriptors (BDs) the
 * DFX register set identified by @offset needs; the count is stored via
 * *bd_num.  A zero BD count from firmware is treated as an error.
 * NOTE(review): this extract is missing lines (declarations, error
 * returns, closing braces) — compare against the full source.
 */
109 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
112 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
113 int entries_per_desc;
117 ret = hclge_query_bd_num_cmd_send(hdev, desc);
119 dev_err(&hdev->pdev->dev,
120 "failed to get dfx bd_num, offset = %d, ret = %d\n",
/* locate the 32-bit BD-count word for @offset inside the descriptor array */
125 entries_per_desc = ARRAY_SIZE(desc[0].data);
126 index = offset % entries_per_desc;
128 *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
130 dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
/*
 * hclge_dbg_cmd_send - build a chain of @bd_num read descriptors for
 * opcode @cmd (the first data word carries @index) and post it to
 * firmware; the reply lands back in @desc_src.
 * NOTE(review): lines are missing from this extract (loop body that
 * advances desc, error-path braces, final return).
 */
137 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
138 struct hclge_desc *desc_src,
139 int index, int bd_num,
140 enum hclge_opcode_type cmd)
142 struct hclge_desc *desc = desc_src;
145 hclge_cmd_setup_basic_desc(desc, cmd, true);
146 desc->data[0] = cpu_to_le32(index);
/* link each descriptor to the next with the NEXT flag to form one chain */
148 for (i = 1; i < bd_num; i++) {
149 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
151 hclge_cmd_setup_basic_desc(desc, cmd, true);
154 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
156 dev_err(&hdev->pdev->dev,
157 "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
162 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
163 const struct hclge_dbg_reg_type_info *reg_info,
164 char *buf, int len, int *pos)
166 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
167 const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg;
168 struct hclge_desc *desc_src;
169 u32 index, entry, i, cnt;
170 int bd_num, min_num, ret;
171 struct hclge_desc *desc;
173 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
177 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
181 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
183 for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
184 *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
185 cnt++, dfx_message->message);
187 for (i = 0; i < cnt; i++)
188 *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
190 *pos += scnprintf(buf + *pos, len - *pos, "\n");
192 for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
193 dfx_message = reg_info->dfx_msg;
195 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
200 for (i = 0; i < min_num; i++, dfx_message++) {
201 entry = i % HCLGE_DESC_DATA_LEN;
205 *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
206 le32_to_cpu(desc->data[entry]));
208 *pos += scnprintf(buf + *pos, len - *pos, "\n");
216 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
217 const struct hclge_dbg_reg_type_info *reg_info,
218 char *buf, int len, int *pos)
220 const struct hclge_dbg_reg_common_msg *reg_msg = ®_info->reg_msg;
221 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
222 struct hclge_desc *desc_src;
223 int bd_num, min_num, ret;
224 struct hclge_desc *desc;
227 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
231 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
237 ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
243 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
245 for (i = 0; i < min_num; i++, dfx_message++) {
246 entry = i % HCLGE_DESC_DATA_LEN;
249 if (!dfx_message->flag)
252 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
253 dfx_message->message,
254 le32_to_cpu(desc->data[entry]));
/*
 * hclge_dbg_dump_mac_enable_status - read the MAC mode config register via
 * HCLGE_OPC_CONFIG_MAC_MODE and print each enable bit of
 * txrx_pad_fcs_loop_en as its own "name: value" line.
 */
261 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
264 struct hclge_config_mac_mode_cmd *req;
265 struct hclge_desc desc;
269 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
273 dev_err(&hdev->pdev->dev,
274 "failed to dump mac enable status, ret = %d\n", ret);
278 req = (struct hclge_config_mac_mode_cmd *)desc.data;
279 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
/* one line per control bit, extracted from the loop_en word */
281 *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
282 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
283 *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
284 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
285 *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
286 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
287 *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
288 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
289 *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
290 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
291 *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
292 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
293 *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
294 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
295 *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
296 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
297 *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
298 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
299 *pos += scnprintf(buf + *pos, len - *pos,
300 "mac_rx_oversize_truncate_en: %#x\n",
301 hnae3_get_bit(loop_en,
302 HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
303 *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
304 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
305 *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
306 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
307 *pos += scnprintf(buf + *pos, len - *pos,
308 "mac_tx_under_min_err_en: %#x\n",
309 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
310 *pos += scnprintf(buf + *pos, len - *pos,
311 "mac_tx_oversize_truncate_en: %#x\n",
312 hnae3_get_bit(loop_en,
313 HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
/*
 * hclge_dbg_dump_mac_frame_size - query HCLGE_OPC_CONFIG_MAX_FRM_SIZE and
 * print the configured max (and min) MAC frame sizes.
 * NOTE(review): the min_frame_size value argument line is missing from
 * this extract.
 */
318 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
321 struct hclge_config_max_frm_size_cmd *req;
322 struct hclge_desc desc;
325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
327 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
329 dev_err(&hdev->pdev->dev,
330 "failed to dump mac frame size, ret = %d\n", ret);
334 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
336 *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
337 le16_to_cpu(req->max_frm_size));
338 *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
/*
 * hclge_dbg_dump_mac_speed_duplex - query HCLGE_OPC_CONFIG_SPEED_DUP and
 * print the speed field (bits 5:0) and duplex bit (bit 7) of speed_dup.
 */
344 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
347 #define HCLGE_MAC_SPEED_SHIFT	0
348 #define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
349 #define HCLGE_MAC_DUPLEX_SHIFT	7
351 struct hclge_config_mac_speed_dup_cmd *req;
352 struct hclge_desc desc;
355 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
357 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
359 dev_err(&hdev->pdev->dev,
360 "failed to dump mac speed duplex, ret = %d\n", ret);
364 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
366 *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
367 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
368 HCLGE_MAC_SPEED_SHIFT));
369 *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
370 hnae3_get_bit(req->speed_dup,
371 HCLGE_MAC_DUPLEX_SHIFT));
/*
 * hclge_dbg_dump_mac - aggregate MAC dump: enable status, frame size,
 * then speed/duplex.  NOTE(review): the intermediate "if (ret) return"
 * checks are missing from this extract.
 */
375 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
380 ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
384 ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
388 return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
/*
 * hclge_dbg_dump_dcb_qset - print per-qset DCB status bits: for every qset
 * id, query HCLGE_OPC_QSET_DFX_STS and print the four bitmap flags found
 * in data[1] of the reply.
 */
391 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
394 struct hclge_dbg_bitmap_cmd *bitmap;
395 struct hclge_desc desc;
396 u16 qset_id, qset_num;
399 ret = hclge_tm_get_qset_num(hdev, &qset_num);
403 *pos += scnprintf(buf + *pos, len - *pos,
404 "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
405 for (qset_id = 0; qset_id < qset_num; qset_id++) {
406 ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
407 HCLGE_OPC_QSET_DFX_STS);
411 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
413 *pos += scnprintf(buf + *pos, len - *pos,
414 "%04u           %#x            %#x            %#x               %#x\n",
415 qset_id, bitmap->bit0, bitmap->bit1,
416 bitmap->bit2, bitmap->bit3);
/*
 * hclge_dbg_dump_dcb_pri - print per-priority DCB status: for every
 * priority id, query HCLGE_OPC_PRI_DFX_STS and print the bitmap flags
 * from data[1].  NOTE(review): the bit2 argument line is missing here.
 */
422 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
425 struct hclge_dbg_bitmap_cmd *bitmap;
426 struct hclge_desc desc;
430 ret = hclge_tm_get_pri_num(hdev, &pri_num);
434 *pos += scnprintf(buf + *pos, len - *pos,
435 "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
436 for (pri_id = 0; pri_id < pri_num; pri_id++) {
437 ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
438 HCLGE_OPC_PRI_DFX_STS);
442 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
444 *pos += scnprintf(buf + *pos, len - *pos,
445 "%03u       %#x        %#x               %#x\n",
446 pri_id, bitmap->bit0, bitmap->bit1,
/*
 * hclge_dbg_dump_dcb_pg - print per-PG (priority group) DCB status: for
 * every PG id, query HCLGE_OPC_PG_DFX_STS and print the bitmap flags
 * from data[1].  NOTE(review): the bit2 argument line is missing here.
 */
453 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
456 struct hclge_dbg_bitmap_cmd *bitmap;
457 struct hclge_desc desc;
461 *pos += scnprintf(buf + *pos, len - *pos,
462 "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
463 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
464 ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
465 HCLGE_OPC_PG_DFX_STS);
469 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
471 *pos += scnprintf(buf + *pos, len - *pos,
472 "%03u      %#x       %#x              %#x\n",
473 pg_id, bitmap->bit0, bitmap->bit1,
/*
 * hclge_dbg_dump_dcb_queue - print per-NQ scheduler counters: for every
 * TQP, query the NIC-queue count (HCLGE_OPC_SCH_NQ_CNT) and RoCE-queue
 * count (HCLGE_OPC_SCH_RQ_CNT) and print both on one row.
 */
480 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
483 struct hclge_desc desc;
487 *pos += scnprintf(buf + *pos, len - *pos,
488 "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
489 for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
490 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
491 HCLGE_OPC_SCH_NQ_CNT);
495 *pos += scnprintf(buf + *pos, len - *pos, "%04u           %#x",
496 nq_id, le32_to_cpu(desc.data[1]));
498 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
499 HCLGE_OPC_SCH_RQ_CNT);
/* NOTE(review): the format-string line for this call is missing here */
503 *pos += scnprintf(buf + *pos, len - *pos,
505 le32_to_cpu(desc.data[1]));
/*
 * hclge_dbg_dump_dcb_port - query HCLGE_OPC_PORT_DFX_STS for the port and
 * print its mask / shaping-pass bits from the data[1] bitmap.
 * NOTE(review): the bitmap->bitN value arguments are on lines missing
 * from this extract.
 */
511 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
514 struct hclge_dbg_bitmap_cmd *bitmap;
515 struct hclge_desc desc;
519 ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
520 HCLGE_OPC_PORT_DFX_STS);
524 bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
526 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
528 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
/*
 * hclge_dbg_dump_dcb_tm - dump TM internal counters and status registers
 * (HCLGE_OPC_TM_INTERNAL_CNT / _STS, and _STS_1 for non-copper media).
 */
534 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
537 struct hclge_desc desc[2];
541 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
542 HCLGE_OPC_TM_INTERNAL_CNT);
546 *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
547 le32_to_cpu(desc[0].data[1]));
548 *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
549 le32_to_cpu(desc[0].data[2]));
/* internal status spans two descriptors, hence bd_num = 2 */
551 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
552 HCLGE_OPC_TM_INTERNAL_STS);
556 *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
557 le32_to_cpu(desc[0].data[1]));
558 *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
559 le32_to_cpu(desc[0].data[2]));
560 *pos += scnprintf(buf + *pos, len - *pos,
561 "sch_roce_fifo_afull_gap: %#x\n",
562 le32_to_cpu(desc[0].data[3]));
563 *pos += scnprintf(buf + *pos, len - *pos,
564 "tx_private_waterline: %#x\n",
565 le32_to_cpu(desc[0].data[4]));
566 *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
567 le32_to_cpu(desc[0].data[5]));
568 *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
569 le32_to_cpu(desc[1].data[0]));
570 *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
571 le32_to_cpu(desc[1].data[1]));
/* the extra STS_1 register set does not apply to copper media */
573 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
576 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
577 HCLGE_OPC_TM_INTERNAL_STS_1);
581 *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
582 le32_to_cpu(desc[0].data[1]));
583 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
584 le32_to_cpu(desc[0].data[2]));
585 *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
586 le32_to_cpu(desc[0].data[3]));
587 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
588 le32_to_cpu(desc[0].data[4]));
589 *pos += scnprintf(buf + *pos, len - *pos,
590 "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
591 le32_to_cpu(desc[0].data[5]));
/*
 * hclge_dbg_dump_dcb - aggregate DCB dump: qset, priority, PG, queue,
 * port and TM sections in order.  NOTE(review): the intermediate
 * "if (ret) return" checks are missing from this extract.
 */
596 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
601 ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
605 ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
609 ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
613 ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
617 ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
621 return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
/*
 * hclge_dbg_dump_reg_cmd - dispatch a register-dump debugfs command:
 * scan hclge_dbg_reg_info[] for entries matching @cmd; TQP registers use
 * the per-queue dumper, everything else the common dumper.
 */
624 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
625 enum hnae3_dbg_cmd cmd, char *buf, int len)
627 const struct hclge_dbg_reg_type_info *reg_info;
628 int pos = 0, ret = 0;
631 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
632 reg_info = &hclge_dbg_reg_info[i];
633 if (cmd == reg_info->cmd) {
634 if (cmd == HNAE3_DBG_CMD_REG_TQP)
635 return hclge_dbg_dump_reg_tqp(hdev, reg_info,
/* non-TQP sets may span several table entries, so keep iterating */
638 ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
/*
 * hclge_dbg_dump_tc - print TC scheduling config: enabled TC count, the
 * ETS weight offset, and per-TC mode (dwrr/sp) with its DWRR weight.
 * Only supported on DCB-capable devices.
 */
648 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
650 struct hclge_ets_tc_weight_cmd *ets_weight;
651 struct hclge_desc desc;
657 if (!hnae3_dev_dcb_supported(hdev)) {
658 dev_err(&hdev->pdev->dev,
659 "Only DCB-supported dev supports tc\n");
663 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
664 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
666 dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
671 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
673 pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
674 hdev->tm_info.num_tc);
675 pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
676 ets_weight->weight_offset);
678 pos += scnprintf(buf + pos, len - pos, "TC    MODE  WEIGHT\n");
679 for (i = 0; i < HNAE3_MAX_TC; i++) {
/* a zero tc_weight entry means strict priority, non-zero means DWRR */
680 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
681 pos += scnprintf(buf + pos, len - pos, "%u     %4s    %3u\n",
683 hdev->tm_info.pg_info[0].tc_dwrr[i]);
/*
 * Column layout for the TM PG dump table.  NOTE(review): most item rows
 * are missing from this extract — only the two rate columns are visible.
 */
689 static const struct hclge_dbg_item tm_pg_items[] = {
700 { "C_RATE(Mbps)", 2 },
707 { "P_RATE(Mbps)", 0 }
/*
 * hclge_dbg_fill_shaper_content - format the seven shaper parameters of
 * @para into consecutive result[] column strings, advancing *index once
 * per column written.
 */
710 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
711 char **result, u8 *index)
713 sprintf(result[(*index)++], "%3u", para->ir_b);
714 sprintf(result[(*index)++], "%3u", para->ir_u);
715 sprintf(result[(*index)++], "%3u", para->ir_s);
716 sprintf(result[(*index)++], "%3u", para->bs_b);
717 sprintf(result[(*index)++], "%3u", para->bs_s);
718 sprintf(result[(*index)++], "%3u", para->flag);
719 sprintf(result[(*index)++], "%6u", para->rate);
/*
 * hclge_dbg_dump_tm_pg - print one fixed-width table row per priority
 * group: pri bitmap, scheduling mode, weight, and C/P shaper parameters.
 * A NULL result array makes the first hclge_dbg_fill_content() call
 * produce the header row.
 */
722 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
724 char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN];
725 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
726 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
727 u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
728 char content[HCLGE_DBG_TM_INFO_LEN];
732 for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++)
733 result[i] = &data_str[i][0];
735 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
736 NULL, ARRAY_SIZE(tm_pg_items));
737 pos += scnprintf(buf + pos, len - pos, "%s", content);
739 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
740 ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
744 ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
748 ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
/* C = CIR (committed) shaper, P = PIR (peak) shaper */
752 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
753 HCLGE_OPC_TM_PG_C_SHAPPING,
758 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
759 HCLGE_OPC_TM_PG_P_SHAPPING,
764 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
768 sprintf(result[j++], "%02u", pg_id);
769 sprintf(result[j++], "0x%02x", pri_bit_map);
770 sprintf(result[j++], "%4s", sch_mode_str);
771 sprintf(result[j++], "%3u", weight);
772 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
773 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
775 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
776 (const char **)result,
777 ARRAY_SIZE(tm_pg_items));
778 pos += scnprintf(buf + pos, len - pos, "%s", content);
/*
 * hclge_dbg_dump_tm_port - print the port-level shaper parameters as a
 * two-line header/value table.  NOTE(review): the shaper_para.rate
 * argument line is missing from this extract.
 */
784 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
786 struct hclge_tm_shaper_para shaper_para;
790 ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
794 pos += scnprintf(buf + pos, len - pos,
795 "IR_B  IR_U  IR_S  BS_B  BS_S  FLAG  RATE(Mbps)\n");
796 pos += scnprintf(buf + pos, len - pos,
797 "%3u   %3u   %3u   %3u   %3u     %1u   %6u\n",
798 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
799 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
/*
 * hclge_dbg_dump_tm_bp_qset_map - read the back-pressure to qset bitmap
 * for @tc_id, one 32-bit group at a time, then print eight groups per
 * output row.  The group count depends on the device's TQP capacity.
 * NOTE(review): the tc_id setup line and the `i` index update for the
 * print loop are missing from this extract.
 */
805 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
808 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
809 struct hclge_bp_to_qs_map_cmd *map;
810 struct hclge_desc desc;
817 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
818 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
819 map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
820 for (group_id = 0; group_id < grp_num; group_id++) {
821 hclge_cmd_setup_basic_desc(&desc,
822 HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
825 map->qs_group_id = group_id;
826 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
828 dev_err(&hdev->pdev->dev,
829 "failed to get bp to qset map, ret = %d\n",
834 qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
837 pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
838 for (group_id = 0; group_id < grp_num / 8; group_id++) {
839 pos += scnprintf(buf + pos, len - pos,
840 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
841 group_id * 256, qset_mapping[i + 7],
842 qset_mapping[i + 6], qset_mapping[i + 5],
843 qset_mapping[i + 4], qset_mapping[i + 3],
844 qset_mapping[i + 2], qset_mapping[i + 1],
/*
 * hclge_dbg_dump_tm_map - for every TQP, print its queue -> qset -> pri
 * -> tc mapping, followed (on DCB-capable devices) by the BP/qset bitmap
 * for the queue's TC.
 */
852 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
862 for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
863 ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
867 ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
872 ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
876 pos += scnprintf(buf + pos, len - pos,
877 "QUEUE_ID   QSET_ID   PRI_ID   TC_ID\n");
878 pos += scnprintf(buf + pos, len - pos,
879 "%04u        %4u       %3u      %2u\n",
880 queue_id, qset_id, pri_id, tc_id);
/* the BP bitmap section only exists on DCB-capable hardware */
882 if (!hnae3_dev_dcb_supported(hdev))
885 ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
891 pos += scnprintf(buf + pos, len - pos, "\n");
/*
 * hclge_dbg_dump_tm_nodes - query HCLGE_OPC_TM_NODES and print the base
 * id and count of each TM node level (PG, PRI, QSET, QUEUE).
 */
897 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
899 struct hclge_tm_nodes_cmd *nodes;
900 struct hclge_desc desc;
904 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
905 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
907 dev_err(&hdev->pdev->dev,
908 "failed to dump tm nodes, ret = %d\n", ret);
912 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
914 pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
915 pos += scnprintf(buf + pos, len - pos, "PG      %4u      %4u\n",
916 nodes->pg_base_id, nodes->pg_num);
917 pos += scnprintf(buf + pos, len - pos, "PRI     %4u      %4u\n",
918 nodes->pri_base_id, nodes->pri_num);
919 pos += scnprintf(buf + pos, len - pos, "QSET    %4u      %4u\n",
920 le16_to_cpu(nodes->qset_base_id),
921 le16_to_cpu(nodes->qset_num));
922 pos += scnprintf(buf + pos, len - pos, "QUEUE   %4u      %4u\n",
923 le16_to_cpu(nodes->queue_base_id),
924 le16_to_cpu(nodes->queue_num));
/*
 * Column layout for the TM priority dump table.  NOTE(review): most item
 * rows are missing from this extract — only the two rate columns remain.
 */
929 static const struct hclge_dbg_item tm_pri_items[] = {
939 { "C_RATE(Mbps)", 2 },
946 { "P_RATE(Mbps)", 0 }
/*
 * hclge_dbg_dump_tm_pri - print one fixed-width table row per priority:
 * scheduling mode, weight, and C/P shaper parameters.
 */
949 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
951 char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
952 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
953 char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
954 char content[HCLGE_DBG_TM_INFO_LEN];
955 u8 pri_num, sch_mode, weight, i, j;
958 ret = hclge_tm_get_pri_num(hdev, &pri_num);
962 for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
963 result[i] = &data_str[i][0];
/* NULL result array makes fill_content emit the header row */
965 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
966 NULL, ARRAY_SIZE(tm_pri_items));
967 pos = scnprintf(buf, len, "%s", content);
969 for (i = 0; i < pri_num; i++) {
970 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
974 ret = hclge_tm_get_pri_weight(hdev, i, &weight);
978 ret = hclge_tm_get_pri_shaper(hdev, i,
979 HCLGE_OPC_TM_PRI_C_SHAPPING,
984 ret = hclge_tm_get_pri_shaper(hdev, i,
985 HCLGE_OPC_TM_PRI_P_SHAPPING,
990 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
994 sprintf(result[j++], "%04u", i);
995 sprintf(result[j++], "%4s", sch_mode_str);
996 sprintf(result[j++], "%3u", weight);
997 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
998 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
999 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1000 (const char **)result,
1001 ARRAY_SIZE(tm_pri_items));
1002 pos += scnprintf(buf + pos, len - pos, "%s", content);
/*
 * Column layout for the TM qset dump table.  NOTE(review): the item rows
 * and closing brace are missing from this extract.
 */
1008 static const struct hclge_dbg_item tm_qset_items[] = {
/*
 * hclge_dbg_dump_tm_qset - print one fixed-width table row per qset:
 * mapped priority, link-valid flag, scheduling mode, weight and shaper
 * parameters.
 */
1023 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1025 char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1026 char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1027 u8 priority, link_vld, sch_mode, weight;
1028 struct hclge_tm_shaper_para shaper_para;
1029 char content[HCLGE_DBG_TM_INFO_LEN];
1034 ret = hclge_tm_get_qset_num(hdev, &qset_num);
1038 for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1039 result[i] = &data_str[i][0];
/* NULL result array makes fill_content emit the header row */
1041 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1042 NULL, ARRAY_SIZE(tm_qset_items));
1043 pos = scnprintf(buf, len, "%s", content);
1045 for (i = 0; i < qset_num; i++) {
1046 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1050 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1054 ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1058 ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1062 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1066 sprintf(result[j++], "%04u", i);
1067 sprintf(result[j++], "%4u", priority);
1068 sprintf(result[j++], "%4u", link_vld);
1069 sprintf(result[j++], "%4s", sch_mode_str);
1070 sprintf(result[j++], "%3u", weight);
1071 hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1073 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1074 (const char **)result,
1075 ARRAY_SIZE(tm_qset_items));
1076 pos += scnprintf(buf + pos, len - pos, "%s", content);
/*
 * hclge_dbg_dump_qos_pause_cfg - query HCLGE_OPC_CFG_MAC_PARA and print
 * the pause transmit gap and transmit time parameters.
 */
1082 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1085 struct hclge_cfg_pause_param_cmd *pause_param;
1086 struct hclge_desc desc;
1090 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1091 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1093 dev_err(&hdev->pdev->dev,
1094 "failed to dump qos pause, ret = %d\n", ret);
1098 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1100 pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1101 pause_param->pause_trans_gap);
1102 pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1103 le16_to_cpu(pause_param->pause_trans_time));
/*
 * hclge_dbg_dump_qos_pri_map - query HCLGE_OPC_PRI_TO_TC_MAPPING and
 * print the vlan-to-pri setting plus the priority-to-TC table; each byte
 * of the reply packs two 4-bit TC values.
 */
1107 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1110 #define HCLGE_DBG_TC_MASK		0x0F
1111 #define HCLGE_DBG_TC_BIT_WIDTH		4
1113 struct hclge_qos_pri_map_cmd *pri_map;
1114 struct hclge_desc desc;
1120 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1121 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1123 dev_err(&hdev->pdev->dev,
1124 "failed to dump qos pri map, ret = %d\n", ret);
1128 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1130 pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1132 pos += scnprintf(buf + pos, len - pos, "PRI  TC\n");
/* extract the 4-bit TC nibble for priority i (two priorities per byte) */
1134 pri_tc = (u8 *)pri_map;
1135 for (i = 0; i < HNAE3_MAX_TC; i++) {
1136 tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1137 tc &= HCLGE_DBG_TC_MASK;
1138 pos += scnprintf(buf + pos, len - pos, "%u     %u\n", i, tc);
/*
 * hclge_dbg_dump_tx_buf_cfg - query HCLGE_OPC_TX_BUFF_ALLOC and print the
 * per-TC TX packet buffer allocation.
 */
1144 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1146 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1147 struct hclge_desc desc;
1151 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1152 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1154 dev_err(&hdev->pdev->dev,
1155 "failed to dump tx buf, ret = %d\n", ret);
1159 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1160 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1161 pos += scnprintf(buf + pos, len - pos,
1162 "tx_packet_buf_tc_%d: 0x%x\n", i,
1163 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
/*
 * hclge_dbg_dump_rx_priv_buf_cfg - query HCLGE_OPC_RX_PRIV_BUFF_ALLOC and
 * print the per-TC private RX buffer counts plus the shared buffer size.
 */
1168 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1171 struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1172 struct hclge_desc desc;
1176 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1177 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1179 dev_err(&hdev->pdev->dev,
1180 "failed to dump rx priv buf, ret = %d\n", ret);
1184 pos += scnprintf(buf + pos, len - pos, "\n");
1186 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1187 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1188 pos += scnprintf(buf + pos, len - pos,
1189 "rx_packet_buf_tc_%d: 0x%x\n", i,
1190 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1192 pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1193 le16_to_cpu(rx_buf_cmd->shared_buf));
/*
 * hclge_dbg_dump_rx_common_wl_cfg - query HCLGE_OPC_RX_COM_WL_ALLOC and
 * print the common RX waterline high/low thresholds.
 */
1198 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1201 struct hclge_rx_com_wl *rx_com_wl;
1202 struct hclge_desc desc;
1206 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1207 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1209 dev_err(&hdev->pdev->dev,
1210 "failed to dump rx common wl, ret = %d\n", ret);
1214 rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1215 pos += scnprintf(buf + pos, len - pos, "\n");
1216 pos += scnprintf(buf + pos, len - pos,
1217 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1218 le16_to_cpu(rx_com_wl->com_wl.high),
1219 le16_to_cpu(rx_com_wl->com_wl.low));
/*
 * hclge_dbg_dump_rx_global_pkt_cnt - query HCLGE_OPC_RX_GBL_PKT_CNT and
 * print the global RX packet counter high/low words.
 */
1224 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1227 struct hclge_rx_com_wl *rx_packet_cnt;
1228 struct hclge_desc desc;
1232 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1233 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1235 dev_err(&hdev->pdev->dev,
1236 "failed to dump rx global pkt cnt, ret = %d\n", ret);
1240 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1241 pos += scnprintf(buf + pos, len - pos,
1242 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1243 le16_to_cpu(rx_packet_cnt->com_wl.high),
1244 le16_to_cpu(rx_packet_cnt->com_wl.low));
/*
 * hclge_dbg_dump_rx_priv_wl_buf_cfg - read the per-TC private RX
 * waterlines with a two-descriptor chained HCLGE_OPC_RX_PRIV_WL_ALLOC
 * command (each descriptor carries HCLGE_TC_NUM_ONE_DESC TCs) and print
 * high/low per TC.
 */
1249 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1252 struct hclge_rx_priv_wl_buf *rx_priv_wl;
1253 struct hclge_desc desc[2];
1257 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1258 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1259 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1260 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1262 dev_err(&hdev->pdev->dev,
1263 "failed to dump rx priv wl buf, ret = %d\n", ret);
/* first descriptor: TCs 0..HCLGE_TC_NUM_ONE_DESC-1 */
1267 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1268 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1269 pos += scnprintf(buf + pos, len - pos,
1270 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1271 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1272 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
/* second descriptor: the remaining TCs */
1274 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1275 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1276 pos += scnprintf(buf + pos, len - pos,
1277 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1278 i + HCLGE_TC_NUM_ONE_DESC,
1279 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1280 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
/*
 * hclge_dbg_dump_rx_common_threshold_cfg - read the per-TC common RX
 * thresholds with a two-descriptor chained HCLGE_OPC_RX_COM_THRD_ALLOC
 * command and print high/low per TC (same split as the priv-wl dump).
 */
1285 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1288 struct hclge_rx_com_thrd *rx_com_thrd;
1289 struct hclge_desc desc[2];
1293 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1294 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1295 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1296 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1298 dev_err(&hdev->pdev->dev,
1299 "failed to dump rx common threshold, ret = %d\n", ret);
1303 pos += scnprintf(buf + pos, len - pos, "\n");
/* first descriptor: TCs 0..HCLGE_TC_NUM_ONE_DESC-1 */
1304 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1305 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1306 pos += scnprintf(buf + pos, len - pos,
1307 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1308 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1309 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
/* second descriptor: the remaining TCs */
1311 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1312 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1313 pos += scnprintf(buf + pos, len - pos,
1314 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1315 i + HCLGE_TC_NUM_ONE_DESC,
1316 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1317 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
/* Aggregate all QoS buffer dumps into one output buffer.
 * Each sub-dump returns the byte count it wrote (>= 0) or a negative errno;
 * the running offset "pos" advances by that count between calls.
 */
static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
				      int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	pos += scnprintf(buf + pos, len - pos, "\n");

	/* the remaining sections are only meaningful on DCB-capable devices */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
	if (ret < 0)
		return ret;
	pos += ret;

	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
						     len - pos);
	if (ret < 0)
		return ret;

	return 0;
}
1365 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1367 struct hclge_mac_ethertype_idx_rd_cmd *req0;
1368 struct hclge_desc desc;
1369 u32 msg_egress_port;
1373 pos += scnprintf(buf + pos, len - pos,
1374 "entry mac_addr mask ether ");
1375 pos += scnprintf(buf + pos, len - pos,
1376 "mask vlan mask i_map i_dir e_type ");
1377 pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
1379 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1380 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1382 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1383 req0->index = cpu_to_le16(i);
1385 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1387 dev_err(&hdev->pdev->dev,
1388 "failed to dump manage table, ret = %d\n", ret);
1392 if (!req0->resp_code)
1395 pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
1396 le16_to_cpu(req0->index), req0->mac_addr);
1398 pos += scnprintf(buf + pos, len - pos,
1400 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1401 le16_to_cpu(req0->ethter_type),
1402 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1403 le16_to_cpu(req0->vlan_tag) &
1404 HCLGE_DBG_MNG_VLAN_TAG);
1406 pos += scnprintf(buf + pos, len - pos,
1408 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1409 req0->i_port_bitmap, req0->i_port_direction);
1411 msg_egress_port = le16_to_cpu(req0->egress_port);
1412 pos += scnprintf(buf + pos, len - pos,
1413 "%x %x %02x %04x %x\n",
1414 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1415 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1416 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1417 le16_to_cpu(req0->egress_queue),
1418 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1424 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1426 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1428 struct hclge_dbg_tcam_msg tcam_msg)
1430 struct hclge_fd_tcam_config_1_cmd *req1;
1431 struct hclge_fd_tcam_config_2_cmd *req2;
1432 struct hclge_fd_tcam_config_3_cmd *req3;
1433 struct hclge_desc desc[3];
1438 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1439 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1440 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1441 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1442 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1444 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1445 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1446 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1448 req1->stage = tcam_msg.stage;
1449 req1->xy_sel = sel_x ? 1 : 0;
1450 req1->index = cpu_to_le32(tcam_msg.loc);
1452 ret = hclge_cmd_send(&hdev->hw, desc, 3);
1456 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1457 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1460 /* tcam_data0 ~ tcam_data1 */
1461 req = (u32 *)req1->tcam_data;
1462 for (i = 0; i < 2; i++)
1463 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1466 /* tcam_data2 ~ tcam_data7 */
1467 req = (u32 *)req2->tcam_data;
1468 for (i = 0; i < 6; i++)
1469 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1472 /* tcam_data8 ~ tcam_data12 */
1473 req = (u32 *)req3->tcam_data;
1474 for (i = 0; i < 5; i++)
1475 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1481 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1483 struct hclge_fd_rule *rule;
1484 struct hlist_node *node;
1487 spin_lock_bh(&hdev->fd_rule_lock);
1488 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1489 rule_locs[cnt] = rule->location;
1492 spin_unlock_bh(&hdev->fd_rule_lock);
1494 if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1500 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1502 u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1503 struct hclge_dbg_tcam_msg tcam_msg;
1504 int i, ret, rule_cnt;
1509 if (!hnae3_dev_fd_supported(hdev)) {
1510 dev_err(&hdev->pdev->dev,
1511 "Only FD-supported dev supports dump fd tcam\n");
1515 if (!hdev->hclge_fd_rule_num || !rule_num)
1518 rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1522 tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1528 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1531 dev_err(&hdev->pdev->dev,
1532 "failed to get rule number, ret = %d\n", ret);
1537 for (i = 0; i < rule_cnt; i++) {
1538 tcam_msg.stage = HCLGE_FD_STAGE_1;
1539 tcam_msg.loc = rule_locs[i];
1541 ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1543 dev_err(&hdev->pdev->dev,
1544 "failed to get fd tcam key x, ret = %d\n", ret);
1548 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1550 ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1552 dev_err(&hdev->pdev->dev,
1553 "failed to get fd tcam key y, ret = %d\n", ret);
1557 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1566 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1568 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1569 struct hclge_fd_ad_cnt_read_cmd *req;
1570 char str_id[HCLGE_DBG_ID_LEN];
1571 struct hclge_desc desc;
1577 pos += scnprintf(buf + pos, len - pos,
1578 "func_id\thit_times\n");
1580 for (i = 0; i < func_num; i++) {
1581 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1582 req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1583 req->index = cpu_to_le16(i);
1584 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1586 dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1590 cnt = le64_to_cpu(req->cnt);
1591 hclge_dbg_get_func_id_str(str_id, i);
1592 pos += scnprintf(buf + pos, len - pos,
1593 "%s\t%llu\n", str_id, cnt);
1599 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1603 pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1604 hdev->rst_stats.pf_rst_cnt);
1605 pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1606 hdev->rst_stats.flr_rst_cnt);
1607 pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1608 hdev->rst_stats.global_rst_cnt);
1609 pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1610 hdev->rst_stats.imp_rst_cnt);
1611 pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1612 hdev->rst_stats.reset_done_cnt);
1613 pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1614 hdev->rst_stats.hw_reset_done_cnt);
1615 pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1616 hdev->rst_stats.reset_cnt);
1617 pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1618 hdev->rst_stats.reset_fail_cnt);
1619 pos += scnprintf(buf + pos, len - pos,
1620 "vector0 interrupt enable status: 0x%x\n",
1621 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
1622 pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
1623 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
1624 pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
1625 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
1626 pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
1627 hclge_read_dev(&hdev->hw,
1628 HCLGE_RAS_PF_OTHER_INT_STS_REG));
1629 pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
1630 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
1631 pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
1632 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
1633 pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
1634 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
1635 pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1641 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1643 unsigned long rem_nsec;
1648 rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1650 pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1651 (unsigned long)lc, rem_nsec / 1000);
1652 pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1653 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1654 pos += scnprintf(buf + pos, len - pos,
1655 "last_service_task_processed: %lu(jiffies)\n",
1656 hdev->last_serv_processed);
1657 pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1658 hdev->serv_processed_cnt);
1663 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1667 pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1669 pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1670 hdev->num_roce_msi);
1671 pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1672 hdev->num_msi_used);
1673 pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1674 hdev->num_msi_left);
1679 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1680 char *buf, int len, u32 bd_num)
1682 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1684 struct hclge_desc *desc_index = desc_src;
1689 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1691 for (i = 0; i < bd_num; i++) {
1693 while (j < HCLGE_DESC_DATA_LEN - 1) {
1694 pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1696 pos += scnprintf(buf + pos, len - pos, "0x%08x ",
1697 le32_to_cpu(desc_index->data[j++]));
1698 pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1699 le32_to_cpu(desc_index->data[j++]));
1700 offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1707 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1709 struct hclge_get_imp_bd_cmd *req;
1710 struct hclge_desc *desc_src;
1711 struct hclge_desc desc;
1715 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1717 req = (struct hclge_get_imp_bd_cmd *)desc.data;
1718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1720 dev_err(&hdev->pdev->dev,
1721 "failed to get imp statistics bd number, ret = %d\n",
1726 bd_num = le32_to_cpu(req->bd_num);
1728 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1732 ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1733 HCLGE_OPC_IMP_STATS_INFO);
1736 dev_err(&hdev->pdev->dev,
1737 "failed to get imp statistics, ret = %d\n", ret);
1741 hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1748 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
1749 #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
1751 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1752 char *buf, int *len, int *pos)
1754 #define HCLGE_CMD_DATA_NUM 6
1756 int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1759 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1760 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1761 if (i == 0 && j == 0)
1764 *pos += scnprintf(buf + *pos, *len - *pos,
1765 "0x%04x | 0x%08x\n", offset,
1766 le32_to_cpu(desc[i].data[j]));
1768 offset += sizeof(u32);
1769 *index -= sizeof(u32);
1778 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1780 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
1782 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1783 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1784 int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1789 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1792 data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1793 if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1794 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1796 data0 |= (u32)index << 16;
1797 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1798 HCLGE_OPC_QUERY_NCL_CONFIG);
1802 hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
1808 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1810 struct phy_device *phydev = hdev->hw.mac.phydev;
1811 struct hclge_config_mac_mode_cmd *req_app;
1812 struct hclge_common_lb_cmd *req_common;
1813 struct hclge_desc desc;
1818 req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1819 req_common = (struct hclge_common_lb_cmd *)desc.data;
1821 pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1822 hdev->hw.mac.mac_id);
1824 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1825 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1827 dev_err(&hdev->pdev->dev,
1828 "failed to dump app loopback status, ret = %d\n", ret);
1832 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1833 HCLGE_MAC_APP_LP_B);
1834 pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1835 state_str[loopback_en]);
1837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1838 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1840 dev_err(&hdev->pdev->dev,
1841 "failed to dump common loopback status, ret = %d\n",
1846 loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
1847 pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1848 state_str[loopback_en]);
1850 loopback_en = req_common->enable &
1851 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1852 pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1853 state_str[loopback_en]);
1856 loopback_en = phydev->loopback_enabled;
1857 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1858 state_str[loopback_en]);
1859 } else if (hnae3_dev_phy_imp_supported(hdev)) {
1860 loopback_en = req_common->enable &
1861 HCLGE_CMD_GE_PHY_INNER_LOOP_B;
1862 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1863 state_str[loopback_en]);
1869 /* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
1870 * @hdev: pointer to struct hclge_dev
1873 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1875 struct hclge_mac_tnl_stats stats;
1876 unsigned long rem_nsec;
1879 pos += scnprintf(buf + pos, len - pos,
1880 "Recently generated mac tnl interruption:\n");
1882 while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1883 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1885 pos += scnprintf(buf + pos, len - pos,
1886 "[%07lu.%03lu] status = 0x%x\n",
1887 (unsigned long)stats.time, rem_nsec / 1000,
1895 static const struct hclge_dbg_item mac_list_items[] = {
1901 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1904 char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
1905 char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
1906 char *result[ARRAY_SIZE(mac_list_items)];
1907 struct hclge_mac_node *mac_node, *tmp;
1908 struct hclge_vport *vport;
1909 struct list_head *list;
1914 for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
1915 result[i] = &data_str[i][0];
1917 pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
1918 is_unicast ? "UC" : "MC");
1919 hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
1920 NULL, ARRAY_SIZE(mac_list_items));
1921 pos += scnprintf(buf + pos, len - pos, "%s", content);
1923 for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
1924 vport = &hdev->vport[func_id];
1925 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
1926 spin_lock_bh(&vport->mac_list_lock);
1927 list_for_each_entry_safe(mac_node, tmp, list, node) {
1929 result[i++] = hclge_dbg_get_func_id_str(str_id,
1931 sprintf(result[i++], "%pM", mac_node->mac_addr);
1932 sprintf(result[i++], "%5s",
1933 hclge_mac_state_str[mac_node->state]);
1934 hclge_dbg_fill_content(content, sizeof(content),
1936 (const char **)result,
1937 ARRAY_SIZE(mac_list_items));
1938 pos += scnprintf(buf + pos, len - pos, "%s", content);
1940 spin_unlock_bh(&vport->mac_list_lock);
1944 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
1946 u8 func_num = pci_num_vf(hdev->pdev) + 1;
1947 struct hclge_vport *vport;
1951 pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
1952 hdev->num_alloc_vport);
1953 pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
1954 hdev->max_umv_size);
1955 pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
1956 hdev->wanted_umv_size);
1957 pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
1958 hdev->priv_umv_size);
1960 mutex_lock(&hdev->vport_lock);
1961 pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
1962 hdev->share_umv_size);
1963 for (i = 0; i < func_num; i++) {
1964 vport = &hdev->vport[i];
1965 pos += scnprintf(buf + pos, len - pos,
1966 "vport(%u) used_umv_num : %u\n",
1967 i, vport->used_umv_num);
1969 mutex_unlock(&hdev->vport_lock);
1974 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
1975 struct hclge_dbg_vlan_cfg *vlan_cfg)
1977 struct hclge_vport_vtag_rx_cfg_cmd *req;
1978 struct hclge_desc desc;
1983 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
1985 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
1986 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
1987 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
1988 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
1990 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1992 dev_err(&hdev->pdev->dev,
1993 "failed to get vport%u rxvlan cfg, ret = %d\n",
1998 rx_cfg = req->vport_vlan_cfg;
1999 vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2000 vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2001 vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2002 vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2003 vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2004 vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2009 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2010 struct hclge_dbg_vlan_cfg *vlan_cfg)
2012 struct hclge_vport_vtag_tx_cfg_cmd *req;
2013 struct hclge_desc desc;
2018 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2019 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2020 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2021 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2022 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2024 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2026 dev_err(&hdev->pdev->dev,
2027 "failed to get vport%u txvlan cfg, ret = %d\n",
2032 tx_cfg = req->vport_vlan_cfg;
2033 vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2035 vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2036 vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2037 vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2038 vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2039 vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2040 vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2041 vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2046 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2047 u8 vlan_type, u8 vf_id,
2048 struct hclge_desc *desc)
2050 struct hclge_vlan_filter_ctrl_cmd *req;
2053 hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2054 req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2055 req->vlan_type = vlan_type;
2058 ret = hclge_cmd_send(&hdev->hw, desc, 1);
2060 dev_err(&hdev->pdev->dev,
2061 "failed to get vport%u vlan filter config, ret = %d.\n",
2067 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2068 u8 vf_id, u8 *vlan_fe)
2070 struct hclge_vlan_filter_ctrl_cmd *req;
2071 struct hclge_desc desc;
2074 ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2078 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2079 *vlan_fe = req->vlan_fe;
2084 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2085 u8 vf_id, u8 *bypass_en)
2087 struct hclge_port_vlan_filter_bypass_cmd *req;
2088 struct hclge_desc desc;
2091 if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2094 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2095 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2098 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2100 dev_err(&hdev->pdev->dev,
2101 "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2106 *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2111 static const struct hclge_dbg_item vlan_filter_items[] = {
2113 { "I_VF_VLAN_FILTER", 2 },
2114 { "E_VF_VLAN_FILTER", 2 },
2115 { "PORT_VLAN_FILTER_BYPASS", 0 }
2118 static const struct hclge_dbg_item vlan_offload_items[] = {
2121 { "ACCEPT_TAG1", 2 },
2122 { "ACCEPT_TAG2", 2 },
2123 { "ACCEPT_UNTAG1", 2 },
2124 { "ACCEPT_UNTAG2", 2 },
2125 { "INSERT_TAG1", 2 },
2126 { "INSERT_TAG2", 2 },
2128 { "STRIP_TAG1", 2 },
2129 { "STRIP_TAG2", 2 },
2132 { "PRI_ONLY_TAG1", 2 },
2133 { "PRI_ONLY_TAG2", 0 }
2136 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2139 char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2140 const char *result[ARRAY_SIZE(vlan_filter_items)];
2141 u8 i, j, vlan_fe, bypass, ingress, egress;
2142 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2145 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2149 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2150 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2152 *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
2153 state_str[ingress]);
2154 *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2157 hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2158 NULL, ARRAY_SIZE(vlan_filter_items));
2159 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2161 for (i = 0; i < func_num; i++) {
2162 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2167 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2168 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2169 ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2173 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2174 result[j++] = state_str[ingress];
2175 result[j++] = state_str[egress];
2177 test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2178 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2179 hclge_dbg_fill_content(content, sizeof(content),
2180 vlan_filter_items, result,
2181 ARRAY_SIZE(vlan_filter_items));
2182 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2184 *pos += scnprintf(buf + *pos, len - *pos, "\n");
2189 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2192 char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2193 const char *result[ARRAY_SIZE(vlan_offload_items)];
2194 char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2195 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2196 struct hclge_dbg_vlan_cfg vlan_cfg;
2200 hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2201 NULL, ARRAY_SIZE(vlan_offload_items));
2202 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2204 for (i = 0; i < func_num; i++) {
2205 ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2209 ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2213 sprintf(str_pvid, "%u", vlan_cfg.pvid);
2215 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2216 result[j++] = str_pvid;
2217 result[j++] = state_str[vlan_cfg.accept_tag1];
2218 result[j++] = state_str[vlan_cfg.accept_tag2];
2219 result[j++] = state_str[vlan_cfg.accept_untag1];
2220 result[j++] = state_str[vlan_cfg.accept_untag2];
2221 result[j++] = state_str[vlan_cfg.insert_tag1];
2222 result[j++] = state_str[vlan_cfg.insert_tag2];
2223 result[j++] = state_str[vlan_cfg.shift_tag];
2224 result[j++] = state_str[vlan_cfg.strip_tag1];
2225 result[j++] = state_str[vlan_cfg.strip_tag2];
2226 result[j++] = state_str[vlan_cfg.drop_tag1];
2227 result[j++] = state_str[vlan_cfg.drop_tag2];
2228 result[j++] = state_str[vlan_cfg.pri_only1];
2229 result[j++] = state_str[vlan_cfg.pri_only2];
2231 hclge_dbg_fill_content(content, sizeof(content),
2232 vlan_offload_items, result,
2233 ARRAY_SIZE(vlan_offload_items));
2234 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
/* Top-level VLAN dump: filter state table followed by offload table,
 * both appending to buf through the shared "pos" offset.
 */
static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
				      int len)
{
	int pos = 0;
	int ret;

	ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
	if (ret)
		return ret;

	return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
}
2253 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2255 struct hclge_ptp *ptp = hdev->ptp;
2256 u32 sw_cfg = ptp->ptp_cfg;
2257 unsigned int tx_start;
2258 unsigned int last_rx;
2263 pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2265 pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2266 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2268 pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2269 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2271 pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2272 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2275 last_rx = jiffies_to_msecs(ptp->last_rx);
2276 pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2277 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2278 pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2280 tx_start = jiffies_to_msecs(ptp->tx_start);
2281 pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2282 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2283 pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2284 pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2286 pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2288 pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2289 ptp->last_tx_seqid);
2291 ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2295 pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2298 pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2299 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2304 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2306 hclge_dbg_dump_mac_list(hdev, buf, len, true);
2311 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2313 hclge_dbg_dump_mac_list(hdev, buf, len, false);
2318 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2320 .cmd = HNAE3_DBG_CMD_TM_NODES,
2321 .dbg_dump = hclge_dbg_dump_tm_nodes,
2324 .cmd = HNAE3_DBG_CMD_TM_PRI,
2325 .dbg_dump = hclge_dbg_dump_tm_pri,
2328 .cmd = HNAE3_DBG_CMD_TM_QSET,
2329 .dbg_dump = hclge_dbg_dump_tm_qset,
2332 .cmd = HNAE3_DBG_CMD_TM_MAP,
2333 .dbg_dump = hclge_dbg_dump_tm_map,
2336 .cmd = HNAE3_DBG_CMD_TM_PG,
2337 .dbg_dump = hclge_dbg_dump_tm_pg,
2340 .cmd = HNAE3_DBG_CMD_TM_PORT,
2341 .dbg_dump = hclge_dbg_dump_tm_port,
2344 .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2345 .dbg_dump = hclge_dbg_dump_tc,
2348 .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2349 .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2352 .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2353 .dbg_dump = hclge_dbg_dump_qos_pri_map,
2356 .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2357 .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2360 .cmd = HNAE3_DBG_CMD_MAC_UC,
2361 .dbg_dump = hclge_dbg_dump_mac_uc,
2364 .cmd = HNAE3_DBG_CMD_MAC_MC,
2365 .dbg_dump = hclge_dbg_dump_mac_mc,
2368 .cmd = HNAE3_DBG_CMD_MNG_TBL,
2369 .dbg_dump = hclge_dbg_dump_mng_table,
2372 .cmd = HNAE3_DBG_CMD_LOOPBACK,
2373 .dbg_dump = hclge_dbg_dump_loopback,
2376 .cmd = HNAE3_DBG_CMD_PTP_INFO,
2377 .dbg_dump = hclge_dbg_dump_ptp_info,
2380 .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2381 .dbg_dump = hclge_dbg_dump_interrupt,
2384 .cmd = HNAE3_DBG_CMD_RESET_INFO,
2385 .dbg_dump = hclge_dbg_dump_rst_info,
2388 .cmd = HNAE3_DBG_CMD_IMP_INFO,
2389 .dbg_dump = hclge_dbg_get_imp_stats_info,
2392 .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2393 .dbg_dump = hclge_dbg_dump_ncl_config,
2396 .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2397 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2400 .cmd = HNAE3_DBG_CMD_REG_SSU,
2401 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2404 .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2405 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2408 .cmd = HNAE3_DBG_CMD_REG_RPU,
2409 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2412 .cmd = HNAE3_DBG_CMD_REG_NCSI,
2413 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2416 .cmd = HNAE3_DBG_CMD_REG_RTC,
2417 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2420 .cmd = HNAE3_DBG_CMD_REG_PPP,
2421 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2424 .cmd = HNAE3_DBG_CMD_REG_RCB,
2425 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2428 .cmd = HNAE3_DBG_CMD_REG_TQP,
2429 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2432 .cmd = HNAE3_DBG_CMD_REG_MAC,
2433 .dbg_dump = hclge_dbg_dump_mac,
2436 .cmd = HNAE3_DBG_CMD_REG_DCB,
2437 .dbg_dump = hclge_dbg_dump_dcb,
2440 .cmd = HNAE3_DBG_CMD_FD_TCAM,
2441 .dbg_dump = hclge_dbg_dump_fd_tcam,
2444 .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2445 .dbg_dump = hclge_dbg_dump_mac_tnl_status,
2448 .cmd = HNAE3_DBG_CMD_SERV_INFO,
2449 .dbg_dump = hclge_dbg_dump_serv_info,
2452 .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2453 .dbg_dump = hclge_dbg_dump_vlan_config,
2456 .cmd = HNAE3_DBG_CMD_FD_COUNTER,
2457 .dbg_dump = hclge_dbg_dump_fd_counter,
2460 .cmd = HNAE3_DBG_CMD_UMV_INFO,
2461 .dbg_dump = hclge_dbg_dump_umv_info,
2465 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2468 struct hclge_vport *vport = hclge_get_vport(handle);
2469 const struct hclge_dbg_func *cmd_func;
2470 struct hclge_dev *hdev = vport->back;
2473 for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2474 if (cmd == hclge_dbg_cmd_func[i].cmd) {
2475 cmd_func = &hclge_dbg_cmd_func[i];
2476 if (cmd_func->dbg_dump)
2477 return cmd_func->dbg_dump(hdev, buf, len);
2479 return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2484 dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);