1 // SPDX-License-Identifier: GPL-2.0-only
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015 QLogic Corporation
6 #include <linux/module.h>
7 #include <linux/vmalloc.h>
8 #include <linux/crc32.h>
14 #include "qed_reg_addr.h"
16 /* Memory groups enum */
34 MEM_GROUP_CONN_CFC_MEM,
37 MEM_GROUP_CAU_MEM_EXT,
47 MEM_GROUP_TASK_CFC_MEM,
51 /* Memory groups names */
52 static const char * const s_mem_group_names[] = {
85 /* Idle check conditions */
87 static u32 cond5(const u32 *r, const u32 *imm)
89 return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
92 static u32 cond7(const u32 *r, const u32 *imm)
94 return ((r[0] >> imm[0]) & imm[1]) != imm[2];
97 static u32 cond6(const u32 *r, const u32 *imm)
99 return (r[0] & imm[0]) != imm[1];
102 static u32 cond9(const u32 *r, const u32 *imm)
104 return ((r[0] & imm[0]) >> imm[1]) !=
105 (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
108 static u32 cond10(const u32 *r, const u32 *imm)
110 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
113 static u32 cond4(const u32 *r, const u32 *imm)
115 return (r[0] & ~imm[0]) != imm[1];
118 static u32 cond0(const u32 *r, const u32 *imm)
120 return (r[0] & ~r[1]) != imm[0];
123 static u32 cond1(const u32 *r, const u32 *imm)
125 return r[0] != imm[0];
128 static u32 cond11(const u32 *r, const u32 *imm)
130 return r[0] != r[1] && r[2] == imm[0];
133 static u32 cond12(const u32 *r, const u32 *imm)
135 return r[0] != r[1] && r[2] > imm[0];
138 static u32 cond3(const u32 *r, const u32 *imm)
143 static u32 cond13(const u32 *r, const u32 *imm)
145 return r[0] & imm[0];
148 static u32 cond8(const u32 *r, const u32 *imm)
150 return r[0] < (r[1] - imm[0]);
153 static u32 cond2(const u32 *r, const u32 *imm)
155 return r[0] > imm[0];
158 /* Array of Idle Check conditions */
159 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
176 #define NUM_PHYS_BLOCKS 84
178 #define NUM_DBG_RESET_REGS 8
180 /******************************* Data Types **********************************/
191 /* CM context types */
200 /* Debug bus frame modes */
201 enum dbg_bus_frame_modes {
202 DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
203 DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
204 DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dwords, 3 HW dwords */
205 DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
206 DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
207 DBG_BUS_NUM_FRAME_MODES
210 /* Chip constant definitions */
216 /* HW type constant definitions */
217 struct hw_type_defs {
224 /* RBC reset definitions */
225 struct rbc_reset_defs {
227 u32 reset_val[MAX_CHIP_IDS];
230 /* Storm constant definitions.
231 * Addresses are in bytes, sizes are in quad-regs.
235 enum block_id sem_block_id;
236 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
238 u32 sem_fast_mem_addr;
239 u32 sem_frame_mode_addr;
240 u32 sem_slow_enable_addr;
241 u32 sem_slow_mode_addr;
242 u32 sem_slow_mode1_conf_addr;
243 u32 sem_sync_dbg_empty_addr;
244 u32 sem_gpre_vect_addr;
246 u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
247 u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
250 /* Debug Bus Constraint operation constant definitions */
251 struct dbg_bus_constraint_op_defs {
256 /* Storm Mode definitions */
257 struct storm_mode_defs {
261 u32 src_disable_reg_addr;
263 bool exists[MAX_CHIP_IDS];
266 struct grc_param_defs {
267 u32 default_val[MAX_CHIP_IDS];
272 u32 exclude_all_preset_val;
273 u32 crash_preset_val[MAX_CHIP_IDS];
276 /* Address is in 128b units. Width is in bits. */
277 struct rss_mem_defs {
278 const char *mem_name;
279 const char *type_name;
282 u32 num_entries[MAX_CHIP_IDS];
285 struct vfc_ram_defs {
286 const char *mem_name;
287 const char *type_name;
292 struct big_ram_defs {
293 const char *instance_name;
294 enum mem_groups mem_group_id;
295 enum mem_groups ram_mem_group_id;
296 enum dbg_grc_params grc_param;
299 u32 is_256b_reg_addr;
300 u32 is_256b_bit_offset[MAX_CHIP_IDS];
301 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
305 const char *phy_name;
307 /* PHY base GRC address */
310 /* Relative address of indirect TBUS address register (bits 0..7) */
311 u32 tbus_addr_lo_addr;
313 /* Relative address of indirect TBUS address register (bits 8..10) */
314 u32 tbus_addr_hi_addr;
316 /* Relative address of indirect TBUS data register (bits 0..7) */
317 u32 tbus_data_lo_addr;
319 /* Relative address of indirect TBUS data register (bits 8..11) */
320 u32 tbus_data_hi_addr;
323 /* Split type definitions */
324 struct split_type_defs {
328 /******************************** Constants **********************************/
330 #define BYTES_IN_DWORD sizeof(u32)
331 /* In the macros below, size and offset are specified in bits */
332 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
333 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
334 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
335 #define FIELD_DWORD_OFFSET(type, field) \
336 (int)(FIELD_BIT_OFFSET(type, field) / 32)
337 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
338 #define FIELD_BIT_MASK(type, field) \
339 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
340 FIELD_DWORD_SHIFT(type, field))
342 #define SET_VAR_FIELD(var, type, field, val) \
344 var[FIELD_DWORD_OFFSET(type, field)] &= \
345 (~FIELD_BIT_MASK(type, field)); \
346 var[FIELD_DWORD_OFFSET(type, field)] |= \
347 (val) << FIELD_DWORD_SHIFT(type, field); \
350 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
352 for (i = 0; i < (arr_size); i++) \
353 qed_wr(dev, ptt, addr, (arr)[i]); \
356 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
357 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
359 /* extra lines include a signature line + optional latency events line */
360 #define NUM_EXTRA_DBG_LINES(block) \
361 (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
362 #define NUM_DBG_LINES(block) \
363 ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
365 #define USE_DMAE true
366 #define PROTECT_WIDE_BUS true
368 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
369 #define RAM_LINES_TO_BYTES(lines) \
370 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
372 #define REG_DUMP_LEN_SHIFT 24
373 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
374 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
376 #define IDLE_CHK_RULE_SIZE_DWORDS \
377 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
379 #define IDLE_CHK_RESULT_HDR_DWORDS \
380 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
382 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
383 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
385 #define PAGE_MEM_DESC_SIZE_DWORDS \
386 BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
388 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
390 /* The sizes and offsets below are specified in bits */
391 #define VFC_CAM_CMD_STRUCT_SIZE 64
392 #define VFC_CAM_CMD_ROW_OFFSET 48
393 #define VFC_CAM_CMD_ROW_SIZE 9
394 #define VFC_CAM_ADDR_STRUCT_SIZE 16
395 #define VFC_CAM_ADDR_OP_OFFSET 0
396 #define VFC_CAM_ADDR_OP_SIZE 4
397 #define VFC_CAM_RESP_STRUCT_SIZE 256
398 #define VFC_RAM_ADDR_STRUCT_SIZE 16
399 #define VFC_RAM_ADDR_OP_OFFSET 0
400 #define VFC_RAM_ADDR_OP_SIZE 2
401 #define VFC_RAM_ADDR_ROW_OFFSET 2
402 #define VFC_RAM_ADDR_ROW_SIZE 10
403 #define VFC_RAM_RESP_STRUCT_SIZE 256
405 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
406 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
407 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
408 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
409 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
410 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
412 #define NUM_VFC_RAM_TYPES 4
414 #define VFC_CAM_NUM_ROWS 512
416 #define VFC_OPCODE_CAM_RD 14
417 #define VFC_OPCODE_RAM_RD 0
419 #define NUM_RSS_MEM_TYPES 5
421 #define NUM_BIG_RAM_TYPES 3
422 #define BIG_RAM_NAME_LEN 3
424 #define NUM_PHY_TBUS_ADDRESSES 2048
425 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
427 #define RESET_REG_UNRESET_OFFSET 4
429 #define STALL_DELAY_MS 500
431 #define STATIC_DEBUG_LINE_DWORDS 9
433 #define NUM_COMMON_GLOBAL_PARAMS 9
435 #define MAX_RECURSION_DEPTH 10
437 #define FW_IMG_MAIN 1
439 #define REG_FIFO_ELEMENT_DWORDS 2
440 #define REG_FIFO_DEPTH_ELEMENTS 32
441 #define REG_FIFO_DEPTH_DWORDS \
442 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
444 #define IGU_FIFO_ELEMENT_DWORDS 4
445 #define IGU_FIFO_DEPTH_ELEMENTS 64
446 #define IGU_FIFO_DEPTH_DWORDS \
447 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
449 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
450 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
451 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
452 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
453 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
455 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
457 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
459 #define MAX_SW_PLTAFORM_STR_SIZE 64
461 #define EMPTY_FW_VERSION_STR "???_???_???_???"
462 #define EMPTY_FW_IMAGE_STR "???????????????"
464 /***************************** Constant Arrays *******************************/
466 /* Chip constant definitions array */
467 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
468 {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
469 {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
472 /* Storm constant definitions array */
473 static struct storm_defs s_storm_defs[] = {
476 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
478 TSEM_REG_FAST_MEMORY,
479 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
480 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
481 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
482 TCM_REG_CTX_RBC_ACCS,
483 {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
484 TCM_REG_SM_TASK_CTX},
485 {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
490 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
492 MSEM_REG_FAST_MEMORY,
493 MSEM_REG_DBG_FRAME_MODE_BB_K2,
494 MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
495 MSEM_REG_SLOW_DBG_MODE_BB_K2,
496 MSEM_REG_DBG_MODE1_CFG_BB_K2,
497 MSEM_REG_SYNC_DBG_EMPTY,
498 MSEM_REG_DBG_GPRE_VECT,
499 MCM_REG_CTX_RBC_ACCS,
500 {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
501 MCM_REG_SM_TASK_CTX },
502 {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/
507 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
509 USEM_REG_FAST_MEMORY,
510 USEM_REG_DBG_FRAME_MODE_BB_K2,
511 USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
512 USEM_REG_SLOW_DBG_MODE_BB_K2,
513 USEM_REG_DBG_MODE1_CFG_BB_K2,
514 USEM_REG_SYNC_DBG_EMPTY,
515 USEM_REG_DBG_GPRE_VECT,
516 UCM_REG_CTX_RBC_ACCS,
517 {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
518 UCM_REG_SM_TASK_CTX},
519 {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
524 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
526 XSEM_REG_FAST_MEMORY,
527 XSEM_REG_DBG_FRAME_MODE_BB_K2,
528 XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
529 XSEM_REG_SLOW_DBG_MODE_BB_K2,
530 XSEM_REG_DBG_MODE1_CFG_BB_K2,
531 XSEM_REG_SYNC_DBG_EMPTY,
532 XSEM_REG_DBG_GPRE_VECT,
533 XCM_REG_CTX_RBC_ACCS,
534 {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
535 {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
540 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
542 YSEM_REG_FAST_MEMORY,
543 YSEM_REG_DBG_FRAME_MODE_BB_K2,
544 YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
545 YSEM_REG_SLOW_DBG_MODE_BB_K2,
546 YSEM_REG_DBG_MODE1_CFG_BB_K2,
547 YSEM_REG_SYNC_DBG_EMPTY,
548 YSEM_REG_DBG_GPRE_VECT,
549 YCM_REG_CTX_RBC_ACCS,
550 {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
551 YCM_REG_SM_TASK_CTX},
552 {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
557 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
559 PSEM_REG_FAST_MEMORY,
560 PSEM_REG_DBG_FRAME_MODE_BB_K2,
561 PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
562 PSEM_REG_SLOW_DBG_MODE_BB_K2,
563 PSEM_REG_DBG_MODE1_CFG_BB_K2,
564 PSEM_REG_SYNC_DBG_EMPTY,
565 PSEM_REG_DBG_GPRE_VECT,
566 PCM_REG_CTX_RBC_ACCS,
567 {0, PCM_REG_SM_CON_CTX, 0, 0},
568 {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
572 static struct hw_type_defs s_hw_type_defs[] = {
574 {"asic", 1, 256, 32768},
575 {"reserved", 0, 0, 0},
576 {"reserved2", 0, 0, 0},
577 {"reserved3", 0, 0, 0}
580 static struct grc_param_defs s_grc_param_defs[] = {
581 /* DBG_GRC_PARAM_DUMP_TSTORM */
582 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
584 /* DBG_GRC_PARAM_DUMP_MSTORM */
585 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
587 /* DBG_GRC_PARAM_DUMP_USTORM */
588 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
590 /* DBG_GRC_PARAM_DUMP_XSTORM */
591 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
593 /* DBG_GRC_PARAM_DUMP_YSTORM */
594 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
596 /* DBG_GRC_PARAM_DUMP_PSTORM */
597 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
599 /* DBG_GRC_PARAM_DUMP_REGS */
600 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
602 /* DBG_GRC_PARAM_DUMP_RAM */
603 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
605 /* DBG_GRC_PARAM_DUMP_PBUF */
606 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
608 /* DBG_GRC_PARAM_DUMP_IOR */
609 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
611 /* DBG_GRC_PARAM_DUMP_VFC */
612 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
614 /* DBG_GRC_PARAM_DUMP_CM_CTX */
615 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
617 /* DBG_GRC_PARAM_DUMP_ILT */
618 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
620 /* DBG_GRC_PARAM_DUMP_RSS */
621 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
623 /* DBG_GRC_PARAM_DUMP_CAU */
624 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
626 /* DBG_GRC_PARAM_DUMP_QM */
627 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
629 /* DBG_GRC_PARAM_DUMP_MCP */
630 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
632 /* DBG_GRC_PARAM_DUMP_DORQ */
633 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
635 /* DBG_GRC_PARAM_DUMP_CFC */
636 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
638 /* DBG_GRC_PARAM_DUMP_IGU */
639 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
641 /* DBG_GRC_PARAM_DUMP_BRB */
642 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
644 /* DBG_GRC_PARAM_DUMP_BTB */
645 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
647 /* DBG_GRC_PARAM_DUMP_BMB */
648 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
650 /* DBG_GRC_PARAM_RESERVED1 */
651 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
653 /* DBG_GRC_PARAM_DUMP_MULD */
654 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
656 /* DBG_GRC_PARAM_DUMP_PRS */
657 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
659 /* DBG_GRC_PARAM_DUMP_DMAE */
660 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
662 /* DBG_GRC_PARAM_DUMP_TM */
663 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
665 /* DBG_GRC_PARAM_DUMP_SDM */
666 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
668 /* DBG_GRC_PARAM_DUMP_DIF */
669 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
671 /* DBG_GRC_PARAM_DUMP_STATIC */
672 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
674 /* DBG_GRC_PARAM_UNSTALL */
675 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
677 /* DBG_GRC_PARAM_RESERVED2 */
678 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
680 /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
681 {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
683 /* DBG_GRC_PARAM_EXCLUDE_ALL */
684 {{0, 0}, 0, 1, true, false, 0, {0, 0}},
686 /* DBG_GRC_PARAM_CRASH */
687 {{0, 0}, 0, 1, true, false, 0, {0, 0}},
689 /* DBG_GRC_PARAM_PARITY_SAFE */
690 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
692 /* DBG_GRC_PARAM_DUMP_CM */
693 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
695 /* DBG_GRC_PARAM_DUMP_PHY */
696 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
698 /* DBG_GRC_PARAM_NO_MCP */
699 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
701 /* DBG_GRC_PARAM_NO_FW_VER */
702 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
704 /* DBG_GRC_PARAM_RESERVED3 */
705 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
707 /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
708 {{0, 1}, 0, 1, false, false, 0, {0, 1}},
710 /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
711 {{1, 1}, 0, 1, false, false, 0, {0, 0}},
713 /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
714 {{1, 1}, 0, 1, false, false, 0, {0, 0}},
716 /* DBG_GRC_PARAM_DUMP_CAU_EXT */
717 {{0, 0}, 0, 1, false, false, 0, {1, 1}}
720 static struct rss_mem_defs s_rss_mem_defs[] = {
721 {"rss_mem_cid", "rss_cid", 0, 32,
724 {"rss_mem_key_msb", "rss_key", 1024, 256,
727 {"rss_mem_key_lsb", "rss_key", 2048, 64,
730 {"rss_mem_info", "rss_info", 3072, 16,
733 {"rss_mem_ind", "rss_ind", 4096, 16,
737 static struct vfc_ram_defs s_vfc_ram_defs[] = {
738 {"vfc_ram_tt1", "vfc_ram", 0, 512},
739 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
740 {"vfc_ram_stt2", "vfc_ram", 640, 32},
741 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
744 static struct big_ram_defs s_big_ram_defs[] = {
745 {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
746 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
747 MISC_REG_BLOCK_256B_EN, {0, 0},
750 {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
751 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
752 MISC_REG_BLOCK_256B_EN, {0, 1},
755 {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
756 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
757 MISCS_REG_BLOCK_256B_EN, {0, 0},
761 static struct rbc_reset_defs s_rbc_reset_defs[] = {
762 {MISCS_REG_RESET_PL_HV,
764 {MISC_REG_RESET_PL_PDA_VMAIN_1,
765 {0x4404040, 0x4404040}},
766 {MISC_REG_RESET_PL_PDA_VMAIN_2,
768 {MISC_REG_RESET_PL_PDA_VAUX,
772 static struct phy_defs s_phy_defs[] = {
773 {"nw_phy", NWS_REG_NWS_CMU_K2,
774 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
775 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
776 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
777 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
778 {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
779 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
780 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
781 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
782 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
783 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
784 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
785 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
786 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
787 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
788 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
789 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
790 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
791 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
792 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
795 static struct split_type_defs s_split_type_defs[] = {
796 /* SPLIT_TYPE_NONE */
799 /* SPLIT_TYPE_PORT */
805 /* SPLIT_TYPE_PORT_PF */
812 /**************************** Private Functions ******************************/
814 /* Reads and returns a single dword from the specified unaligned buffer */
815 static u32 qed_read_unaligned_dword(u8 *buf)
819 memcpy((u8 *)&dword, buf, sizeof(dword));
823 /* Sets the value of the specified GRC param */
824 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
825 enum dbg_grc_params grc_param, u32 val)
827 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
829 dev_data->grc.param_val[grc_param] = val;
832 /* Returns the value of the specified GRC param */
833 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
834 enum dbg_grc_params grc_param)
836 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
838 return dev_data->grc.param_val[grc_param];
841 /* Initializes the GRC parameters */
842 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
844 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
846 if (!dev_data->grc.params_initialized) {
847 qed_dbg_grc_set_params_default(p_hwfn);
848 dev_data->grc.params_initialized = 1;
852 /* Sets pointer and size for the specified binary buffer type */
853 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
854 enum bin_dbg_buffer_type buf_type,
855 const u32 *ptr, u32 size)
857 struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
859 buf->ptr = (void *)ptr;
863 /* Initializes debug data for the specified device */
864 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
866 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
867 u8 num_pfs = 0, max_pfs_per_port = 0;
869 if (dev_data->initialized)
870 return DBG_STATUS_OK;
873 if (QED_IS_K2(p_hwfn->cdev)) {
874 dev_data->chip_id = CHIP_K2;
875 dev_data->mode_enable[MODE_K2] = 1;
876 dev_data->num_vfs = MAX_NUM_VFS_K2;
877 num_pfs = MAX_NUM_PFS_K2;
878 max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
879 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
880 dev_data->chip_id = CHIP_BB;
881 dev_data->mode_enable[MODE_BB] = 1;
882 dev_data->num_vfs = MAX_NUM_VFS_BB;
883 num_pfs = MAX_NUM_PFS_BB;
884 max_pfs_per_port = MAX_NUM_PFS_BB;
886 return DBG_STATUS_UNKNOWN_CHIP;
890 dev_data->hw_type = HW_TYPE_ASIC;
891 dev_data->mode_enable[MODE_ASIC] = 1;
894 switch (p_hwfn->cdev->num_ports_in_engine) {
896 dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
899 dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
902 dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
907 if (QED_IS_CMT(p_hwfn->cdev))
908 dev_data->mode_enable[MODE_100G] = 1;
910 /* Set number of ports */
911 if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
912 dev_data->mode_enable[MODE_100G])
913 dev_data->num_ports = 1;
914 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
915 dev_data->num_ports = 2;
916 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
917 dev_data->num_ports = 4;
919 /* Set number of PFs per port */
920 dev_data->num_pfs_per_port = min_t(u32,
921 num_pfs / dev_data->num_ports,
924 /* Initializes the GRC parameters */
925 qed_dbg_grc_init_params(p_hwfn);
927 dev_data->use_dmae = true;
928 dev_data->initialized = 1;
930 return DBG_STATUS_OK;
933 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
934 enum block_id block_id)
936 const struct dbg_block *dbg_block;
938 dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
939 return dbg_block + block_id;
942 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
947 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
949 return (const struct dbg_block_chip *)
950 p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
951 block_id * MAX_CHIP_IDS + dev_data->chip_id;
954 static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
958 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
960 return (const struct dbg_reset_reg *)
961 p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
962 reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
965 /* Reads the FW info structure for the specified Storm from the chip,
966 * and writes it to the specified fw_info pointer.
968 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
969 struct qed_ptt *p_ptt,
970 u8 storm_id, struct fw_info *fw_info)
972 struct storm_defs *storm = &s_storm_defs[storm_id];
973 struct fw_info_location fw_info_location;
976 memset(&fw_info_location, 0, sizeof(fw_info_location));
977 memset(fw_info, 0, sizeof(*fw_info));
979 /* Read first the address that points to fw_info location.
980 * The address is located in the last line of the Storm RAM.
982 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
983 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
984 sizeof(fw_info_location);
986 dest = (u32 *)&fw_info_location;
988 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
989 i++, addr += BYTES_IN_DWORD)
990 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
992 /* Read FW version info from Storm RAM */
993 if (fw_info_location.size > 0 && fw_info_location.size <=
995 addr = fw_info_location.grc_addr;
996 dest = (u32 *)fw_info;
997 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
998 i++, addr += BYTES_IN_DWORD)
999 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1003 /* Dumps the specified string to the specified buffer.
1004 * Returns the dumped size in bytes.
1006 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1009 strcpy(dump_buf, str);
1011 return (u32)strlen(str) + 1;
1014 /* Dumps zeros to align the specified buffer to dwords.
1015 * Returns the dumped size in bytes.
1017 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1019 u8 offset_in_dword, align_size;
1021 offset_in_dword = (u8)(byte_offset & 0x3);
1022 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1024 if (dump && align_size)
1025 memset(dump_buf, 0, align_size);
1030 /* Writes the specified string param to the specified buffer.
1031 * Returns the dumped size in dwords.
1033 static u32 qed_dump_str_param(u32 *dump_buf,
1035 const char *param_name, const char *param_val)
1037 char *char_buf = (char *)dump_buf;
1040 /* Dump param name */
1041 offset += qed_dump_str(char_buf + offset, dump, param_name);
1043 /* Indicate a string param value */
1045 *(char_buf + offset) = 1;
1048 /* Dump param value */
1049 offset += qed_dump_str(char_buf + offset, dump, param_val);
1051 /* Align buffer to next dword */
1052 offset += qed_dump_align(char_buf + offset, dump, offset);
1054 return BYTES_TO_DWORDS(offset);
1057 /* Writes the specified numeric param to the specified buffer.
1058 * Returns the dumped size in dwords.
1060 static u32 qed_dump_num_param(u32 *dump_buf,
1061 bool dump, const char *param_name, u32 param_val)
1063 char *char_buf = (char *)dump_buf;
1066 /* Dump param name */
1067 offset += qed_dump_str(char_buf + offset, dump, param_name);
1069 /* Indicate a numeric param value */
1071 *(char_buf + offset) = 0;
1074 /* Align buffer to next dword */
1075 offset += qed_dump_align(char_buf + offset, dump, offset);
1077 /* Dump param value (and change offset from bytes to dwords) */
1078 offset = BYTES_TO_DWORDS(offset);
1080 *(dump_buf + offset) = param_val;
1086 /* Reads the FW version and writes it as a param to the specified buffer.
1087 * Returns the dumped size in dwords.
1089 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1090 struct qed_ptt *p_ptt,
1091 u32 *dump_buf, bool dump)
1093 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1094 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1095 struct fw_info fw_info = { {0}, {0} };
1098 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1099 /* Read FW info from chip */
1100 qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1102 /* Create FW version/image strings */
1103 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1104 "%d_%d_%d_%d", fw_info.ver.num.major,
1105 fw_info.ver.num.minor, fw_info.ver.num.rev,
1106 fw_info.ver.num.eng) < 0)
1108 "Unexpected debug error: invalid FW version string\n");
1109 switch (fw_info.ver.image_id) {
1111 strcpy(fw_img_str, "main");
1114 strcpy(fw_img_str, "unknown");
1119 /* Dump FW version, image and timestamp */
1120 offset += qed_dump_str_param(dump_buf + offset,
1121 dump, "fw-version", fw_ver_str);
1122 offset += qed_dump_str_param(dump_buf + offset,
1123 dump, "fw-image", fw_img_str);
1124 offset += qed_dump_num_param(dump_buf + offset,
1126 "fw-timestamp", fw_info.ver.timestamp);
1131 /* Reads the MFW version and writes it as a param to the specified buffer.
1132 * Returns the dumped size in dwords.
1134 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1135 struct qed_ptt *p_ptt,
1136 u32 *dump_buf, bool dump)
1138 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1141 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1142 u32 global_section_offsize, global_section_addr, mfw_ver;
1143 u32 public_data_addr, global_section_offsize_addr;
1145 /* Find MCP public data GRC address. Needs to be ORed with
1146 * MCP_REG_SCRATCH due to a HW bug.
1148 public_data_addr = qed_rd(p_hwfn,
1150 MISC_REG_SHARED_MEM_ADDR) |
1153 /* Find MCP public global section offset */
1154 global_section_offsize_addr = public_data_addr +
1155 offsetof(struct mcp_public_data,
1157 sizeof(offsize_t) * PUBLIC_GLOBAL;
1158 global_section_offsize = qed_rd(p_hwfn, p_ptt,
1159 global_section_offsize_addr);
1160 global_section_addr =
1162 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
1164 /* Read MFW version from MCP public global section */
1165 mfw_ver = qed_rd(p_hwfn, p_ptt,
1166 global_section_addr +
1167 offsetof(struct public_global, mfw_ver));
1169 /* Dump MFW version param */
1170 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
1171 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
1172 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
1174 "Unexpected debug error: invalid MFW version string\n");
1177 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1180 /* Reads the chip revision from the chip and writes it as a param to the
1181 * specified buffer. Returns the dumped size in dwords.
1183 static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1184 struct qed_ptt *p_ptt,
1185 u32 *dump_buf, bool dump)
1187 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1188 char param_str[3] = "??";
1190 if (dev_data->hw_type == HW_TYPE_ASIC) {
1191 u32 chip_rev, chip_metal;
1193 chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1194 chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1196 param_str[0] = 'a' + (u8)chip_rev;
1197 param_str[1] = '0' + (u8)chip_metal;
1200 return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1203 /* Writes a section header to the specified buffer.
1204 * Returns the dumped size in dwords.
1206 static u32 qed_dump_section_hdr(u32 *dump_buf,
1207 bool dump, const char *name, u32 num_params)
1209 return qed_dump_num_param(dump_buf, dump, name, num_params);
1212 /* Writes the common global params to the specified buffer.
1213 * Returns the dumped size in dwords.
1215 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1216 struct qed_ptt *p_ptt,
1219 u8 num_specific_global_params)
1221 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1225 /* Dump global params section header */
1226 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1227 (dev_data->chip_id == CHIP_BB ? 1 : 0);
1228 offset += qed_dump_section_hdr(dump_buf + offset,
1229 dump, "global_params", num_params);
1232 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1233 offset += qed_dump_mfw_ver_param(p_hwfn,
1234 p_ptt, dump_buf + offset, dump);
1235 offset += qed_dump_chip_revision_param(p_hwfn,
1236 p_ptt, dump_buf + offset, dump);
1237 offset += qed_dump_num_param(dump_buf + offset,
1238 dump, "tools-version", TOOLS_VERSION);
1239 offset += qed_dump_str_param(dump_buf + offset,
1242 s_chip_defs[dev_data->chip_id].name);
1243 offset += qed_dump_str_param(dump_buf + offset,
1246 s_hw_type_defs[dev_data->hw_type].name);
1247 offset += qed_dump_num_param(dump_buf + offset,
1248 dump, "pci-func", p_hwfn->abs_pf_id);
1249 if (dev_data->chip_id == CHIP_BB)
1250 offset += qed_dump_num_param(dump_buf + offset,
1251 dump, "path", QED_PATH_ID(p_hwfn));
1256 /* Writes the "last" section (including CRC) to the specified buffer at the
1257 * given offset. Returns the dumped size in dwords.
1259 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1261 u32 start_offset = offset;
1263 /* Dump CRC section header */
1264 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1266 /* Calculate CRC32 and add it to the dword after the "last" section */
1268 *(dump_buf + offset) = ~crc32(0xffffffff,
1270 DWORDS_TO_BYTES(offset));
1274 return offset - start_offset;
1277 /* Update blocks reset state */
1278 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1279 struct qed_ptt *p_ptt)
/* Refreshes dev_data->block_in_reset[] by sampling the chip's debug reset
 * registers and testing each physical block's reset bit.
 */
1281 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1282 u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1286 /* Read reset registers */
1287 for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
1288 const struct dbg_reset_reg *rst_reg;
1289 bool rst_reg_removed;
1292 rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
1293 rst_reg_removed = GET_FIELD(rst_reg->data,
1294 DBG_RESET_REG_IS_REMOVED);
/* Addresses are stored in dwords - convert to a byte address for qed_rd */
1295 rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
1296 DBG_RESET_REG_ADDR));
/* Skip reset registers that don't exist on this chip variant */
1298 if (!rst_reg_removed)
1299 reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
1303 /* Check if blocks are in reset */
1304 for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
1305 const struct dbg_block_chip *blk;
1309 blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
1310 is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1311 has_rst_reg = GET_FIELD(blk->flags,
1312 DBG_BLOCK_CHIP_HAS_RESET_REG);
/* A cleared bit in the reset register means the block is in reset */
1314 if (!is_removed && has_rst_reg)
1315 dev_data->block_in_reset[blk_id] =
1316 !(reg_val[blk->reset_reg_id] &
1317 BIT(blk->reset_reg_bit_offset));
1321 /* is_mode_match recursive function */
1322 static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
1323 u16 *modes_buf_offset, u8 rec_depth)
/* Recursively evaluates the modes tree at *modes_buf_offset, which is
 * advanced past the consumed entries as a side effect.
 */
1325 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Defend against a corrupt mode tree causing unbounded recursion */
1330 if (rec_depth > MAX_RECURSION_DEPTH) {
1332 "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
1336 /* Get next element from modes tree buffer */
1337 dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1338 tree_val = dbg_array[(*modes_buf_offset)++];
1341 case INIT_MODE_OP_NOT:
/* Unary NOT: negate the single sub-expression */
1342 return !qed_is_mode_match_rec(p_hwfn,
1343 modes_buf_offset, rec_depth + 1);
1344 case INIT_MODE_OP_OR:
1345 case INIT_MODE_OP_AND:
/* Binary OR/AND: both operands are evaluated unconditionally so the
 * buffer offset advances past both sub-trees (no short-circuit).
 */
1346 arg1 = qed_is_mode_match_rec(p_hwfn,
1347 modes_buf_offset, rec_depth + 1);
1348 arg2 = qed_is_mode_match_rec(p_hwfn,
1349 modes_buf_offset, rec_depth + 1);
1350 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1351 arg2) : (arg1 && arg2);
/* Leaf: tree_val encodes a mode index offset by MAX_INIT_MODE_OPS */
1353 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1357 /* Returns true if the mode (specified using modes_buf_offset) is enabled */
1358 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
/* Entry point for mode-tree evaluation; starts at recursion depth 0 */
1360 return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
1363 /* Enable / disable the Debug block */
1364 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1365 struct qed_ptt *p_ptt, bool enable)
/* Single register write: 1 enables the Debug block, 0 disables it */
1367 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1370 /* Resets the Debug block */
1371 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1372 struct qed_ptt *p_ptt)
/* Pulses the DBG block's reset bit: write the value with the bit cleared
 * (assert reset), then restore the original value (deassert reset).
 */
1374 u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1375 const struct dbg_reset_reg *reset_reg;
1376 const struct dbg_block_chip *block;
1378 block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1379 reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1381 DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1383 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
/* Clearing the bit puts the block into reset (see block_in_reset logic) */
1385 old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1387 qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1388 qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1391 /* Enable / disable Debug Bus clients according to the specified mask
1392 * (1 = enable, 0 = disable).
1394 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1395 struct qed_ptt *p_ptt, u32 client_mask)
/* Writes the whole client mask in one shot (per-bit: 1=enable, 0=disable) */
1397 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
1400 static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
1401 struct qed_ptt *p_ptt,
1402 enum block_id block_id,
/* Configures one block's debug-bus line: select, dword-enable, shift,
 * force-valid and force-frame registers (register addresses are stored
 * in dwords and converted to bytes for qed_wr).
 */
1406 u8 force_valid_mask, u8 force_frame_mask)
1408 const struct dbg_block_chip *block =
1409 qed_get_dbg_block_per_chip(p_hwfn, block_id);
1411 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
1413 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
1415 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
1417 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
1419 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1423 /* Disable debug bus in all blocks */
1424 static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
1425 struct qed_ptt *p_ptt)
1427 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1430 /* Disable all blocks */
1431 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
1432 const struct dbg_block_chip *block_per_chip =
1433 qed_get_dbg_block_per_chip(p_hwfn,
1434 (enum block_id)block_id);
/* Skip blocks that don't exist on this chip or are held in reset
 * (touching a block in reset could hang the access).
 */
1436 if (GET_FIELD(block_per_chip->flags,
1437 DBG_BLOCK_CHIP_IS_REMOVED) ||
1438 dev_data->block_in_reset[block_id])
1441 /* Disable debug bus */
1442 if (GET_FIELD(block_per_chip->flags,
1443 DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
1445 block_per_chip->dbg_dword_enable_reg_addr;
1446 u16 modes_buf_offset =
1447 GET_FIELD(block_per_chip->dbg_bus_mode.data,
1448 DBG_MODE_HDR_MODES_BUF_OFFSET);
1450 GET_FIELD(block_per_chip->dbg_bus_mode.data,
1451 DBG_MODE_HDR_EVAL_MODE) > 0;
/* Only write when no mode gating applies or the mode matches */
1454 qed_is_mode_match(p_hwfn, &modes_buf_offset))
1455 qed_wr(p_hwfn, p_ptt,
1456 DWORDS_TO_BYTES(dbg_en_addr),
1462 /* Returns true if the specified entity (indicated by GRC param) should be
1463 * included in the dump, false otherwise.
1465 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1466 enum dbg_grc_params grc_param)
/* A GRC param value > 0 means the corresponding entity is dumped */
1468 return qed_grc_get_param(p_hwfn, grc_param) > 0;
1471 /* Returns the storm_id that matches the specified Storm letter,
1472 * or MAX_DBG_STORMS if invalid storm letter.
1474 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
/* Linear search of s_storm_defs by letter; MAX_DBG_STORMS = not found */
1478 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1479 if (s_storm_defs[storm_id].letter == storm_letter)
1480 return (enum dbg_storms)storm_id;
1482 return MAX_DBG_STORMS;
1485 /* Returns true of the specified Storm should be included in the dump, false
1488 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1489 enum dbg_storms storm)
/* Storm enum values map directly onto per-storm GRC params */
1491 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1494 /* Returns true if the specified memory should be included in the dump, false
1497 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1498 enum block_id block_id, u8 mem_group_id)
1500 const struct dbg_block *block;
1503 block = get_dbg_block(p_hwfn, block_id);
1505 /* If the block is associated with a Storm, check Storm match */
1506 if (block->associated_storm_letter) {
1507 enum dbg_storms associated_storm_id =
1508 qed_get_id_from_letter(block->associated_storm_letter);
1510 if (associated_storm_id == MAX_DBG_STORMS ||
1511 !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
/* Big-RAM memories are controlled by their own dedicated GRC params */
1515 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
1516 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
1518 if (mem_group_id == big_ram->mem_group_id ||
1519 mem_group_id == big_ram->ram_mem_group_id)
1520 return qed_grc_is_included(p_hwfn, big_ram->grc_param);
/* Map each remaining memory group to its controlling GRC param */
1523 switch (mem_group_id) {
1524 case MEM_GROUP_PXP_ILT:
1525 case MEM_GROUP_PXP_MEM:
1526 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1528 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1529 case MEM_GROUP_PBUF:
1530 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1531 case MEM_GROUP_CAU_MEM:
1532 case MEM_GROUP_CAU_SB:
1533 case MEM_GROUP_CAU_PI:
1534 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
1535 case MEM_GROUP_CAU_MEM_EXT:
1536 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
1537 case MEM_GROUP_QM_MEM:
1538 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
1539 case MEM_GROUP_CFC_MEM:
1540 case MEM_GROUP_CONN_CFC_MEM:
1541 case MEM_GROUP_TASK_CFC_MEM:
/* CFC memories are dumped if either the CFC or CM-context param is set */
1542 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
1543 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
1544 case MEM_GROUP_DORQ_MEM:
1545 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
1546 case MEM_GROUP_IGU_MEM:
1547 case MEM_GROUP_IGU_MSIX:
1548 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
1549 case MEM_GROUP_MULD_MEM:
1550 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
1551 case MEM_GROUP_PRS_MEM:
1552 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
1553 case MEM_GROUP_DMAE_MEM:
1554 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
1555 case MEM_GROUP_TM_MEM:
1556 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
1557 case MEM_GROUP_SDM_MEM:
1558 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
1559 case MEM_GROUP_TDIF_CTX:
1560 case MEM_GROUP_RDIF_CTX:
1561 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
1562 case MEM_GROUP_CM_MEM:
1563 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
1565 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
1571 /* Stalls all Storms */
1572 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1573 struct qed_ptt *p_ptt, bool stall)
/* Writes 1 (stall) or 0 (resume) to each included Storm's SEM stall
 * register, then waits for the stall to take effect.
 */
1578 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1579 if (!qed_grc_is_storm_included(p_hwfn,
1580 (enum dbg_storms)storm_id))
1583 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1584 SEM_FAST_REG_STALL_0_BB_K2;
1585 qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
/* Give the Storms time to actually stall before dumping */
1588 msleep(STALL_DELAY_MS);
1591 /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1592 * taken out of reset.
1594 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1595 struct qed_ptt *p_ptt, bool rbc_only)
1597 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1598 u8 chip_id = dev_data->chip_id;
1601 /* Take RBCs out of reset */
1602 for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
1603 if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
/* Writing to the UNRESET offset of the reset register deasserts reset */
1606 s_rbc_reset_defs[i].reset_reg_addr +
1607 RESET_REG_UNRESET_OFFSET,
1608 s_rbc_reset_defs[i].reset_val[chip_id]);
/* Non-RBC path: accumulate per-register unreset masks, then write them */
1611 u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1615 /* Fill reset regs values */
1616 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1617 bool is_removed, has_reset_reg, unreset_before_dump;
1618 const struct dbg_block_chip *block;
1620 block = qed_get_dbg_block_per_chip(p_hwfn,
1624 GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1626 GET_FIELD(block->flags,
1627 DBG_BLOCK_CHIP_HAS_RESET_REG);
1628 unreset_before_dump =
1629 GET_FIELD(block->flags,
1630 DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
/* Only blocks flagged UNRESET_BEFORE_DUMP contribute a bit */
1632 if (!is_removed && has_reset_reg && unreset_before_dump)
1633 reg_val[block->reset_reg_id] |=
1634 BIT(block->reset_reg_bit_offset);
1637 /* Write reset registers */
1638 for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
1640 const struct dbg_reset_reg *reset_reg;
1643 reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
1646 (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
/* Skip registers with no bits to unreset */
1649 if (reg_val[reset_reg_id]) {
1651 GET_FIELD(reset_reg->data,
1652 DBG_RESET_REG_ADDR);
1655 DWORDS_TO_BYTES(reset_reg_addr) +
1656 RESET_REG_UNRESET_OFFSET,
1657 reg_val[reset_reg_id]);
1663 /* Returns the attention block data of the specified block */
1664 static const struct dbg_attn_block_type_data *
1665 qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1666 enum block_id block_id, enum dbg_attn_type attn_type)
/* Indexes the binary attention-blocks array by block, then by type */
1668 const struct dbg_attn_block *base_attn_block_arr =
1669 (const struct dbg_attn_block *)
1670 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1672 return &base_attn_block_arr[block_id].per_type_data[attn_type];
1675 /* Returns the attention registers of the specified block */
1676 static const struct dbg_attn_reg *
1677 qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1678 enum block_id block_id, enum dbg_attn_type attn_type,
/* Returns a pointer into the attention-registers array for the given
 * block/type and reports the register count via the out-parameter.
 */
1681 const struct dbg_attn_block_type_data *block_type_data =
1682 qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1684 *num_attn_regs = block_type_data->num_regs;
1686 return (const struct dbg_attn_reg *)
1687 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1688 block_type_data->regs_offset;
1691 /* For each block, clear the status of all parities */
1692 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
1693 struct qed_ptt *p_ptt)
1695 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1696 const struct dbg_attn_reg *attn_reg_arr;
1697 u8 reg_idx, num_attn_regs;
1700 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
/* Blocks in reset cannot be accessed */
1701 if (dev_data->block_in_reset[block_id])
1704 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
1705 (enum block_id)block_id,
1709 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
1710 const struct dbg_attn_reg *reg_data =
1711 &attn_reg_arr[reg_idx];
1712 u16 modes_buf_offset;
1716 eval_mode = GET_FIELD(reg_data->mode.data,
1717 DBG_MODE_HDR_EVAL_MODE) > 0;
1719 GET_FIELD(reg_data->mode.data,
1720 DBG_MODE_HDR_MODES_BUF_OFFSET);
1722 /* If Mode match: clear parity status */
/* Parity status registers are clear-on-read, so a read clears them */
1724 qed_is_mode_match(p_hwfn, &modes_buf_offset))
1725 qed_rd(p_hwfn, p_ptt,
1726 DWORDS_TO_BYTES(reg_data->
1732 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1733 * the following parameters are dumped:
1734 * - count: no. of dumped entries
1735 * - split_type: split type
1736 * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1737 * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1739 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1741 u32 num_reg_entries,
1742 enum init_split_types split_type,
1743 u8 split_id, const char *reg_type_name)
/* Optional params (split id, type name) enlarge the param count */
1746 (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1749 offset += qed_dump_section_hdr(dump_buf + offset,
1750 dump, "grc_regs", num_params);
1751 offset += qed_dump_num_param(dump_buf + offset,
1752 dump, "count", num_reg_entries);
1753 offset += qed_dump_str_param(dump_buf + offset,
1755 s_split_type_defs[split_type].name);
1756 if (split_type != SPLIT_TYPE_NONE)
1757 offset += qed_dump_num_param(dump_buf + offset,
1758 dump, "id", split_id);
1760 offset += qed_dump_str_param(dump_buf + offset,
1761 dump, "type", reg_type_name);
1766 /* Reads the specified registers into the specified buffer.
1767 * The addr and len arguments are specified in dwords.
1769 void qed_read_regs(struct qed_hwfn *p_hwfn,
1770 struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
/* Simple dword-by-dword GRC read loop; addr is in dwords */
1774 for (i = 0; i < len; i++)
1775 buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1778 /* Dumps the GRC registers in the specified address range.
1779 * Returns the dumped size in dwords.
1780 * The addr and len arguments are specified in dwords.
1782 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
1783 struct qed_ptt *p_ptt,
1785 bool dump, u32 addr, u32 len, bool wide_bus,
1786 enum init_split_types split_type,
/* Reads 'len' dwords starting at dword address 'addr', preferring DMAE
 * for large or wide-bus reads and falling back to GRC reads with
 * port/PF/VF pretend handling for split dumps.
 */
1789 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1790 u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
1791 bool read_using_dmae = false;
/* Derive port/pf ids from the split id for split-based dumps */
1797 switch (split_type) {
1798 case SPLIT_TYPE_PORT:
1804 case SPLIT_TYPE_PORT_PF:
1805 port_id = split_id / dev_data->num_pfs_per_port;
1806 pf_id = port_id + dev_data->num_ports *
1807 (split_id % dev_data->num_pfs_per_port);
1816 /* Try reading using DMAE */
1817 if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
1818 (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
1819 (PROTECT_WIDE_BUS && wide_bus))) {
1820 struct qed_dmae_params dmae_params;
1822 /* Set DMAE params */
1823 memset(&dmae_params, 0, sizeof(dmae_params));
1824 SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
1825 switch (split_type) {
1826 case SPLIT_TYPE_PORT:
1827 SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
1829 dmae_params.port_id = port_id;
1832 SET_FIELD(dmae_params.flags,
1833 QED_DMAE_PARAMS_SRC_PF_VALID, 1);
1834 dmae_params.src_pfid = pf_id;
1836 case SPLIT_TYPE_PORT_PF:
1837 SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
1839 SET_FIELD(dmae_params.flags,
1840 QED_DMAE_PARAMS_SRC_PF_VALID, 1);
1841 dmae_params.port_id = port_id;
1842 dmae_params.src_pfid = pf_id;
1848 /* Execute DMAE command */
1849 read_using_dmae = !qed_dmae_grc2host(p_hwfn,
1851 DWORDS_TO_BYTES(addr),
1852 (u64)(uintptr_t)(dump_buf),
/* On DMAE failure, disable DMAE for the rest of the dump and fall
 * back to GRC reads.
 */
1854 if (!read_using_dmae) {
1855 dev_data->use_dmae = 0;
1858 "Failed reading from chip using DMAE, using GRC instead\n");
1862 if (read_using_dmae)
1865 /* If not read using DMAE, read using GRC */
/* Only change the pretend state if it differs from the cached one */
1868 if (split_type != dev_data->pretend.split_type ||
1869 split_id != dev_data->pretend.split_id) {
1870 switch (split_type) {
1871 case SPLIT_TYPE_PORT:
1872 qed_port_pretend(p_hwfn, p_ptt, port_id);
1875 fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
1877 qed_fid_pretend(p_hwfn, p_ptt, fid);
1879 case SPLIT_TYPE_PORT_PF:
1880 fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
1882 qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
1885 fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
1886 | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
1888 qed_fid_pretend(p_hwfn, p_ptt, fid);
1894 dev_data->pretend.split_type = (u8)split_type;
1895 dev_data->pretend.split_id = split_id;
1898 /* Read registers using GRC */
1899 qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
/* Emit a progress message every 'thresh' registers read */
1903 dev_data->num_regs_read += len;
1904 thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
1905 if ((dev_data->num_regs_read / thresh) >
1906 ((dev_data->num_regs_read - len) / thresh))
1909 "Dumped %d registers...\n", dev_data->num_regs_read);
1914 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
1915 * The addr and len arguments are specified in dwords.
1917 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
1918 bool dump, u32 addr, u32 len)
/* Packs the dword address and length into a single header dword */
1921 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
1926 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
1927 * The addr and len arguments are specified in dwords.
1929 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
1930 struct qed_ptt *p_ptt,
1932 bool dump, u32 addr, u32 len, bool wide_bus,
1933 enum init_split_types split_type, u8 split_id)
/* Header dword followed by the register values themselves */
1937 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
1938 offset += qed_grc_dump_addr_range(p_hwfn,
1941 dump, addr, len, wide_bus,
1942 split_type, split_id);
1947 /* Dumps GRC registers sequence with skip cycle.
1948 * Returns the dumped size in dwords.
1949 * - addr: start GRC address in dwords
1950 * - total_len: total no. of dwords to dump
1951 * - read_len: no. consecutive dwords to read
1952 * - skip_len: no. of dwords to skip (and fill with zeros)
1954 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
1955 struct qed_ptt *p_ptt,
1960 u32 read_len, u32 skip_len)
1962 u32 offset = 0, reg_offset = 0;
1964 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
/* When not dumping, just account for the size without reading */
1967 return offset + total_len;
/* Alternate between reading read_len dwords and zero-filling skip_len
 * dwords until total_len dwords have been produced.
 */
1969 while (reg_offset < total_len) {
1970 u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
1972 offset += qed_grc_dump_addr_range(p_hwfn,
1975 dump, addr, curr_len, false,
1976 SPLIT_TYPE_NONE, 0);
1977 reg_offset += curr_len;
1980 if (reg_offset < total_len) {
/* NOTE(review): the bound here is total_len - skip_len rather than the
 * remaining length (total_len - reg_offset); looks suspicious - verify
 * against callers whether skip can ever overrun total_len.
 */
1981 curr_len = min_t(u32, skip_len, total_len - skip_len);
1982 memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
1984 reg_offset += curr_len;
1992 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
1993 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
1994 struct qed_ptt *p_ptt,
1995 struct virt_mem_desc input_regs_arr,
1998 enum init_split_types split_type,
2000 bool block_enable[MAX_BLOCK_ID],
2001 u32 *num_dumped_reg_entries)
2003 u32 i, offset = 0, input_offset = 0;
2004 bool mode_match = true;
2006 *num_dumped_reg_entries = 0;
/* Walk the input buffer: each conditional header is followed by
 * cond_hdr->data_size dwords of register entries.
 */
2008 while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
2009 const struct dbg_dump_cond_hdr *cond_hdr =
2010 (const struct dbg_dump_cond_hdr *)
2011 input_regs_arr.ptr + input_offset++;
2012 u16 modes_buf_offset;
2015 /* Check mode/block */
2016 eval_mode = GET_FIELD(cond_hdr->mode.data,
2017 DBG_MODE_HDR_EVAL_MODE) > 0;
2020 GET_FIELD(cond_hdr->mode.data,
2021 DBG_MODE_HDR_MODES_BUF_OFFSET);
2022 mode_match = qed_is_mode_match(p_hwfn,
/* Skip the whole entry group if mode or block gating fails */
2026 if (!mode_match || !block_enable[cond_hdr->block_id]) {
2027 input_offset += cond_hdr->data_size;
2031 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2032 const struct dbg_dump_reg *reg =
2033 (const struct dbg_dump_reg *)
2034 input_regs_arr.ptr + input_offset;
2038 addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2039 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2040 wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2041 offset += qed_grc_dump_reg_entry(p_hwfn,
2048 split_type, split_id);
2049 (*num_dumped_reg_entries)++;
2056 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2057 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2058 struct qed_ptt *p_ptt,
2059 struct virt_mem_desc input_regs_arr,
2062 bool block_enable[MAX_BLOCK_ID],
2063 enum init_split_types split_type,
2064 u8 split_id, const char *reg_type_name)
2066 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2067 enum init_split_types hdr_split_type = split_type;
2068 u32 num_dumped_reg_entries, offset;
2069 u8 hdr_split_id = split_id;
2071 /* In PORT_PF split type, print a port split header */
2072 if (split_type == SPLIT_TYPE_PORT_PF) {
2073 hdr_split_type = SPLIT_TYPE_PORT;
2074 hdr_split_id = split_id / dev_data->num_pfs_per_port;
2077 /* Calculate register dump header size (and skip it for now) */
/* The header is written twice: once (dry) to reserve space, then again
 * after the real entry count is known.
 */
2078 offset = qed_grc_dump_regs_hdr(dump_buf,
2082 hdr_split_id, reg_type_name);
2084 /* Dump registers */
2085 offset += qed_grc_dump_regs_entries(p_hwfn,
2093 &num_dumped_reg_entries);
2095 /* Write register dump header */
2096 if (dump && num_dumped_reg_entries > 0)
2097 qed_grc_dump_regs_hdr(dump_buf,
2099 num_dumped_reg_entries,
2101 hdr_split_id, reg_type_name);
/* An empty section contributes nothing to the dump */
2103 return num_dumped_reg_entries > 0 ? offset : 0;
2106 /* Dumps registers according to the input registers array. Returns the dumped
2109 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2110 struct qed_ptt *p_ptt,
2113 bool block_enable[MAX_BLOCK_ID],
2114 const char *reg_type_name)
2116 struct virt_mem_desc *dbg_buf =
2117 &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
2118 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2119 u32 offset = 0, input_offset = 0;
/* Iterate over split headers in the binary register-dump buffer; each
 * header describes a split type and the size of its data that follows.
 */
2121 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2122 const struct dbg_dump_split_hdr *split_hdr;
2123 struct virt_mem_desc curr_input_regs_arr;
2124 enum init_split_types split_type;
2125 u16 split_count = 0;
2126 u32 split_data_size;
2130 (const struct dbg_dump_split_hdr *)
2131 dbg_buf->ptr + input_offset++;
2133 GET_FIELD(split_hdr->hdr,
2134 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2135 split_data_size = GET_FIELD(split_hdr->hdr,
2136 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2137 curr_input_regs_arr.ptr =
2138 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
2140 curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
/* Number of iterations depends on how the split fans out */
2142 switch (split_type) {
2143 case SPLIT_TYPE_NONE:
2146 case SPLIT_TYPE_PORT:
2147 split_count = dev_data->num_ports;
2150 case SPLIT_TYPE_PORT_PF:
2151 split_count = dev_data->num_ports *
2152 dev_data->num_pfs_per_port;
2155 split_count = dev_data->num_vfs;
2161 for (split_id = 0; split_id < split_count; split_id++)
2162 offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2163 curr_input_regs_arr,
2170 input_offset += split_data_size;
2173 /* Cancel pretends (pretend to original PF) */
2175 qed_fid_pretend(p_hwfn, p_ptt,
2176 FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2177 p_hwfn->rel_pf_id));
/* Reset the cached pretend state to match the restored hardware state */
2178 dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2179 dev_data->pretend.split_id = 0;
2185 /* Dump reset registers. Returns the dumped size in dwords. */
2186 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2187 struct qed_ptt *p_ptt,
2188 u32 *dump_buf, bool dump)
2190 u32 offset = 0, num_regs = 0;
2193 /* Calculate header size */
/* Placeholder header with count 0; rewritten with the real count below */
2194 offset += qed_grc_dump_regs_hdr(dump_buf,
2196 0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2198 /* Write reset registers */
2199 for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2201 const struct dbg_reset_reg *reset_reg;
2204 reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2206 if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2209 reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
/* Each reset register is a single dword entry */
2210 offset += qed_grc_dump_reg_entry(p_hwfn,
2215 1, false, SPLIT_TYPE_NONE, 0);
2221 qed_grc_dump_regs_hdr(dump_buf,
2222 true, num_regs, SPLIT_TYPE_NONE,
2228 /* Dump registers that are modified during GRC Dump and therefore must be
2229 * dumped first. Returns the dumped size in dwords.
2231 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2232 struct qed_ptt *p_ptt,
2233 u32 *dump_buf, bool dump)
2235 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2236 u32 block_id, offset = 0, stall_regs_offset;
2237 const struct dbg_attn_reg *attn_reg_arr;
2238 u8 storm_id, reg_idx, num_attn_regs;
2239 u32 num_reg_entries = 0;
2241 /* Write empty header for attention registers */
2242 offset += qed_grc_dump_regs_hdr(dump_buf,
2244 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2246 /* Write parity registers */
2247 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
/* Only skip blocks in reset on a real dump; size calculation
 * (dump == false) still accounts for them.
 */
2248 if (dev_data->block_in_reset[block_id] && dump)
2251 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
2252 (enum block_id)block_id,
2256 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2257 const struct dbg_attn_reg *reg_data =
2258 &attn_reg_arr[reg_idx];
2259 u16 modes_buf_offset;
2264 eval_mode = GET_FIELD(reg_data->mode.data,
2265 DBG_MODE_HDR_EVAL_MODE) > 0;
2267 GET_FIELD(reg_data->mode.data,
2268 DBG_MODE_HDR_MODES_BUF_OFFSET);
2270 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2273 /* Mode match: read & dump registers */
/* Dump both the mask register and the status register */
2274 addr = reg_data->mask_address;
2275 offset += qed_grc_dump_reg_entry(p_hwfn,
2281 SPLIT_TYPE_NONE, 0);
2282 addr = GET_FIELD(reg_data->data,
2283 DBG_ATTN_REG_STS_ADDRESS);
2284 offset += qed_grc_dump_reg_entry(p_hwfn,
2290 SPLIT_TYPE_NONE, 0);
2291 num_reg_entries += 2;
2295 /* Overwrite header for attention registers */
2297 qed_grc_dump_regs_hdr(dump_buf,
2300 SPLIT_TYPE_NONE, 0, "ATTN_REGS")
2302 /* Write empty header for stall registers */
2303 stall_regs_offset = offset;
2304 offset += qed_grc_dump_regs_hdr(dump_buf,
2305 false, 0, SPLIT_TYPE_NONE, 0, "REGS");
2307 /* Write Storm stall status registers */
2308 for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
2310 struct storm_defs *storm = &s_storm_defs[storm_id];
2313 if (dev_data->block_in_reset[storm->sem_block_id] && dump)
2317 BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
2318 SEM_FAST_REG_STALLED);
2319 offset += qed_grc_dump_reg_entry(p_hwfn,
2325 false, SPLIT_TYPE_NONE, 0);
2329 /* Overwrite header for stall registers */
2331 qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
2334 SPLIT_TYPE_NONE, 0, "REGS");
2339 /* Dumps registers that can't be represented in the debug arrays */
2340 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2341 struct qed_ptt *p_ptt,
2342 u32 *dump_buf, bool dump)
2344 u32 offset = 0, addr;
/* Exactly two entries are dumped: RDIF and TDIF debug-error info */
2346 offset += qed_grc_dump_regs_hdr(dump_buf,
2347 dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2349 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2352 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2353 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2358 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2361 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2363 qed_grc_dump_reg_entry_skip(p_hwfn,
2368 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2375 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2376 * dwords. The following parameters are dumped:
2377 * - name: dumped only if it's not NULL.
2378 * - addr: in dwords, dumped only if name is NULL.
2379 * - len: in dwords, always dumped.
2380 * - width: dumped if it's not zero.
2381 * - packed: dumped only if it's not false.
2382 * - mem_group: always dumped.
2383 * - is_storm: true only if the memory is related to a Storm.
2384 * - storm_letter: valid only if is_storm is true.
2387 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2395 const char *mem_group, char storm_letter)
2403 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2410 /* Dump section header */
2411 offset += qed_dump_section_hdr(dump_buf + offset,
2412 dump, "grc_mem", num_params);
/* Prefix the name with "<letter>STORM_" when tied to a Storm; the '?'
 * placeholder is overwritten with the actual storm letter.
 */
2417 strcpy(buf, "?STORM_");
2418 buf[0] = storm_letter;
2419 strcpy(buf + strlen(buf), name);
2424 offset += qed_dump_str_param(dump_buf + offset,
/* Unnamed memories are identified by byte address instead */
2428 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2430 offset += qed_dump_num_param(dump_buf + offset,
2431 dump, "addr", addr_in_bytes);
2435 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2437 /* Dump bit width */
2439 offset += qed_dump_num_param(dump_buf + offset,
2440 dump, "width", bit_width);
2444 offset += qed_dump_num_param(dump_buf + offset,
/* Same Storm-prefix treatment for the memory group ("type") name */
2449 strcpy(buf, "?STORM_");
2450 buf[0] = storm_letter;
2451 strcpy(buf + strlen(buf), mem_group);
2453 strcpy(buf, mem_group);
2456 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2461 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2462 * Returns the dumped size in dwords.
2463 * The addr and len arguments are specified in dwords.
2465 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2466 struct qed_ptt *p_ptt,
2475 const char *mem_group, char storm_letter)
/* Memory header followed by the raw data read from addr..addr+len */
2479 offset += qed_grc_dump_mem_hdr(p_hwfn,
2486 packed, mem_group, storm_letter);
2487 offset += qed_grc_dump_addr_range(p_hwfn,
2490 dump, addr, len, wide_bus,
2491 SPLIT_TYPE_NONE, 0);
2496 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
2497 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2498 struct qed_ptt *p_ptt,
2499 struct virt_mem_desc input_mems_arr,
2500 u32 *dump_buf, bool dump)
2502 u32 i, offset = 0, input_offset = 0;
2503 bool mode_match = true;
/* Each conditional header is followed by fixed-size memory entries */
2505 while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
2506 const struct dbg_dump_cond_hdr *cond_hdr;
2507 u16 modes_buf_offset;
2512 (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
2514 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2516 /* Check required mode */
2517 eval_mode = GET_FIELD(cond_hdr->mode.data,
2518 DBG_MODE_HDR_EVAL_MODE) > 0;
2521 GET_FIELD(cond_hdr->mode.data,
2522 DBG_MODE_HDR_MODES_BUF_OFFSET);
2523 mode_match = qed_is_mode_match(p_hwfn,
/* Mode mismatch: skip the entire group of entries */
2528 input_offset += cond_hdr->data_size;
2532 for (i = 0; i < num_entries;
2533 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2534 const struct dbg_dump_mem *mem =
2535 (const struct dbg_dump_mem *)((u32 *)
2538 const struct dbg_block *block;
2539 char storm_letter = 0;
2540 u32 mem_addr, mem_len;
2544 mem_group_id = GET_FIELD(mem->dword0,
2545 DBG_DUMP_MEM_MEM_GROUP_ID);
/* Reject out-of-range group ids from a corrupt binary buffer */
2546 if (mem_group_id >= MEM_GROUPS_NUM) {
2547 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
/* Skip memories excluded by the current GRC params */
2551 if (!qed_grc_is_mem_included(p_hwfn,
2557 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
2558 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
2559 mem_wide_bus = GET_FIELD(mem->dword1,
2560 DBG_DUMP_MEM_WIDE_BUS);
2562 block = get_dbg_block(p_hwfn,
2563 cond_hdr->block_id);
2565 /* If memory is associated with Storm,
2566 * update storm details
2568 if (block->associated_storm_letter)
2569 storm_letter = block->associated_storm_letter;
2572 offset += qed_grc_dump_mem(p_hwfn,
2582 s_mem_group_names[mem_group_id],
2590 /* Dumps GRC memories according to the input array dump_mem.
2591 * Returns the dumped size in dwords.
2593 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2594 struct qed_ptt *p_ptt,
2595 u32 *dump_buf, bool dump)
2597 struct virt_mem_desc *dbg_buf =
2598 &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
2599 u32 offset = 0, input_offset = 0;
/* Same split-header walk as qed_grc_dump_registers, but only the
 * non-split (SPLIT_TYPE_NONE) sections are supported for memories.
 */
2601 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2602 const struct dbg_dump_split_hdr *split_hdr;
2603 struct virt_mem_desc curr_input_mems_arr;
2604 enum init_split_types split_type;
2605 u32 split_data_size;
2608 (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
2610 split_type = GET_FIELD(split_hdr->hdr,
2611 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2612 split_data_size = GET_FIELD(split_hdr->hdr,
2613 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2614 curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
2615 curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
2617 if (split_type == SPLIT_TYPE_NONE)
2618 offset += qed_grc_dump_mem_entries(p_hwfn,
2620 curr_input_mems_arr,
2625 "Dumping split memories is currently not supported\n");
2627 input_offset += split_data_size;
2633 /* Dumps GRC context data for the specified Storm.
2634 * Returns the dumped size in dwords.
2635 * The lid_size argument is specified in quad-regs.
2637 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2638 struct qed_ptt *p_ptt,
2643 enum cm_ctx_types ctx_type, u8 storm_id)
2645 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2646 struct storm_defs *storm = &s_storm_defs[storm_id];
2647 u32 i, lid, lid_size, total_size;
2648 u32 rd_reg_addr, offset = 0;
2650 /* Convert quad-regs to dwords */
2651 lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
2656 total_size = num_lids * lid_size;
2658 offset += qed_grc_dump_mem_hdr(p_hwfn,
2665 false, name, storm->letter);
/* Size-only pass: account for the data without reading the chip */
2668 return offset + total_size;
2670 rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
2672 /* Dump context data */
2673 for (lid = 0; lid < num_lids; lid++) {
2674 for (i = 0; i < lid_size; i++) {
/* Select the (lid, dword) to read via the CM context write register:
 * dword index in bits [31:9], lid in the low bits.
 */
2676 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
2677 offset += qed_grc_dump_addr_range(p_hwfn,
2684 SPLIT_TYPE_NONE, 0);
2691 /* Dumps GRC contexts. Returns the dumped size in dwords. */
/* Iterates over all Storms that are included in the dump and emits the
 * four CM context types (Conn AG/ST, Task AG/ST) for each via
 * qed_grc_dump_ctx_data(). NOTE(review): the elided argument lines carry
 * the per-type names/sizes.
 */
2692 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2693 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2698 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
/* Skip storms filtered out by the GRC parameters. */
2699 if (!qed_grc_is_storm_included(p_hwfn,
2700 (enum dbg_storms)storm_id))
2703 /* Dump Conn AG context size */
2704 offset += qed_grc_dump_ctx_data(p_hwfn,
2710 CM_CTX_CONN_AG, storm_id);
2712 /* Dump Conn ST context size */
2713 offset += qed_grc_dump_ctx_data(p_hwfn,
2719 CM_CTX_CONN_ST, storm_id);
2721 /* Dump Task AG context size */
2722 offset += qed_grc_dump_ctx_data(p_hwfn,
2728 CM_CTX_TASK_AG, storm_id);
2730 /* Dump Task ST context size */
2731 offset += qed_grc_dump_ctx_data(p_hwfn,
2737 CM_CTX_TASK_ST, storm_id);
/* Bit positions in the SEM_FAST VFC status register. */
2743 #define VFC_STATUS_RESP_READY_BIT 0
2744 #define VFC_STATUS_BUSY_BIT 1
2745 #define VFC_STATUS_SENDING_CMD_BIT 2
/* Base poll delay (ms; scaled by the HW-type delay factor) and the
 * maximum number of status polls before giving up.
 */
2747 #define VFC_POLLING_DELAY_MS 1
2748 #define VFC_POLLING_COUNT 20
2750 /* Reads data from VFC. Returns the number of dwords read (0 on error).
2751 * Sizes are specified in dwords.
/* Issues a VFC command: writes the command and address words into the
 * storm's SEM_FAST VFC registers, then for each expected response dword
 * polls the VFC status register until the RESP_READY bit is set (bounded
 * by VFC_POLLING_COUNT) and reads one dword from VFC_DATA_RD.
 * NOTE(review): listing is non-contiguous; the cmd/addr parameter lines
 * and parts of the polling loop are elided.
 */
2753 static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
2754 struct qed_ptt *p_ptt,
2755 struct storm_defs *storm,
2760 u32 resp_size, u32 *dump_buf)
2762 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2763 u32 vfc_status, polling_ms, polling_count = 0, i;
2764 u32 reg_addr, sem_base;
2765 bool is_ready = false;
2767 sem_base = storm->sem_fast_mem_addr;
/* Scale the poll delay by the platform (ASIC/emulation) delay factor. */
2768 polling_ms = VFC_POLLING_DELAY_MS *
2769 s_hw_type_defs[dev_data->hw_type].delay_factor;
2771 /* Write VFC command */
2774 sem_base + SEM_FAST_REG_VFC_DATA_WR,
2775 cmd_data, cmd_size);
2777 /* Write VFC address */
2780 sem_base + SEM_FAST_REG_VFC_ADDR,
2781 addr_data, addr_size);
2784 for (i = 0; i < resp_size; i++) {
2785 /* Poll until ready */
2787 reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
2788 qed_grc_dump_addr_range(p_hwfn,
2792 BYTES_TO_DWORDS(reg_addr),
2794 false, SPLIT_TYPE_NONE, 0);
2795 is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
/* Give up after VFC_POLLING_COUNT attempts (0-dword error return). */
2798 if (polling_count++ == VFC_POLLING_COUNT)
2803 } while (!is_ready);
/* Response ready - read one dword of VFC data. */
2805 reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
2806 qed_grc_dump_addr_range(p_hwfn,
2810 BYTES_TO_DWORDS(reg_addr),
2811 1, false, SPLIT_TYPE_NONE, 0);
2817 /* Dump VFC CAM. Returns the dumped size in dwords. */
/* Dumps the storm's VFC CAM row by row: builds a CAM-read command/address
 * pair and reads VFC_CAM_RESP_DWORDS per row through
 * qed_grc_dump_read_from_vfc(). Size-only mode returns header + data size
 * without touching HW.
 */
2818 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
2819 struct qed_ptt *p_ptt,
2820 u32 *dump_buf, bool dump, u8 storm_id)
2822 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2823 struct storm_defs *storm = &s_storm_defs[storm_id];
2824 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2825 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2826 u32 row, offset = 0;
2828 offset += qed_grc_dump_mem_hdr(p_hwfn,
2835 false, "vfc_cam", storm->letter);
/* Size-only path. */
2838 return offset + total_size;
2840 /* Prepare CAM address */
2841 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
2843 /* Read VFC CAM data */
2844 for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
2845 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2846 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2852 VFC_CAM_ADDR_DWORDS,
2853 VFC_CAM_RESP_DWORDS,
2860 /* Dump VFC RAM. Returns the dumped size in dwords. */
/* Dumps one VFC RAM region (described by ram_defs) of the given storm:
 * issues a RAM-read per row in [base_row, base_row + num_rows) and reads
 * VFC_RAM_RESP_DWORDS per row. Mirrors qed_grc_dump_vfc_cam() above.
 */
2861 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
2862 struct qed_ptt *p_ptt,
2865 u8 storm_id, struct vfc_ram_defs *ram_defs)
2867 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2868 struct storm_defs *storm = &s_storm_defs[storm_id];
2869 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2870 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2871 u32 row, offset = 0;
2873 offset += qed_grc_dump_mem_hdr(p_hwfn,
2881 ram_defs->type_name,
/* Size-only path. */
2885 return offset + total_size;
2887 /* Prepare RAM address */
2888 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
2890 /* Read VFC RAM data */
2891 for (row = ram_defs->base_row;
2892 row < ram_defs->base_row + ram_defs->num_rows; row++) {
2893 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2894 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2900 VFC_RAM_ADDR_DWORDS,
2901 VFC_RAM_RESP_DWORDS,
2908 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
/* For every included storm that has a VFC, dumps the VFC CAM followed by
 * all VFC RAM types.
 */
2909 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2910 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2915 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
/* Skip excluded storms and storms without a VFC. */
2916 if (!qed_grc_is_storm_included(p_hwfn,
2917 (enum dbg_storms)storm_id) ||
2918 !s_storm_defs[storm_id].has_vfc)
2922 offset += qed_grc_dump_vfc_cam(p_hwfn,
2928 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2929 offset += qed_grc_dump_vfc_ram(p_hwfn,
2934 &s_vfc_ram_defs[i]);
2940 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
/* Dumps every RSS memory type: selects the RAM address via
 * RSS_REG_RSS_RAM_ADDR and reads data through RSS_REG_RSS_RAM_DATA in
 * chunks of at most RSS_REG_RSS_RAM_DATA_SIZE dwords.
 */
2941 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
2942 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2944 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2948 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
2949 u32 rss_addr, num_entries, total_dwords;
2950 struct rss_mem_defs *rss_defs;
2951 u32 addr, num_dwords_to_read;
2954 rss_defs = &s_rss_mem_defs[rss_mem_id];
2955 rss_addr = rss_defs->addr;
2956 num_entries = rss_defs->num_entries[dev_data->chip_id];
/* entry_width is in bits; convert total entries to dwords. */
2957 total_dwords = (num_entries * rss_defs->entry_width) / 32;
/* 16-bit entries are dumped packed (two entries per dword). */
2958 packed = (rss_defs->entry_width == 16);
2960 offset += qed_grc_dump_mem_hdr(p_hwfn,
2966 rss_defs->entry_width,
2968 rss_defs->type_name, 0);
/* Size-only path. */
2972 offset += total_dwords;
2976 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
2977 while (total_dwords) {
2978 num_dwords_to_read = min_t(u32,
2979 RSS_REG_RSS_RAM_DATA_SIZE,
/* Select the RAM address, then read the data window. */
2981 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
2982 offset += qed_grc_dump_addr_range(p_hwfn,
2989 SPLIT_TYPE_NONE, 0);
2990 total_dwords -= num_dwords_to_read;
2998 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
/* Dumps one Big RAM instance (per big_ram_id): determines the block size
 * from the chip's is_256b register bit, patches the "???" placeholder in
 * the local name buffers with the instance name, then reads the RAM in
 * BRB_REG_BIG_RAM_DATA_SIZE windows by writing the window index to the
 * address register.
 * NOTE(review): strncpy() here deliberately overwrites only the first
 * BIG_RAM_NAME_LEN chars of pre-terminated buffers; termination relies on
 * the initializers - confirm BIG_RAM_NAME_LEN fits the "???" prefix.
 */
2999 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3000 struct qed_ptt *p_ptt,
3001 u32 *dump_buf, bool dump, u8 big_ram_id)
3003 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3004 u32 block_size, ram_size, offset = 0, reg_val, i;
3005 char mem_name[12] = "???_BIG_RAM";
3006 char type_name[8] = "???_RAM";
3007 struct big_ram_defs *big_ram;
3009 big_ram = &s_big_ram_defs[big_ram_id];
3010 ram_size = big_ram->ram_size[dev_data->chip_id];
/* Block size is 256 bytes when the chip's is_256b bit is set. */
3012 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3013 block_size = reg_val &
3014 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3017 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3018 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3020 /* Dump memory header */
3021 offset += qed_grc_dump_mem_hdr(p_hwfn,
3028 false, type_name, 0);
3030 /* Read and dump Big RAM data */
/* Size-only path. */
3032 return offset + ram_size;
3035 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
/* Window select: write the chunk index, then read the data window. */
3039 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3040 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3041 len = BRB_REG_BIG_RAM_DATA_SIZE;
3042 offset += qed_grc_dump_addr_range(p_hwfn,
3048 false, SPLIT_TYPE_NONE, 0);
3054 /* Dumps MCP scratchpad. Returns the dumped size in dwords. */
/* Halts the MCP (unless DBG_GRC_PARAM_NO_MCP), dumps the MCP scratchpad,
 * cpu_reg_file, the MCP block registers and MISC_REG_SHARED_MEM_ADDR,
 * then resumes the MCP if it was halted here. A failed halt only warns;
 * the dump proceeds.
 */
3055 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3056 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3058 bool block_enable[MAX_BLOCK_ID] = { 0 };
3059 u32 offset = 0, addr;
3060 bool halted = false;
3063 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
/* qed_mcp_halt() returns 0 on success; remember success for resume. */
3064 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3066 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3069 /* Dump MCP scratchpad */
3070 offset += qed_grc_dump_mem(p_hwfn,
3075 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3076 MCP_REG_SCRATCH_SIZE,
3077 false, 0, false, "MCP", 0);
3079 /* Dump MCP cpu_reg_file */
3080 offset += qed_grc_dump_mem(p_hwfn,
3085 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3086 MCP_REG_CPU_REG_FILE_SIZE,
3087 false, 0, false, "MCP", 0);
3089 /* Dump MCP registers */
3090 block_enable[BLOCK_MCP] = true;
3091 offset += qed_grc_dump_registers(p_hwfn,
3094 dump, block_enable, "MCP");
3096 /* Dump required non-MCP registers */
3097 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3098 dump, 1, SPLIT_TYPE_NONE, 0,
3100 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3101 offset += qed_grc_dump_reg_entry(p_hwfn,
3107 false, SPLIT_TYPE_NONE, 0);
/* Resume only if we were the ones who halted it. */
3110 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3111 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3116 /* Dumps the tbus indirect memory for all PHYs.
3117 * Returns the dumped size in dwords.
/* For each PHY in s_phy_defs: computes the tbus address/data register
 * pairs, then sweeps the tbus address space (hi offset x 256 lo offsets),
 * reading one byte from the lo-data and one from the hi-data register per
 * address into the dump buffer (byte-wise via bytes_buf).
 */
3119 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3120 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3122 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3126 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3127 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3128 struct phy_defs *phy_defs;
3131 phy_defs = &s_phy_defs[phy_id];
3132 addr_lo_addr = phy_defs->base_addr +
3133 phy_defs->tbus_addr_lo_addr;
3134 addr_hi_addr = phy_defs->base_addr +
3135 phy_defs->tbus_addr_hi_addr;
3136 data_lo_addr = phy_defs->base_addr +
3137 phy_defs->tbus_data_lo_addr;
3138 data_hi_addr = phy_defs->base_addr +
3139 phy_defs->tbus_data_hi_addr;
/* Memory name is "tbus_<phy name>"; snprintf failure is only logged. */
3141 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3142 phy_defs->phy_name) < 0)
3144 "Unexpected debug error: invalid PHY memory name\n");
3146 offset += qed_grc_dump_mem_hdr(p_hwfn,
3151 PHY_DUMP_SIZE_DWORDS,
3152 16, true, mem_name, 0);
/* Size-only path. */
3155 offset += PHY_DUMP_SIZE_DWORDS;
3159 bytes_buf = (u8 *)(dump_buf + offset);
3160 for (tbus_hi_offset = 0;
3161 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3163 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3164 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3167 p_ptt, addr_lo_addr, tbus_lo_offset);
/* One byte from the lo data register, one from the hi. */
3168 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3171 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3177 offset += PHY_DUMP_SIZE_DWORDS;
/* Forward declarations: NVRAM helpers are defined later in the file but
 * are needed by qed_grc_dump_mcp_hw_dump() below.
 */
3183 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3184 struct qed_ptt *p_ptt,
3186 u32 *nvram_offset_bytes,
3187 u32 *nvram_size_bytes);
3189 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3190 struct qed_ptt *p_ptt,
3191 u32 nvram_offset_bytes,
3192 u32 nvram_size_bytes, u32 *ret_buf);
3194 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
/* Locates the NVM_TYPE_HW_DUMP_OUT image in NVRAM, writes an
 * "mcp_hw_dump" section header with its size, and (when dumping) copies
 * the image into the dump buffer via qed_nvram_read().
 */
3195 static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3196 struct qed_ptt *p_ptt,
3197 u32 *dump_buf, bool dump)
3199 u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3200 u32 hw_dump_size_dwords = 0, offset = 0;
3201 enum dbg_status status;
3203 /* Read HW dump image from NVRAM */
3204 status = qed_find_nvram_image(p_hwfn,
3206 NVM_TYPE_HW_DUMP_OUT,
3207 &hw_dump_offset_bytes,
3208 &hw_dump_size_bytes);
/* No image found (or lookup failed): section is simply not emitted. */
3209 if (status != DBG_STATUS_OK)
3212 hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3214 /* Dump HW dump image section */
3215 offset += qed_dump_section_hdr(dump_buf + offset,
3216 dump, "mcp_hw_dump", 1);
3217 offset += qed_dump_num_param(dump_buf + offset,
3218 dump, "size", hw_dump_size_dwords);
3220 /* Read MCP HW dump image into dump buffer */
3221 if (dump && hw_dump_size_dwords) {
3222 status = qed_nvram_read(p_hwfn,
3224 hw_dump_offset_bytes,
3225 hw_dump_size_bytes, dump_buf + offset);
3226 if (status != DBG_STATUS_OK) {
3228 "Failed to read MCP HW Dump image from NVRAM\n");
3232 offset += hw_dump_size_dwords;
3237 /* Dumps Static Debug data. Returns the dumped size in dwords. */
/* Skipped entirely if a debug-bus recording is active (DBG_REG_DBG_BLOCK_ON).
 * Otherwise configures the debug block for 8-HW framing / internal-buffer
 * target, then for each block with a debug bus (mode-matched, not removed,
 * excluding NWS) dumps every static debug line by selecting the line and
 * reading DBG_REG_CALENDAR_OUT_DATA. Blocks held in reset get zero-filled
 * data. Debug clients/output are disabled again at the end.
 */
3238 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3239 struct qed_ptt *p_ptt,
3240 u32 *dump_buf, bool dump)
3242 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3243 u32 block_id, line_id, offset = 0, addr, len;
3245 /* Don't dump static debug if a debug bus recording is in progress */
3246 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3250 /* Disable debug bus in all blocks */
3251 qed_bus_disable_blocks(p_hwfn, p_ptt);
3253 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3255 p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
3257 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3258 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3259 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3262 /* Dump all static debug lines for each relevant block */
3263 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3264 const struct dbg_block_chip *block_per_chip;
3265 const struct dbg_block *block;
3266 bool is_removed, has_dbg_bus;
3267 u16 modes_buf_offset;
3271 qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
3272 is_removed = GET_FIELD(block_per_chip->flags,
3273 DBG_BLOCK_CHIP_IS_REMOVED);
3274 has_dbg_bus = GET_FIELD(block_per_chip->flags,
3275 DBG_BLOCK_CHIP_HAS_DBG_BUS);
3277 /* read+clear for NWS parity is not working, skip NWS block */
3278 if (block_id == BLOCK_NWS)
/* If the block's debug bus is mode-gated, honor the mode match. */
3281 if (!is_removed && has_dbg_bus &&
3282 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3283 DBG_MODE_HDR_EVAL_MODE) > 0) {
3285 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3286 DBG_MODE_HDR_MODES_BUF_OFFSET);
3287 if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
3288 has_dbg_bus = false;
3291 if (is_removed || !has_dbg_bus)
3294 block_dwords = NUM_DBG_LINES(block_per_chip) *
3295 STATIC_DEBUG_LINE_DWORDS;
3297 /* Dump static section params */
3298 block = get_dbg_block(p_hwfn, (enum block_id)block_id);
3299 offset += qed_grc_dump_mem_hdr(p_hwfn,
3305 32, false, "STATIC", 0);
/* Size-only path. */
3308 offset += block_dwords;
3312 /* If all lines are invalid - dump zeros */
3313 if (dev_data->block_in_reset[block_id]) {
3314 memset(dump_buf + offset, 0,
3315 DWORDS_TO_BYTES(block_dwords));
3316 offset += block_dwords;
3320 /* Enable block's client */
3321 qed_bus_enable_clients(p_hwfn,
3323 BIT(block_per_chip->dbg_client_id));
3325 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3326 len = STATIC_DEBUG_LINE_DWORDS;
3327 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
3329 /* Configure debug line ID */
3330 qed_bus_config_dbg_line(p_hwfn,
3332 (enum block_id)block_id,
3333 (u8)line_id, 0xf, 0, 0, 0);
3335 /* Read debug line info */
3336 offset += qed_grc_dump_addr_range(p_hwfn,
3342 true, SPLIT_TYPE_NONE,
3346 /* Disable block's client and debug output */
3347 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3348 qed_bus_config_dbg_line(p_hwfn, p_ptt,
3349 (enum block_id)block_id, 0, 0, 0, 0, 0);
3353 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3354 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3360 /* Performs GRC Dump to the specified buffer.
3361 * Returns the dumped size in dwords.
/* Top-level GRC dump orchestrator. Order matters: reset registers are
 * dumped before blocks are taken out of reset; modified registers before
 * they are modified; storms are stalled before IOR/VFC reads; parities
 * are masked via MFW for the duration (restored at the end) and parity
 * status is cleared afterwards. Each section is gated by its
 * DBG_GRC_PARAM_* inclusion flag.
 */
3363 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3364 struct qed_ptt *p_ptt,
3366 bool dump, u32 *num_dumped_dwords)
3368 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3369 u32 dwords_read, offset = 0;
3370 bool parities_masked = false;
3373 *num_dumped_dwords = 0;
3374 dev_data->num_regs_read = 0;
3376 /* Update reset state */
3378 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3380 /* Dump global params */
/* 4 global params follow (dump-type + the three num_param calls). */
3381 offset += qed_dump_common_global_params(p_hwfn,
3383 dump_buf + offset, dump, 4);
3384 offset += qed_dump_str_param(dump_buf + offset,
3385 dump, "dump-type", "grc-dump");
3386 offset += qed_dump_num_param(dump_buf + offset,
3390 offset += qed_dump_num_param(dump_buf + offset,
3394 offset += qed_dump_num_param(dump_buf + offset,
3395 dump, "num-ports", dev_data->num_ports);
3397 /* Dump reset registers (dumped before taking blocks out of reset ) */
3398 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3399 offset += qed_grc_dump_reset_regs(p_hwfn,
3401 dump_buf + offset, dump);
3403 /* Take all blocks out of reset (using reset registers) */
3405 qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3406 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3409 /* Disable all parities using MFW command */
3411 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3412 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3413 if (!parities_masked) {
3415 "Failed to mask parities using MFW\n");
/* In parity-safe mode a failed mask aborts the dump. */
3416 if (qed_grc_get_param
3417 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3418 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3422 /* Dump modified registers (dumped before modifying them) */
3423 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3424 offset += qed_grc_dump_modified_regs(p_hwfn,
3426 dump_buf + offset, dump);
/* Stall storms before reading IOR/VFC so their state is consistent. */
3430 (qed_grc_is_included(p_hwfn,
3431 DBG_GRC_PARAM_DUMP_IOR) ||
3432 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3433 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3436 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3437 bool block_enable[MAX_BLOCK_ID];
3439 /* Dump all blocks except MCP */
3440 for (i = 0; i < MAX_BLOCK_ID; i++)
3441 block_enable[i] = true;
3442 block_enable[BLOCK_MCP] = false;
3443 offset += qed_grc_dump_registers(p_hwfn,
3448 block_enable, NULL);
3450 /* Dump special registers */
3451 offset += qed_grc_dump_special_regs(p_hwfn,
3453 dump_buf + offset, dump);
3457 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3460 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3461 offset += qed_grc_dump_mcp(p_hwfn,
3462 p_ptt, dump_buf + offset, dump);
3465 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3466 offset += qed_grc_dump_ctx(p_hwfn,
3467 p_ptt, dump_buf + offset, dump);
3469 /* Dump RSS memories */
3470 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3471 offset += qed_grc_dump_rss(p_hwfn,
3472 p_ptt, dump_buf + offset, dump);
3475 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3476 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3477 offset += qed_grc_dump_big_ram(p_hwfn,
3483 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3484 dwords_read = qed_grc_dump_vfc(p_hwfn,
3485 p_ptt, dump_buf + offset, dump);
3486 offset += dwords_read;
3488 return DBG_STATUS_VFC_READ_ERROR;
/* PHY tbus dump is K2-ASIC only. */
3492 if (qed_grc_is_included(p_hwfn,
3493 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3494 CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3495 offset += qed_grc_dump_phy(p_hwfn,
3496 p_ptt, dump_buf + offset, dump);
3498 /* Dump MCP HW Dump */
3499 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3500 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
3501 offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3503 dump_buf + offset, dump);
3505 /* Dump static debug data (only if not during debug bus recording) */
3506 if (qed_grc_is_included(p_hwfn,
3507 DBG_GRC_PARAM_DUMP_STATIC) &&
3508 (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3509 offset += qed_grc_dump_static_debug(p_hwfn,
3511 dump_buf + offset, dump);
3513 /* Dump last section */
3514 offset += qed_dump_last_section(dump_buf, offset, dump);
3517 /* Unstall storms */
3518 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3519 qed_grc_stall_storms(p_hwfn, p_ptt, false);
3521 /* Clear parity status */
3522 qed_grc_clear_all_prty(p_hwfn, p_ptt);
3524 /* Enable all parities using MFW command */
3525 if (parities_masked)
3526 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3529 *num_dumped_dwords = offset;
3531 return DBG_STATUS_OK;
3534 /* Writes the specified failing Idle Check rule to the specified buffer.
3535 * Returns the dumped size in dwords.
/* Emits one dbg_idle_chk_result_hdr followed by: (a) per-condition-
 * register headers + the pre-read cond_reg_values, and (b) per-info-
 * register headers + values read live from HW (skipped for blocks in
 * reset or whose mode does not match).
 * NOTE(review): listing is non-contiguous; some parameters (e.g. rule_id)
 * are only visible through their uses below.
 */
3537 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3538 struct qed_ptt *p_ptt,
3543 const struct dbg_idle_chk_rule *rule,
3544 u16 fail_entry_id, u32 *cond_reg_values)
3546 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3547 const struct dbg_idle_chk_cond_reg *cond_regs;
3548 const struct dbg_idle_chk_info_reg *info_regs;
3549 u32 i, next_reg_offset = 0, offset = 0;
3550 struct dbg_idle_chk_result_hdr *hdr;
3551 const union dbg_idle_chk_reg *regs;
3554 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
/* Register descriptors: condition regs first, then info regs. */
3555 regs = (const union dbg_idle_chk_reg *)
3556 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3558 cond_regs = &regs[0].cond_reg;
3559 info_regs = &regs[rule->num_cond_regs].info_reg;
3561 /* Dump rule data */
3563 memset(hdr, 0, sizeof(*hdr));
3564 hdr->rule_id = rule_id;
3565 hdr->mem_entry_id = fail_entry_id;
3566 hdr->severity = rule->severity;
3567 hdr->num_dumped_cond_regs = rule->num_cond_regs;
3570 offset += IDLE_CHK_RESULT_HDR_DWORDS;
3572 /* Dump condition register values */
3573 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3574 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3575 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3578 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
3580 /* Write register header */
/* Size-only path: account for header + entry without writing. */
3582 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3587 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3588 memset(reg_hdr, 0, sizeof(*reg_hdr));
3589 reg_hdr->start_entry = reg->start_entry;
3590 reg_hdr->size = reg->entry_size;
/* IS_MEM marks memories (multiple entries or a nonzero start entry). */
3591 SET_FIELD(reg_hdr->data,
3592 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3593 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
3594 SET_FIELD(reg_hdr->data,
3595 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3597 /* Write register values */
3598 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
3599 dump_buf[offset] = cond_reg_values[next_reg_offset];
3602 /* Dump info register values */
3603 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3604 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3607 /* Check if register's block is in reset */
/* Size-only path. */
3609 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3613 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3614 if (block_id >= MAX_BLOCK_ID) {
3615 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3619 if (!dev_data->block_in_reset[block_id]) {
3620 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3621 bool wide_bus, eval_mode, mode_match = true;
3622 u16 modes_buf_offset;
3625 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
3626 (dump_buf + offset);
/* Honor per-register mode gating before reading it. */
3629 eval_mode = GET_FIELD(reg->mode.data,
3630 DBG_MODE_HDR_EVAL_MODE) > 0;
3633 GET_FIELD(reg->mode.data,
3634 DBG_MODE_HDR_MODES_BUF_OFFSET);
3636 qed_is_mode_match(p_hwfn,
3643 addr = GET_FIELD(reg->data,
3644 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3645 wide_bus = GET_FIELD(reg->data,
3646 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
3648 /* Write register header */
3649 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3650 hdr->num_dumped_info_regs++;
3651 memset(reg_hdr, 0, sizeof(*reg_hdr));
3652 reg_hdr->size = reg->size;
/* Info reg IDs are numbered after all condition regs. */
3653 SET_FIELD(reg_hdr->data,
3654 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3655 rule->num_cond_regs + reg_id);
3657 /* Write register values */
3658 offset += qed_grc_dump_addr_range(p_hwfn,
3663 reg->size, wide_bus,
3664 SPLIT_TYPE_NONE, 0);
3671 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
/* For each input rule: verifies all condition-register blocks are out of
 * reset, finds the max entry count, reads every entry of every condition
 * register into cond_reg_values, evaluates the rule's condition function
 * (cond_arr[rule->cond_id]; true means failure) and dumps failures via
 * qed_idle_chk_dump_failure(). In size-only mode each rule reserves
 * worst-case room (num_reg_entries * entry_dump_size).
 */
3673 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3674 u32 *dump_buf, bool dump,
3675 const struct dbg_idle_chk_rule *input_rules,
3676 u32 num_input_rules, u32 *num_failing_rules)
3678 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3679 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3684 *num_failing_rules = 0;
3686 for (i = 0; i < num_input_rules; i++) {
3687 const struct dbg_idle_chk_cond_reg *cond_regs;
3688 const struct dbg_idle_chk_rule *rule;
3689 const union dbg_idle_chk_reg *regs;
3690 u16 num_reg_entries = 1;
3691 bool check_rule = true;
3692 const u32 *imm_values;
3694 rule = &input_rules[i];
3695 regs = (const union dbg_idle_chk_reg *)
3696 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3698 cond_regs = &regs[0].cond_reg;
3700 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3703 /* Check if all condition register blocks are out of reset, and
3704 * find maximal number of entries (all condition registers that
3705 * are memories must have the same size, which is > 1).
3707 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3710 GET_FIELD(cond_regs[reg_id].data,
3711 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3713 if (block_id >= MAX_BLOCK_ID) {
3714 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3718 check_rule = !dev_data->block_in_reset[block_id];
3719 if (cond_regs[reg_id].num_entries > num_reg_entries)
3720 num_reg_entries = cond_regs[reg_id].num_entries;
/* A rule whose blocks are in reset cannot be evaluated. */
3723 if (!check_rule && dump)
/* Size-only path: reserve worst case for this rule. */
3727 u32 entry_dump_size =
3728 qed_idle_chk_dump_failure(p_hwfn,
3737 offset += num_reg_entries * entry_dump_size;
3738 (*num_failing_rules) += num_reg_entries;
3742 /* Go over all register entries (number of entries is the same
3743 * for all condition registers).
3745 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3746 u32 next_reg_offset = 0;
3748 /* Read current entry of all condition registers */
3749 for (reg_id = 0; reg_id < rule->num_cond_regs;
3751 const struct dbg_idle_chk_cond_reg *reg =
3753 u32 padded_entry_size, addr;
3756 /* Find GRC address (if it's a memory, the
3757 * address of the specific entry is calculated).
3759 addr = GET_FIELD(reg->data,
3760 DBG_IDLE_CHK_COND_REG_ADDRESS);
3762 GET_FIELD(reg->data,
3763 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
3764 if (reg->num_entries > 1 ||
3765 reg->start_entry > 0) {
/* Memory entries are padded to a power of two. */
3767 reg->entry_size > 1 ?
3768 roundup_pow_of_two(reg->entry_size) :
3770 addr += (reg->start_entry + entry_id) *
3774 /* Read registers */
/* Guard against overflowing the on-stack values buffer. */
3775 if (next_reg_offset + reg->entry_size >=
3776 IDLE_CHK_MAX_ENTRIES_SIZE) {
3778 "idle check registers entry is too large\n");
3783 qed_grc_dump_addr_range(p_hwfn, p_ptt,
3789 SPLIT_TYPE_NONE, 0);
3792 /* Call rule condition function.
3793 * If returns true, it's a failure.
3795 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3797 offset += qed_idle_chk_dump_failure(p_hwfn,
3805 (*num_failing_rules)++;
3813 /* Performs Idle Check Dump to the specified buffer.
3814 * Returns the dumped size in dwords.
/* Writes the idle-check global params and section header (with a
 * num_rules placeholder), walks the BIN_BUF_DBG_IDLE_CHK_RULES array
 * cond-header by cond-header (mode-gated), dumps each group's rules via
 * qed_idle_chk_dump_rule_entries(), then patches num_rules in place.
 */
3816 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3817 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3819 struct virt_mem_desc *dbg_buf =
3820 &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
3821 u32 num_failing_rules_offset, offset = 0,
3822 input_offset = 0, num_failing_rules = 0;
3824 /* Dump global params - 1 must match below amount of params */
3825 offset += qed_dump_common_global_params(p_hwfn,
3827 dump_buf + offset, dump, 1);
3828 offset += qed_dump_str_param(dump_buf + offset,
3829 dump, "dump-type", "idle-chk");
3831 /* Dump idle check section header with a single parameter */
3832 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
/* Remember where num_rules lives so it can be overwritten later. */
3833 num_failing_rules_offset = offset;
3834 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
3836 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
3837 const struct dbg_idle_chk_cond_hdr *cond_hdr =
3838 (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
3840 bool eval_mode, mode_match = true;
3841 u32 curr_failing_rules;
3842 u16 modes_buf_offset;
/* Check if the condition header's mode matches the current modes. */
3845 eval_mode = GET_FIELD(cond_hdr->mode.data,
3846 DBG_MODE_HDR_EVAL_MODE) > 0;
3849 GET_FIELD(cond_hdr->mode.data,
3850 DBG_MODE_HDR_MODES_BUF_OFFSET);
3851 mode_match = qed_is_mode_match(p_hwfn,
3856 const struct dbg_idle_chk_rule *rule =
3857 (const struct dbg_idle_chk_rule *)((u32 *)
3860 u32 num_input_rules =
3861 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
3863 qed_idle_chk_dump_rule_entries(p_hwfn,
3870 &curr_failing_rules);
3871 num_failing_rules += curr_failing_rules;
3874 input_offset += cond_hdr->data_size;
3877 /* Overwrite num_rules parameter */
3879 qed_dump_num_param(dump_buf + num_failing_rules_offset,
3880 dump, "num_rules", num_failing_rules);
3882 /* Dump last section */
3883 offset += qed_dump_last_section(dump_buf, offset, dump);
3888 /* Finds the meta data image in NVRAM */
/* Issues DRV_MSG_CODE_NVM_GET_FILE_ATT to the MFW to locate an image of
 * the given type; on success returns its byte offset and length, after
 * verifying the length is dword-aligned.
 * Returns DBG_STATUS_OK, _NVRAM_GET_IMAGE_FAILED or
 * _NON_ALIGNED_NVRAM_IMAGE.
 */
3889 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3890 struct qed_ptt *p_ptt,
3892 u32 *nvram_offset_bytes,
3893 u32 *nvram_size_bytes)
3895 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
3896 struct mcp_file_att file_att;
3899 /* Call NVRAM get file command */
3900 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
3902 DRV_MSG_CODE_NVM_GET_FILE_ATT,
3906 &ret_txn_size, (u32 *)&file_att);
3908 /* Check response */
3910 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3911 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3913 /* Update return values */
3914 *nvram_offset_bytes = file_att.nvm_start_addr;
3915 *nvram_size_bytes = file_att.len;
3919 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
3920 image_type, *nvram_offset_bytes, *nvram_size_bytes);
3922 /* Check alignment */
/* NVRAM reads below assume whole dwords. */
3923 if (*nvram_size_bytes & 0x3)
3924 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
3926 return DBG_STATUS_OK;
3929 /* Reads data from NVRAM */
/* Copies nvram_size_bytes from NVRAM into ret_buf in chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes using DRV_MSG_CODE_NVM_READ_NVRAM; each
 * chunk's response code is checked. Returns DBG_STATUS_OK or
 * _NVRAM_READ_FAILED.
 */
3930 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3931 struct qed_ptt *p_ptt,
3932 u32 nvram_offset_bytes,
3933 u32 nvram_size_bytes, u32 *ret_buf)
3935 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
/* Signed so the > 0 loop condition terminates even on short reads. */
3936 s32 bytes_left = nvram_size_bytes;
3937 u32 read_offset = 0, param = 0;
3941 "nvram_read: reading image of size %d bytes from NVRAM\n",
3947 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
3949 /* Call NVRAM read command */
3950 SET_MFW_FIELD(param,
3951 DRV_MB_PARAM_NVM_OFFSET,
3952 nvram_offset_bytes + read_offset);
3953 SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
3954 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3955 DRV_MSG_CODE_NVM_READ_NVRAM, param,
3957 &ret_mcp_param, &ret_read_size,
3958 (u32 *)((u8 *)ret_buf + read_offset)))
3959 return DBG_STATUS_NVRAM_READ_FAILED;
3961 /* Check response */
3962 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3963 return DBG_STATUS_NVRAM_READ_FAILED;
3965 /* Update read offset */
3966 read_offset += ret_read_size;
3967 bytes_left -= ret_read_size;
3968 } while (bytes_left > 0);
3970 return DBG_STATUS_OK;
3973 /* Get info on the MCP Trace data in the scratchpad:
3974 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
3975 * - trace_data_size (OUT): trace data size in bytes (without the header)
/* Reads the trace offsize word from the MCP scratchpad, derives the GRC
 * address of the trace section, and validates the section's signature
 * before reading its size. Returns DBG_STATUS_OK or
 * _INVALID_TRACE_SIGNATURE.
 */
3977 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
3978 struct qed_ptt *p_ptt,
3979 u32 *trace_data_grc_addr,
3980 u32 *trace_data_size)
3982 u32 spad_trace_offsize, signature;
3984 /* Read trace section offsize structure from MCP scratchpad */
3985 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
3987 /* Extract trace section address from offsize (in scratchpad) */
3988 *trace_data_grc_addr =
3989 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
3991 /* Read signature from MCP trace section */
3992 signature = qed_rd(p_hwfn, p_ptt,
3993 *trace_data_grc_addr +
3994 offsetof(struct mcp_trace, signature));
3996 if (signature != MFW_TRACE_SIGNATURE)
3997 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
3999 /* Read trace size from MCP trace section */
4000 *trace_data_size = qed_rd(p_hwfn,
4002 *trace_data_grc_addr +
4003 offsetof(struct mcp_trace, size));
4005 return DBG_STATUS_OK;
4008 /* Reads MCP trace meta data image from NVRAM
4009 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4010 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4011 * loaded from file).
4012 * - trace_meta_size (OUT): size in bytes of the trace meta data.
/* The running bundle ID (0 or 1) is stored right after the trace data in
 * the scratchpad; it selects between the MFW_TRACE1/MFW_TRACE2 NVRAM
 * images, which are then located via qed_find_nvram_image().
 */
4014 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4015 struct qed_ptt *p_ptt,
4016 u32 trace_data_size_bytes,
4017 u32 *running_bundle_id,
4018 u32 *trace_meta_offset,
4019 u32 *trace_meta_size)
4021 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4023 /* Read MCP trace section offsize structure from MCP scratchpad */
4024 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4026 /* Find running bundle ID */
4028 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4029 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4030 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4031 if (*running_bundle_id > 1)
4032 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4034 /* Find image in NVRAM */
4036 (*running_bundle_id ==
4037 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4038 return qed_find_nvram_image(p_hwfn,
4041 trace_meta_offset, trace_meta_size);
4044 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
/* On success the buffer holds the meta image, which is validated here:
 * a leading NVM_MAGIC_VALUE signature, a module count byte, length-prefixed
 * module name records, then a trailing NVM_MAGIC_VALUE signature.
 * Returns DBG_STATUS_INVALID_TRACE_SIGNATURE if either signature is wrong,
 * or the status propagated from qed_nvram_read().
 */
4045 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4046 struct qed_ptt *p_ptt,
4047 u32 nvram_offset_in_bytes,
4048 u32 size_in_bytes, u32 *buf)
4050 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4051 enum dbg_status status;
4054 /* Read meta data from NVRAM */
4055 status = qed_nvram_read(p_hwfn,
4057 nvram_offset_in_bytes, size_in_bytes, buf);
4058 if (status != DBG_STATUS_OK)
4061 /* Extract and check first signature */
/* Byte-wise unaligned read: the image layout is byte-packed */
4062 signature = qed_read_unaligned_dword(byte_buf);
4063 byte_buf += sizeof(signature);
4064 if (signature != NVM_MAGIC_VALUE)
4065 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4067 /* Extract number of modules */
4068 modules_num = *(byte_buf++);
4070 /* Skip all modules */
/* Each module record is a 1-byte length followed by that many bytes */
4071 for (i = 0; i < modules_num; i++) {
4072 module_len = *(byte_buf++);
4073 byte_buf += module_len;
4076 /* Extract and check second signature */
4077 signature = qed_read_unaligned_dword(byte_buf);
4078 byte_buf += sizeof(signature);
4079 if (signature != NVM_MAGIC_VALUE)
4080 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4082 return DBG_STATUS_OK;
4085 /* Dump MCP Trace */
/* Dumps the MCP trace (data read from the MCP scratchpad plus meta image from
 * NVRAM) into dump_buf. When dump is false, only the required size is computed.
 * *num_dumped_dwords receives the total dumped size in dwords.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4086 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4087 struct qed_ptt *p_ptt,
4089 bool dump, u32 *num_dumped_dwords)
4091 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4092 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4093 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4094 enum dbg_status status;
4098 *num_dumped_dwords = 0;
/* MFW access is skipped entirely when the NO_MCP GRC param is set */
4100 use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4102 /* Get trace data info */
4103 status = qed_mcp_trace_get_data_info(p_hwfn,
4105 &trace_data_grc_addr,
4106 &trace_data_size_bytes);
4107 if (status != DBG_STATUS_OK)
4110 /* Dump global params */
4111 offset += qed_dump_common_global_params(p_hwfn,
4113 dump_buf + offset, dump, 1);
4114 offset += qed_dump_str_param(dump_buf + offset,
4115 dump, "dump-type", "mcp-trace");
4117 /* Halt MCP while reading from scratchpad so the read data will be
4118 * consistent. if halt fails, MCP trace is taken anyway, with a small
4119 * risk that it may be corrupt.
4121 if (dump && use_mfw) {
4122 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4124 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4127 /* Find trace data size */
4128 trace_data_size_dwords =
4129 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4132 /* Dump trace data section header and param */
4133 offset += qed_dump_section_hdr(dump_buf + offset,
4134 dump, "mcp_trace_data", 1);
4135 offset += qed_dump_num_param(dump_buf + offset,
4136 dump, "size", trace_data_size_dwords);
4138 /* Read trace data from scratchpad into dump buffer */
4139 offset += qed_grc_dump_addr_range(p_hwfn,
4143 BYTES_TO_DWORDS(trace_data_grc_addr),
4144 trace_data_size_dwords, false,
4145 SPLIT_TYPE_NONE, 0);
4147 /* Resume MCP (only if halt succeeded) */
4148 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4149 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4151 /* Dump trace meta section header */
4152 offset += qed_dump_section_hdr(dump_buf + offset,
4153 dump, "mcp_trace_meta", 1);
4155 /* If MCP Trace meta size parameter was set, use it.
4156 * Otherwise, read trace meta.
4157 * trace_meta_size_bytes is dword-aligned.
4159 trace_meta_size_bytes =
4160 qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4161 if ((!trace_meta_size_bytes || dump) && use_mfw)
4162 status = qed_mcp_trace_get_meta_info(p_hwfn,
4164 trace_data_size_bytes,
4166 &trace_meta_offset_bytes,
4167 &trace_meta_size_bytes);
4168 if (status == DBG_STATUS_OK)
4169 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4171 /* Dump trace meta size param */
4172 offset += qed_dump_num_param(dump_buf + offset,
4173 dump, "size", trace_meta_size_dwords);
4175 /* Read trace meta image into dump buffer */
4176 if (dump && trace_meta_size_dwords)
4177 status = qed_mcp_trace_read_meta(p_hwfn,
4179 trace_meta_offset_bytes,
4180 trace_meta_size_bytes,
/* Meta read failure is not fatal: offset is only advanced on success */
4182 if (status == DBG_STATUS_OK)
4183 offset += trace_meta_size_dwords;
4185 /* Dump last section */
4186 offset += qed_dump_last_section(dump_buf, offset, dump);
4188 *num_dumped_dwords = offset;
4190 /* If no mcp access, indicate that the dump doesn't contain the meta
4193 return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dumps the GRC trace (reg) FIFO into dump_buf. When dump is false, only the
 * worst-case size (REG_FIFO_DEPTH_DWORDS plus headers) is computed, since the
 * actual FIFO occupancy can only be discovered by draining it.
 * *num_dumped_dwords receives the total dumped size in dwords.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4197 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4198 struct qed_ptt *p_ptt,
4200 bool dump, u32 *num_dumped_dwords)
4202 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4205 *num_dumped_dwords = 0;
4207 /* Dump global params */
4208 offset += qed_dump_common_global_params(p_hwfn,
4210 dump_buf + offset, dump, 1);
4211 offset += qed_dump_str_param(dump_buf + offset,
4212 dump, "dump-type", "reg-fifo");
4214 /* Dump fifo data section header and param. The size param is 0 for
4215 * now, and is overwritten after reading the FIFO.
4217 offset += qed_dump_section_hdr(dump_buf + offset,
4218 dump, "reg_fifo_data", 1);
4219 size_param_offset = offset;
4220 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4223 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4224 * test how much data is available, except for reading it.
4226 offset += REG_FIFO_DEPTH_DWORDS;
4230 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4231 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4233 /* Pull available data from fifo. Use DMAE since this is widebus memory
4234 * and must be accessed atomically. Test for dwords_read not passing
4235 * buffer size since more entries could be added to the buffer as we are
4238 addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4239 len = REG_FIFO_ELEMENT_DWORDS;
/* Drain one FIFO element per iteration until empty or depth limit hit */
4240 for (dwords_read = 0;
4241 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4242 dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4243 offset += qed_grc_dump_addr_range(p_hwfn,
4249 true, SPLIT_TYPE_NONE,
4251 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4252 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
/* Now that the real size is known, back-patch the size param */
4255 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4258 /* Dump last section */
4259 offset += qed_dump_last_section(dump_buf, offset, dump);
4261 *num_dumped_dwords = offset;
4263 return DBG_STATUS_OK;
/* Dumps the IGU error-handling FIFO into dump_buf. Mirrors
 * qed_reg_fifo_dump() but drains IGU_REG_ERROR_HANDLING_MEMORY using the IGU
 * depth/element constants. When dump is false, only the worst-case size is
 * computed. *num_dumped_dwords receives the total dumped size in dwords.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4267 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4268 struct qed_ptt *p_ptt,
4270 bool dump, u32 *num_dumped_dwords)
4272 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4275 *num_dumped_dwords = 0;
4277 /* Dump global params */
4278 offset += qed_dump_common_global_params(p_hwfn,
4280 dump_buf + offset, dump, 1);
4281 offset += qed_dump_str_param(dump_buf + offset,
4282 dump, "dump-type", "igu-fifo");
4284 /* Dump fifo data section header and param. The size param is 0 for
4285 * now, and is overwritten after reading the FIFO.
4287 offset += qed_dump_section_hdr(dump_buf + offset,
4288 dump, "igu_fifo_data", 1);
4289 size_param_offset = offset;
4290 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4293 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4294 * test how much data is available, except for reading it.
4296 offset += IGU_FIFO_DEPTH_DWORDS;
4300 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4301 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4303 /* Pull available data from fifo. Use DMAE since this is widebus memory
4304 * and must be accessed atomically. Test for dwords_read not passing
4305 * buffer size since more entries could be added to the buffer as we are
4308 addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4309 len = IGU_FIFO_ELEMENT_DWORDS;
/* Drain one FIFO element per iteration until empty or depth limit hit */
4310 for (dwords_read = 0;
4311 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4312 dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4313 offset += qed_grc_dump_addr_range(p_hwfn,
4319 true, SPLIT_TYPE_NONE,
4321 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4322 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
/* Back-patch the size param now that the drained size is known */
4325 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4328 /* Dump last section */
4329 offset += qed_dump_last_section(dump_buf, offset, dump);
4331 *num_dumped_dwords = offset;
4333 return DBG_STATUS_OK;
4336 /* Protection Override dump */
/* Dumps the GRC protection-override window into dump_buf. When dump is
 * false, only the worst-case size (PROTECTION_OVERRIDE_DEPTH_DWORDS plus
 * headers) is computed. *num_dumped_dwords receives the total size in dwords.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4337 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4338 struct qed_ptt *p_ptt,
4341 u32 *num_dumped_dwords)
4343 u32 size_param_offset, override_window_dwords, offset = 0, addr;
4345 *num_dumped_dwords = 0;
4347 /* Dump global params */
4348 offset += qed_dump_common_global_params(p_hwfn,
4350 dump_buf + offset, dump, 1);
4351 offset += qed_dump_str_param(dump_buf + offset,
4352 dump, "dump-type", "protection-override");
4354 /* Dump data section header and param. The size param is 0 for now,
4355 * and is overwritten after reading the data.
4357 offset += qed_dump_section_hdr(dump_buf + offset,
4358 dump, "protection_override_data", 1);
4359 size_param_offset = offset;
4360 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4363 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4367 /* Add override window info to buffer */
/* Window size in dwords = number of valid entries * element size */
4368 override_window_dwords =
4369 qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4370 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4371 if (override_window_dwords) {
4372 addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4373 offset += qed_grc_dump_addr_range(p_hwfn,
4378 override_window_dwords,
4379 true, SPLIT_TYPE_NONE, 0);
/* Back-patch the size param with the actual window size */
4380 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4381 override_window_dwords);
4384 /* Dump last section */
4385 offset += qed_dump_last_section(dump_buf, offset, dump);
4387 *num_dumped_dwords = offset;
4389 return DBG_STATUS_OK;
4392 /* Performs FW Asserts Dump to the specified buffer.
4393 * Returns the dumped size in dwords.
/* For each Storm not in reset, reads its FW-asserts RAM section descriptor
 * from the Storm's fw_info, then dumps the most recently written assert list
 * element. When dump is false, only the size is accumulated.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4395 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4396 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4398 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4399 struct fw_asserts_ram_section *asserts;
4400 char storm_letter_str[2] = "?";
4401 struct fw_info fw_info;
4405 /* Dump global params */
4406 offset += qed_dump_common_global_params(p_hwfn,
4408 dump_buf + offset, dump, 1);
4409 offset += qed_dump_str_param(dump_buf + offset,
4410 dump, "dump-type", "fw-asserts");
4412 /* Find Storm dump size */
4413 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4414 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4415 struct storm_defs *storm = &s_storm_defs[storm_id];
4416 u32 last_list_idx, addr;
/* A Storm in reset cannot be read — skip it */
4418 if (dev_data->block_in_reset[storm->sem_block_id])
4421 /* Read FW info for the current Storm */
4422 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4424 asserts = &fw_info.fw_asserts_section;
4426 /* Dump FW Asserts section header and params */
4427 storm_letter_str[0] = storm->letter;
4428 offset += qed_dump_section_hdr(dump_buf + offset,
4429 dump, "fw_asserts", 2);
4430 offset += qed_dump_str_param(dump_buf + offset,
4431 dump, "storm", storm_letter_str);
4432 offset += qed_dump_num_param(dump_buf + offset,
4435 asserts->list_element_dword_size);
4437 /* Read and dump FW Asserts data */
4439 offset += asserts->list_element_dword_size;
/* Locate the asserts section inside the Storm's fast-memory INT RAM */
4443 fw_asserts_section_addr = storm->sem_fast_mem_addr +
4444 SEM_FAST_REG_INT_RAM +
4445 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4446 next_list_idx_addr = fw_asserts_section_addr +
4447 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4448 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
/* Last written element precedes the next-write index (wraps to list end) */
4449 last_list_idx = (next_list_idx > 0 ?
4451 asserts->list_num_elements) - 1;
4452 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4453 asserts->list_dword_offset +
4454 last_list_idx * asserts->list_element_dword_size;
4456 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4459 asserts->list_element_dword_size,
4460 false, SPLIT_TYPE_NONE, 0);
4463 /* Dump last section */
4464 offset += qed_dump_last_section(dump_buf, offset, dump);
4469 /* Dumps the specified ILT pages to the specified buffer.
4470 * Returns the dumped size in dwords.
/* Iterates pages [start_page_id, start_page_id + num_pages - 1]; pages with a
 * NULL shadow virt_addr are skipped. Depending on dump_page_ids, either the
 * page ID or the full page contents is written.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4472 static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
4476 struct phys_mem_desc *ilt_pages,
4479 u32 page_id, end_page_id, offset = 0;
4484 end_page_id = start_page_id + num_pages - 1;
4486 for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4487 struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
4491 * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
/* Skip unallocated (never backed) ILT shadow pages */
4495 if (!ilt_pages[page_id].virt_addr)
4498 if (dump_page_ids) {
4499 /* Copy page ID to dump buffer */
4501 *(dump_buf + offset) = page_id;
4504 /* Copy page memory to dump buffer */
4506 memcpy(dump_buf + offset,
4507 mem_desc->virt_addr, mem_desc->size);
4508 offset += BYTES_TO_DWORDS(mem_desc->size);
4515 /* Dumps a section containing the dumped ILT pages.
4516 * Returns the dumped size in dwords.
/* Emits one named section ("ilt_page_ids" or "ilt_page_mem") covering the
 * CDUC connection pages (PF then per-VF) and the CDUT task work pages
 * (PF then per-VF), each gated by its DBG_GRC_PARAM_DUMP_ILT_* param.
 * The section's size param is back-patched once the real size is known.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4518 static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
4521 u32 valid_conn_pf_pages,
4522 u32 valid_conn_vf_pages,
4523 struct phys_mem_desc *ilt_pages,
4526 struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4527 u32 pf_start_line, start_page_id, offset = 0;
4528 u32 cdut_pf_init_pages, cdut_vf_init_pages;
4529 u32 cdut_pf_work_pages, cdut_vf_work_pages;
4530 u32 base_data_offset, size_param_offset;
4531 u32 cdut_pf_pages, cdut_vf_pages;
4532 const char *section_name;
4535 section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
4536 cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
4537 cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
4538 cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
4539 cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
4540 cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
4541 cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
4542 pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
4545 qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
4547 /* Dump size parameter (0 for now, overwritten with real size later) */
4548 size_param_offset = offset;
4549 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4550 base_data_offset = offset;
4552 /* CDUC pages are ordered as follows:
4553 * - PF pages - valid section (included in PF connection type mapping)
4554 * - PF pages - invalid section (not dumped)
4555 * - For each VF in the PF:
4556 * - VF pages - valid section (included in VF connection type mapping)
4557 * - VF pages - invalid section (not dumped)
4559 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
4560 /* Dump connection PF pages */
/* Page IDs are relative to this PF's first ILT line */
4561 start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
4562 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4565 valid_conn_pf_pages,
4566 ilt_pages, dump_page_ids);
4568 /* Dump connection VF pages */
4569 start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
4570 for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4571 i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
4572 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4575 valid_conn_vf_pages,
4580 /* CDUT pages are ordered as follows:
4581 * - PF init pages (not dumped)
4583 * - For each VF in the PF:
4584 * - VF init pages (not dumped)
4587 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
4588 /* Dump task PF pages */
/* Skip the PF init pages; start at the PF work pages */
4589 start_page_id = clients[ILT_CLI_CDUT].first.val +
4590 cdut_pf_init_pages - pf_start_line;
4591 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4595 ilt_pages, dump_page_ids);
4597 /* Dump task VF pages */
4598 start_page_id = clients[ILT_CLI_CDUT].first.val +
4599 cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
4600 for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4601 i++, start_page_id += cdut_vf_pages)
4602 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4610 /* Overwrite size param */
4612 qed_dump_num_param(dump_buf + size_param_offset,
4613 dump, "size", offset - base_data_offset);
4618 /* Performs ILT Dump to the specified buffer.
4619 * Returns the dumped size in dwords.
/* Emits: 22 global params describing the CDUC/CDUT layout, per-connection-type
 * PF and VF CID counts, the physical memory descriptor of every ILT shadow
 * page, and finally two page sections (IDs then memory) via
 * qed_ilt_dump_pages_section(). When dump is false, only the size is computed.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4621 static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
4622 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4624 struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4625 u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
4626 u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
4627 u32 num_cids_per_page, conn_ctx_size;
4628 u32 cduc_page_size, cdut_page_size;
4629 struct phys_mem_desc *ilt_pages;
/* ILT page size is 2^(p_size + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN) bytes */
4632 cduc_page_size = 1 <<
4633 (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
4634 cdut_page_size = 1 <<
4635 (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
4636 conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
4637 num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
4638 ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
4640 /* Dump global params - 22 must match number of params below */
4641 offset += qed_dump_common_global_params(p_hwfn, p_ptt,
4642 dump_buf + offset, dump, 22);
4643 offset += qed_dump_str_param(dump_buf + offset,
4644 dump, "dump-type", "ilt-dump");
4645 offset += qed_dump_num_param(dump_buf + offset,
4647 "cduc-page-size", cduc_page_size);
4648 offset += qed_dump_num_param(dump_buf + offset,
4650 "cduc-first-page-id",
4651 clients[ILT_CLI_CDUC].first.val);
4652 offset += qed_dump_num_param(dump_buf + offset,
4654 "cduc-last-page-id",
4655 clients[ILT_CLI_CDUC].last.val);
4656 offset += qed_dump_num_param(dump_buf + offset,
4658 "cduc-num-pf-pages",
4660 [ILT_CLI_CDUC].pf_total_lines);
4661 offset += qed_dump_num_param(dump_buf + offset,
4663 "cduc-num-vf-pages",
4665 [ILT_CLI_CDUC].vf_total_lines);
4666 offset += qed_dump_num_param(dump_buf + offset,
4668 "max-conn-ctx-size",
4670 offset += qed_dump_num_param(dump_buf + offset,
4672 "cdut-page-size", cdut_page_size);
4673 offset += qed_dump_num_param(dump_buf + offset,
4675 "cdut-first-page-id",
4676 clients[ILT_CLI_CDUT].first.val);
4677 offset += qed_dump_num_param(dump_buf + offset,
4679 "cdut-last-page-id",
4680 clients[ILT_CLI_CDUT].last.val);
4681 offset += qed_dump_num_param(dump_buf + offset,
4683 "cdut-num-pf-init-pages",
4684 qed_get_cdut_num_pf_init_pages(p_hwfn));
4685 offset += qed_dump_num_param(dump_buf + offset,
4687 "cdut-num-vf-init-pages",
4688 qed_get_cdut_num_vf_init_pages(p_hwfn));
4689 offset += qed_dump_num_param(dump_buf + offset,
4691 "cdut-num-pf-work-pages",
4692 qed_get_cdut_num_pf_work_pages(p_hwfn));
4693 offset += qed_dump_num_param(dump_buf + offset,
4695 "cdut-num-vf-work-pages",
4696 qed_get_cdut_num_vf_work_pages(p_hwfn));
4697 offset += qed_dump_num_param(dump_buf + offset,
4699 "max-task-ctx-size",
4700 p_hwfn->p_cxt_mngr->task_ctx_size);
4701 offset += qed_dump_num_param(dump_buf + offset,
4704 p_hwfn->p_cxt_mngr->task_type_id);
4705 offset += qed_dump_num_param(dump_buf + offset,
4707 "first-vf-id-in-pf",
4708 p_hwfn->p_cxt_mngr->first_vf_in_pf);
4709 offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
4712 p_hwfn->p_cxt_mngr->vf_count);
4713 offset += qed_dump_num_param(dump_buf + offset,
4715 "ptr-size-bytes", sizeof(void *));
4716 offset += qed_dump_num_param(dump_buf + offset,
4719 p_hwfn->p_cxt_mngr->pf_start_line);
4720 offset += qed_dump_num_param(dump_buf + offset,
4722 "page-mem-desc-size-dwords",
4723 PAGE_MEM_DESC_SIZE_DWORDS);
4724 offset += qed_dump_num_param(dump_buf + offset,
4727 p_hwfn->p_cxt_mngr->ilt_shadow_size);
4728 /* Additional/Less parameters require matching of number in call to
4729 * dump_common_global_params()
4732 /* Dump section containing number of PF CIDs per connection type */
4733 offset += qed_dump_section_hdr(dump_buf + offset,
4734 dump, "num_pf_cids_per_conn_type", 1);
4735 offset += qed_dump_num_param(dump_buf + offset,
4736 dump, "size", NUM_OF_CONNECTION_TYPES_E4);
/* One dword per connection type; total valid PF CIDs accumulated alongside */
4737 for (conn_type = 0, valid_conn_pf_cids = 0;
4738 conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
4740 p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
4743 *(dump_buf + offset) = num_pf_cids;
4744 valid_conn_pf_cids += num_pf_cids;
4747 /* Dump section containing number of VF CIDs per connection type */
4748 offset += qed_dump_section_hdr(dump_buf + offset,
4749 dump, "num_vf_cids_per_conn_type", 1);
4750 offset += qed_dump_num_param(dump_buf + offset,
4751 dump, "size", NUM_OF_CONNECTION_TYPES_E4);
4752 for (conn_type = 0, valid_conn_vf_cids = 0;
4753 conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
4755 p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
4758 *(dump_buf + offset) = num_vf_cids;
4759 valid_conn_vf_cids += num_vf_cids;
4762 /* Dump section containing physical memory descs for each ILT page */
4763 num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
4764 offset += qed_dump_section_hdr(dump_buf + offset,
4765 dump, "ilt_page_desc", 1);
4766 offset += qed_dump_num_param(dump_buf + offset,
4769 num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
4771 /* Copy memory descriptors to dump buffer */
4775 for (page_id = 0; page_id < num_pages;
4776 page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
4777 memcpy(dump_buf + offset,
4778 &ilt_pages[page_id],
4779 DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
4781 offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
/* Convert valid CID counts to page counts (CIDs rounded up per page) */
4784 valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
4786 valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
4789 /* Dump ILT pages IDs */
4790 offset += qed_ilt_dump_pages_section(p_hwfn,
4793 valid_conn_pf_pages,
4794 valid_conn_vf_pages,
4797 /* Dump ILT pages memory */
4798 offset += qed_ilt_dump_pages_section(p_hwfn,
4801 valid_conn_pf_pages,
4802 valid_conn_vf_pages,
4805 /* Dump last section */
4806 offset += qed_dump_last_section(dump_buf, offset, dump);
4811 /***************************** Public Functions *******************************/
/* Registers the debug binary data blob: parses the per-buffer headers at the
 * start of bin_ptr and records each buffer's offset/length via
 * qed_set_dbg_bin_buf(). Always returns DBG_STATUS_OK.
 */
4813 enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
4814 const u8 * const bin_ptr)
4816 struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
4819 /* Convert binary data to debug arrays */
4820 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
4821 qed_set_dbg_bin_buf(p_hwfn,
4823 (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
4824 buf_hdrs[buf_id].length);
4826 return DBG_STATUS_OK;
/* Reads fw_info from the first Storm that is not in reset.
 * NOTE(review): the return statements are elided in this listing; presumably
 * returns whether fw_info was successfully read — confirm against full source.
 */
4829 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
4830 struct qed_ptt *p_ptt, struct fw_info *fw_info)
4832 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4835 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4836 struct storm_defs *storm = &s_storm_defs[storm_id];
4838 /* Skip Storm if it's in reset */
4839 if (dev_data->block_in_reset[storm->sem_block_id])
4842 /* Read FW info for the current Storm */
4843 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
/* Sets a single GRC parameter. For preset params (is_preset) the preset value
 * fans out to all non-persistent params; a regular param is set directly.
 * Returns DBG_STATUS_INVALID_ARGS for out-of-range ids/values or when trying
 * to disable a preset, else DBG_STATUS_OK.
 * NOTE(review): intermediate lines are elided in this listing.
 */
4851 enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
4852 enum dbg_grc_params grc_param, u32 val)
4854 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4855 enum dbg_status status;
4860 "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
4862 status = qed_dbg_dev_init(p_hwfn);
4863 if (status != DBG_STATUS_OK)
4866 /* Initializes the GRC parameters (if not initialized). Needed in order
4867 * to set the default parameter values for the first time.
4869 qed_dbg_grc_init_params(p_hwfn);
4871 if (grc_param >= MAX_DBG_GRC_PARAMS)
4872 return DBG_STATUS_INVALID_ARGS;
4873 if (val < s_grc_param_defs[grc_param].min ||
4874 val > s_grc_param_defs[grc_param].max)
4875 return DBG_STATUS_INVALID_ARGS;
4877 if (s_grc_param_defs[grc_param].is_preset) {
4880 /* Disabling a preset is not allowed. Call
4881 * dbg_grc_set_params_default instead.
4884 return DBG_STATUS_INVALID_ARGS;
4886 /* Update all params with the preset values */
4887 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
4888 struct grc_param_defs *defs = &s_grc_param_defs[i];
4890 /* Skip persistent params */
4891 if (defs->is_persistent)
4894 /* Find preset value */
4895 if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
4897 defs->exclude_all_preset_val;
4898 else if (grc_param == DBG_GRC_PARAM_CRASH)
4900 defs->crash_preset_val[dev_data->chip_id];
/* Unknown preset param id */
4902 return DBG_STATUS_INVALID_ARGS;
4904 qed_grc_set_param(p_hwfn, i, preset_val);
4907 /* Regular param - set its value */
4908 qed_grc_set_param(p_hwfn, grc_param, val);
4911 return DBG_STATUS_OK;
4914 /* Assign default GRC param values */
/* Resets every non-persistent GRC param to its chip-specific default */
4915 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4917 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4920 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4921 if (!s_grc_param_defs[i].is_persistent)
4922 dev_data->grc.param_val[i] =
4923 s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the GRC dump buffer size (in dwords) via a dry run of
 * qed_grc_dump() with dump=false. Fails with DBG_STATUS_DBG_ARRAY_NOT_SET if
 * the required debug binary buffers were not registered.
 */
4926 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4927 struct qed_ptt *p_ptt,
4930 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
4934 if (status != DBG_STATUS_OK)
4937 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4938 !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
4939 !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
4940 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
4941 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
4942 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4944 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a GRC dump into the caller's buffer. Verifies the buffer is large
 * enough (via the size dry run), performs the dump, then restores GRC params
 * to their defaults. Returns DBG_STATUS_DUMP_BUF_TOO_SMALL when the buffer
 * cannot hold the dump.
 */
4947 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4948 struct qed_ptt *p_ptt,
4950 u32 buf_size_in_dwords,
4951 u32 *num_dumped_dwords)
4953 u32 needed_buf_size_in_dwords;
4954 enum dbg_status status;
4956 *num_dumped_dwords = 0;
4958 status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
4960 &needed_buf_size_in_dwords);
4961 if (status != DBG_STATUS_OK)
4964 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4965 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4968 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4970 /* Revert GRC params to their default */
4971 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the idle-check dump buffer size in dwords. The result is cached in
 * dev_data->idle_chk so the dry run happens at most once per hwfn.
 */
4976 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4977 struct qed_ptt *p_ptt,
4980 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4981 struct idle_chk_data *idle_chk = &dev_data->idle_chk;
4982 enum dbg_status status;
4986 status = qed_dbg_dev_init(p_hwfn);
4987 if (status != DBG_STATUS_OK)
4990 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4991 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
4992 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
4993 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
4994 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4996 if (!idle_chk->buf_size_set) {
4997 idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
4998 p_ptt, NULL, false);
4999 idle_chk->buf_size_set = true;
5002 *buf_size = idle_chk->buf_size;
5004 return DBG_STATUS_OK;
/* Performs an idle-check dump into the caller's buffer after validating its
 * size, un-resetting blocks, and refreshing the blocks' reset state. GRC
 * params are restored to defaults afterwards.
 */
5007 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5008 struct qed_ptt *p_ptt,
5010 u32 buf_size_in_dwords,
5011 u32 *num_dumped_dwords)
5013 u32 needed_buf_size_in_dwords;
5014 enum dbg_status status;
5016 *num_dumped_dwords = 0;
5018 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5020 &needed_buf_size_in_dwords);
5021 if (status != DBG_STATUS_OK)
5024 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5025 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5027 /* Update reset state */
5028 qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5029 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5031 /* Idle Check Dump */
5032 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5034 /* Revert GRC params to their default */
5035 qed_dbg_grc_set_params_default(p_hwfn);
5037 return DBG_STATUS_OK;
/* Computes the MCP trace dump buffer size in dwords (dry run, dump=false) */
5040 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5041 struct qed_ptt *p_ptt,
5044 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5048 if (status != DBG_STATUS_OK)
5051 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an MCP trace dump into the caller's buffer. The size dry run is
 * allowed to return DBG_STATUS_NVRAM_GET_IMAGE_FAILED (meta unavailable) —
 * the data portion is still dumped in that case. GRC params are restored to
 * defaults afterwards.
 */
5054 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5055 struct qed_ptt *p_ptt,
5057 u32 buf_size_in_dwords,
5058 u32 *num_dumped_dwords)
5060 u32 needed_buf_size_in_dwords;
5061 enum dbg_status status;
5064 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5066 &needed_buf_size_in_dwords);
5067 if (status != DBG_STATUS_OK && status !=
5068 DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5071 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5072 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5074 /* Update reset state */
5075 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5078 status = qed_mcp_trace_dump(p_hwfn,
5079 p_ptt, dump_buf, true, num_dumped_dwords);
5081 /* Revert GRC params to their default */
5082 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the reg FIFO dump buffer size in dwords (dry run, dump=false) */
5087 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5088 struct qed_ptt *p_ptt,
5091 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5095 if (status != DBG_STATUS_OK)
5098 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a reg FIFO dump into the caller's buffer after validating its size
 * and refreshing the blocks' reset state. GRC params are restored to defaults
 * afterwards.
 */
5101 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5102 struct qed_ptt *p_ptt,
5104 u32 buf_size_in_dwords,
5105 u32 *num_dumped_dwords)
5107 u32 needed_buf_size_in_dwords;
5108 enum dbg_status status;
5110 *num_dumped_dwords = 0;
5112 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5114 &needed_buf_size_in_dwords);
5115 if (status != DBG_STATUS_OK)
5118 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5119 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5121 /* Update reset state */
5122 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5124 status = qed_reg_fifo_dump(p_hwfn,
5125 p_ptt, dump_buf, true, num_dumped_dwords);
5127 /* Revert GRC params to their default */
5128 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the IGU FIFO dump buffer size in dwords (dry run, dump=false) */
5133 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5134 struct qed_ptt *p_ptt,
5137 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5141 if (status != DBG_STATUS_OK)
5144 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an IGU FIFO dump into the caller's buffer after validating its
 * size and refreshing the blocks' reset state. GRC params are restored to
 * defaults afterwards.
 */
5147 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5148 struct qed_ptt *p_ptt,
5150 u32 buf_size_in_dwords,
5151 u32 *num_dumped_dwords)
5153 u32 needed_buf_size_in_dwords;
5154 enum dbg_status status;
5156 *num_dumped_dwords = 0;
5158 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5160 &needed_buf_size_in_dwords);
5161 if (status != DBG_STATUS_OK)
5164 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5165 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5167 /* Update reset state */
5168 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5170 status = qed_igu_fifo_dump(p_hwfn,
5171 p_ptt, dump_buf, true, num_dumped_dwords);
5172 /* Revert GRC params to their default */
5173 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the protection-override dump buffer size in dwords (dry run) */
5179 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5180 struct qed_ptt *p_ptt,
5183 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5187 if (status != DBG_STATUS_OK)
5190 return qed_protection_override_dump(p_hwfn,
5191 p_ptt, NULL, false, buf_size);
/* Performs a protection-override dump into the caller's buffer after
 * validating its size and refreshing the blocks' reset state. GRC params are
 * restored to defaults afterwards.
 */
5194 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5195 struct qed_ptt *p_ptt,
5197 u32 buf_size_in_dwords,
5198 u32 *num_dumped_dwords)
5200 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5201 enum dbg_status status;
5203 *num_dumped_dwords = 0;
5206 qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5209 if (status != DBG_STATUS_OK)
5212 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5213 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5215 /* Update reset state */
5216 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5218 status = qed_protection_override_dump(p_hwfn,
5221 true, num_dumped_dwords);
5223 /* Revert GRC params to their default */
5224 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the FW-asserts dump buffer size in dwords. Refreshes the blocks'
 * reset state first, since the dry run skips Storms that are in reset.
 */
5229 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5230 struct qed_ptt *p_ptt,
5233 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5237 if (status != DBG_STATUS_OK)
5240 /* Update reset state */
5241 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5243 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5245 return DBG_STATUS_OK;
/* Performs a FW-asserts dump into the caller's buffer after validating its
 * size. GRC params are restored to defaults afterwards.
 */
5248 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5249 struct qed_ptt *p_ptt,
5251 u32 buf_size_in_dwords,
5252 u32 *num_dumped_dwords)
5254 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5255 enum dbg_status status;
5257 *num_dumped_dwords = 0;
5260 qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5263 if (status != DBG_STATUS_OK)
5266 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5267 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5269 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5271 /* Revert GRC params to their default */
5272 qed_dbg_grc_set_params_default(p_hwfn);
5274 return DBG_STATUS_OK;
/* Returns (via *buf_size) the dump-buffer size, in dwords, required for an
 * ILT dump, by invoking qed_ilt_dump() in size-only mode.
 */
5277 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5278 struct qed_ptt *p_ptt,
5281 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5285 if (status != DBG_STATUS_OK)
/* NULL buffer + dump=false returns only the needed size */
5288 *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
5290 return DBG_STATUS_OK;
/* Dumps the ILT into dump_buf. Validates the buffer size against the
 * computed requirement, performs the dump, then restores default GRC
 * params.
 */
5293 static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
5294 struct qed_ptt *p_ptt,
5296 u32 buf_size_in_dwords,
5297 u32 *num_dumped_dwords)
5299 u32 needed_buf_size_in_dwords;
5300 enum dbg_status status;
5302 *num_dumped_dwords = 0;
5304 status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
5306 &needed_buf_size_in_dwords);
5307 if (status != DBG_STATUS_OK)
5310 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5311 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5313 *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
5315 /* Revert GRC params to their default */
5316 qed_dbg_grc_set_params_default(p_hwfn);
5318 return DBG_STATUS_OK;
/* Reads the attention registers of the specified block and attention type
 * (interrupt/parity) into 'results'. For each attention register whose
 * mode matches the current chip mode, the status register is read
 * (optionally its clear-on-read address, when clear_status is set -
 * parameter line elided in this view) and non-zero statuses are appended
 * to results->reg_results together with the corresponding mask value.
 * Requires the MODE_TREE / ATTN_BLOCKS / ATTN_REGS debug arrays to be set.
 */
5321 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5322 struct qed_ptt *p_ptt,
5323 enum block_id block_id,
5324 enum dbg_attn_type attn_type,
5326 struct dbg_attn_block_result *results)
5328 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5329 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5330 const struct dbg_attn_reg *attn_reg_arr;
5332 if (status != DBG_STATUS_OK)
/* All three binary debug arrays are needed to decode attention regs */
5335 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5336 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5337 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5338 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5340 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
5342 attn_type, &num_attn_regs);
5344 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5345 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5346 struct dbg_attn_reg_result *reg_result;
5347 u32 sts_addr, sts_val;
5348 u16 modes_buf_offset;
/* Skip registers whose mode expression doesn't match the current mode */
5352 eval_mode = GET_FIELD(reg_data->mode.data,
5353 DBG_MODE_HDR_EVAL_MODE) > 0;
5354 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5355 DBG_MODE_HDR_MODES_BUF_OFFSET);
5356 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5359 /* Mode match - read attention status register */
5360 sts_addr = DWORDS_TO_BYTES(clear_status ?
5361 reg_data->sts_clr_address :
5362 GET_FIELD(reg_data->data,
5363 DBG_ATTN_REG_STS_ADDRESS));
5364 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5368 /* Non-zero attention status - add to results */
5369 reg_result = &results->reg_results[num_result_regs];
5370 SET_FIELD(reg_result->data,
5371 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5372 SET_FIELD(reg_result->data,
5373 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5374 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5375 reg_result->block_attn_offset = reg_data->block_attn_offset;
5376 reg_result->sts_val = sts_val;
5377 reg_result->mask_val = qed_rd(p_hwfn,
5380 (reg_data->mask_address));
/* Fill the per-block result header after the register scan */
5384 results->block_id = (u8)block_id;
5385 results->names_offset =
5386 qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
5387 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5388 SET_FIELD(results->data,
5389 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5391 return DBG_STATUS_OK;
5394 /******************************* Data Types **********************************/
5396 /* REG fifo element */
/* Bit-field layout of one raw GRC REG FIFO element. Shifts of 36..47
 * imply a 64-bit raw word (the data member itself is elided in this
 * view). The shift/mask pairs are consumed via GET_FIELD-style decoding.
 */
5397 struct reg_fifo_element {
5399 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5400 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5401 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5402 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5403 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5404 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5405 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5406 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5407 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5408 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5409 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5410 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5411 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5412 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5413 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5414 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5415 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5416 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
5419 /* REG fifo error element */
/* Maps a REG FIFO error code (numeric member elided in this view; see
 * s_reg_fifo_errors initializers) to a human-readable message.
 */
5420 struct reg_fifo_err {
5422 const char *err_msg;
5425 /* IGU fifo element */
/* Bit-field layout of one raw IGU FIFO element: a command dword (DWORD0
 * group: FID, PF flag, source, error type, command address) plus a
 * write-command/data group (DWORD12). Raw data members are elided in
 * this view.
 */
5426 struct igu_fifo_element {
5428 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5429 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5430 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5431 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5432 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5433 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5434 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5435 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5436 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5437 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5440 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5441 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5442 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5443 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
/* Bit-field layout of the data dword of a regular IGU write command
 * (prod/cons update, interrupt enable, segment, timer mask, command
 * type). Raw data member elided in this view.
 */
5447 struct igu_fifo_wr_data {
5449 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5450 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5451 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5452 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5453 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5454 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5455 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5456 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5457 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5458 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5459 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5460 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
/* Bit-field layout of the data dword of an IGU cleanup write command.
 * Raw data member elided in this view.
 */
5463 struct igu_fifo_cleanup_wr_data {
5465 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5466 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5467 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5468 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5469 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5470 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5471 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5472 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
5475 /* Protection override element */
/* Bit-field layout of one raw protection-override window element.
 * Shifts of 47..52 imply a 64-bit raw word (member elided in this view).
 */
5476 struct protection_override_element {
5478 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5479 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5480 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5481 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5482 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5483 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5484 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5485 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5486 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5487 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5488 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5489 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
5492 enum igu_fifo_sources {
/* Classification of IGU command addresses; used as the 'type' field of
 * s_igu_fifo_addr_data entries to decode a command's target region.
 */
5506 enum igu_fifo_addr_types {
5507 IGU_ADDR_TYPE_MSIX_MEM,
5508 IGU_ADDR_TYPE_WRITE_PBA,
5509 IGU_ADDR_TYPE_WRITE_INT_ACK,
5510 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5511 IGU_ADDR_TYPE_READ_INT,
5512 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5513 IGU_ADDR_TYPE_RESERVED
/* One entry of the IGU address-decode table (address range, description
 * strings and address type; range/string members elided in this view).
 */
5516 struct igu_fifo_addr_data {
5521 enum igu_fifo_addr_types type;
5524 /******************************** Constants **********************************/
5526 #define MAX_MSG_LEN 1024
5528 #define MCP_TRACE_MAX_MODULE_LEN 8
5529 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
5530 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5531 (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
5533 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
5534 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
5536 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5538 /***************************** Constant Arrays *******************************/
5540 /* Status string array */
5541 static const char * const s_status_str[] = {
5543 "Operation completed successfully",
5545 /* DBG_STATUS_APP_VERSION_NOT_SET */
5546 "Debug application version wasn't set",
5548 /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5549 "Unsupported debug application version",
5551 /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5552 "The debug block wasn't reset since the last recording",
5554 /* DBG_STATUS_INVALID_ARGS */
5555 "Invalid arguments",
5557 /* DBG_STATUS_OUTPUT_ALREADY_SET */
5558 "The debug output was already set",
5560 /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5561 "Invalid PCI buffer size",
5563 /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5564 "PCI buffer allocation failed",
5566 /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5567 "A PCI buffer wasn't allocated",
5569 /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
5570 "The filter/trigger constraint dword offsets are not enabled for recording",
5571 /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
5572 "No matching framing mode",
5574 /* DBG_STATUS_VFC_READ_ERROR */
5575 "Error reading from VFC",
5577 /* DBG_STATUS_STORM_ALREADY_ENABLED */
5578 "The Storm was already enabled",
5580 /* DBG_STATUS_STORM_NOT_ENABLED */
5581 "The specified Storm wasn't enabled",
5583 /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5584 "The block was already enabled",
5586 /* DBG_STATUS_BLOCK_NOT_ENABLED */
5587 "The specified block wasn't enabled",
5589 /* DBG_STATUS_NO_INPUT_ENABLED */
5590 "No input was enabled for recording",
5592 /* DBG_STATUS_NO_FILTER_TRIGGER_256B */
5593 "Filters and triggers are not allowed in E4 256-bit mode",
5595 /* DBG_STATUS_FILTER_ALREADY_ENABLED */
5596 "The filter was already enabled",
5598 /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5599 "The trigger was already enabled",
5601 /* DBG_STATUS_TRIGGER_NOT_ENABLED */
5602 "The trigger wasn't enabled",
5604 /* DBG_STATUS_CANT_ADD_CONSTRAINT */
5605 "A constraint can be added only after a filter was enabled or a trigger state was added",
5607 /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5608 "Cannot add more than 3 trigger states",
5610 /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5611 "Cannot add more than 4 constraints per filter or trigger state",
5613 /* DBG_STATUS_RECORDING_NOT_STARTED */
5614 "The recording wasn't started",
5616 /* DBG_STATUS_DATA_DIDNT_TRIGGER */
5617 "A trigger was configured, but it didn't trigger",
5619 /* DBG_STATUS_NO_DATA_RECORDED */
5620 "No data was recorded",
5622 /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5623 "Dump buffer is too small",
5625 /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5626 "Dumped data is not aligned to chunks",
5628 /* DBG_STATUS_UNKNOWN_CHIP */
5631 /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5632 "Failed allocating virtual memory",
5634 /* DBG_STATUS_BLOCK_IN_RESET */
5635 "The input block is in reset",
5637 /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5638 "Invalid MCP trace signature found in NVRAM",
5640 /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5641 "Invalid bundle ID found in NVRAM",
5643 /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5644 "Failed getting NVRAM image",
5646 /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5647 "NVRAM image is not dword-aligned",
5649 /* DBG_STATUS_NVRAM_READ_FAILED */
5650 "Failed reading from NVRAM",
5652 /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5653 "Idle check parsing failed",
5655 /* DBG_STATUS_MCP_TRACE_BAD_DATA */
5656 "MCP Trace data is corrupt",
5658 /* DBG_STATUS_MCP_TRACE_NO_META */
5659 "Dump doesn't contain meta data - it must be provided in image file",
5661 /* DBG_STATUS_MCP_COULD_NOT_HALT */
5662 "Failed to halt MCP",
5664 /* DBG_STATUS_MCP_COULD_NOT_RESUME */
5665 "Failed to resume MCP after halt",
5667 /* DBG_STATUS_RESERVED0 */
5670 /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5671 "Failed to empty SEMI sync FIFO",
5673 /* DBG_STATUS_IGU_FIFO_BAD_DATA */
5674 "IGU FIFO data is corrupt",
5676 /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5677 "MCP failed to mask parities",
5679 /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5680 "FW Asserts parsing failed",
5682 /* DBG_STATUS_REG_FIFO_BAD_DATA */
5683 "GRC FIFO data is corrupt",
5685 /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5686 "Protection Override data is corrupt",
5688 /* DBG_STATUS_DBG_ARRAY_NOT_SET */
5689 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5691 /* DBG_STATUS_RESERVED1 */
5694 /* DBG_STATUS_NON_MATCHING_LINES */
5695 "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
5697 /* DBG_STATUS_INSUFFICIENT_HW_IDS */
5698 "Insufficient HW IDs. Try to record less Storms/blocks",
5700 /* DBG_STATUS_DBG_BUS_IN_USE */
5701 "The debug bus is in use",
5703 /* DBG_STATUS_INVALID_STORM_DBG_MODE */
5704 "The storm debug mode is not supported in the current chip",
5706 /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
5707 "Other engine is supported only in BB",
5709 /* DBG_STATUS_FILTER_SINGLE_HW_ID */
5710 "The configured filter mode requires a single Storm/block input",
5712 /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
5713 "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
5715 /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
5716 "When triggering on Storm data, the Storm to trigger on must be specified"
5719 /* Idle check severity names array */
5720 static const char * const s_idle_chk_severity_str[] = {
5722 "Error if no traffic",
5726 /* MCP Trace level names array */
5727 static const char * const s_mcp_trace_level_str[] = {
5733 /* Access type names array */
5734 static const char * const s_access_strs[] = {
5739 /* Privilege type names array */
5740 static const char * const s_privilege_strs[] = {
5747 /* Protection type names array */
5748 static const char * const s_protection_strs[] = {
5759 /* Master type names array */
5760 static const char * const s_master_strs[] = {
5779 /* REG FIFO error messages array */
5780 static struct reg_fifo_err s_reg_fifo_errors[] = {
5782 {2, "address doesn't belong to any block"},
5783 {4, "reserved address in block or write to read-only address"},
5784 {8, "privilege/protection mismatch"},
5785 {16, "path isolation error"},
5789 /* IGU FIFO sources array */
5790 static const char * const s_igu_fifo_source_strs[] = {
5804 /* IGU FIFO error messages */
5805 static const char * const s_igu_fifo_error_strs[] = {
5808 "function disabled",
5809 "VF sent command to attention address",
5810 "host sent prod update command",
5811 "read of during interrupt register while in MIMD mode",
5812 "access to PXP BAR reserved address",
5813 "producer update command to attention index",
5815 "SB index not valid",
5816 "SB relative index and FID not found",
5818 "command with error flag asserted (PCI error or CAU discard)",
5819 "VF sent cleanup and RF cleanup is disabled",
5820 "cleanup command on type bigger than 4"
5823 /* IGU FIFO address data */
5824 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5825 {0x0, 0x101, "MSI-X Memory", NULL,
5826 IGU_ADDR_TYPE_MSIX_MEM},
5827 {0x102, 0x1ff, "reserved", NULL,
5828 IGU_ADDR_TYPE_RESERVED},
5829 {0x200, 0x200, "Write PBA[0:63]", NULL,
5830 IGU_ADDR_TYPE_WRITE_PBA},
5831 {0x201, 0x201, "Write PBA[64:127]", "reserved",
5832 IGU_ADDR_TYPE_WRITE_PBA},
5833 {0x202, 0x202, "Write PBA[128]", "reserved",
5834 IGU_ADDR_TYPE_WRITE_PBA},
5835 {0x203, 0x3ff, "reserved", NULL,
5836 IGU_ADDR_TYPE_RESERVED},
5837 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5838 IGU_ADDR_TYPE_WRITE_INT_ACK},
5839 {0x5f0, 0x5f0, "Attention bits update", NULL,
5840 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5841 {0x5f1, 0x5f1, "Attention bits set", NULL,
5842 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5843 {0x5f2, 0x5f2, "Attention bits clear", NULL,
5844 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5845 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5846 IGU_ADDR_TYPE_READ_INT},
5847 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5848 IGU_ADDR_TYPE_READ_INT},
5849 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
5850 IGU_ADDR_TYPE_READ_INT},
5851 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
5852 IGU_ADDR_TYPE_READ_INT},
5853 {0x5f7, 0x5ff, "reserved", NULL,
5854 IGU_ADDR_TYPE_RESERVED},
5855 {0x600, 0x7ff, "Producer update", NULL,
5856 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
5859 /******************************** Variables **********************************/
5861 /* Temporary buffer, used for print size calculations */
5862 static char s_temp_buf[MAX_MSG_LEN];
5864 /**************************** Private Functions ******************************/
/* Advances offset 'a' by 'b' within a cyclic buffer of 'size' bytes,
 * wrapping around. 'size' must be non-zero (modulo by zero otherwise).
 */
5866 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5868 return (a + b) % size;
/* Moves offset 'a' backwards by 'b' within a cyclic buffer of 'size'
 * bytes. Adding 'size' before subtracting avoids u32 underflow when
 * b > a (valid as long as b <= size).
 */
5871 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5873 return (size + a - b) % size;
5876 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5877 * bytes) and returns them as a dword value. The specified buffer offset is
/* Copies up to 4 bytes from the cyclic buffer into a u32, advancing
 * *offset (with wrap-around) one byte at a time. The byte-by-byte copy
 * into &val handles reads that straddle the wrap point.
 */
5880 static u32 qed_read_from_cyclic_buf(void *buf,
5882 u32 buf_size, u8 num_bytes_to_read)
5884 u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
5887 val_ptr = (u8 *)&val;
5889 /* Assume running on a LITTLE ENDIAN and the buffer is network order
5890 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
5892 for (i = 0; i < num_bytes_to_read; i++) {
5893 val_ptr[i] = bytes_buf[*offset];
5894 *offset = qed_cyclic_add(*offset, 1, buf_size);
5900 /* Reads and returns the next byte from the specified buffer.
5901 * The specified buffer offset is updated.
/* Returns the byte at *offset and post-increments the offset. */
5903 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5905 return ((u8 *)buf)[(*offset)++];
5908 /* Reads and returns the next dword from the specified buffer.
5909 * The specified buffer offset is updated.
/* Reads a dword at byte offset *offset (offset-advance line elided in
 * this view).
 * NOTE(review): the u32 cast assumes *offset is 4-byte aligned and relies
 * on little-endian byte order - confirm against callers.
 */
5911 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5913 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5920 /* Reads the next string from the specified buffer, and copies it to the
5921 * specified pointer. The specified buffer offset is updated.
/* Copies a string of at most 'size' bytes from buf[*offset] into dest.
 * dest must be at least 'size' bytes; the explicit terminator write at
 * dest[size - 1] guarantees NUL termination even when strncpy truncates.
 */
5923 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5925 const char *source_str = &((const char *)buf)[*offset];
5927 strncpy(dest, source_str, size);
5928 dest[size - 1] = '\0';
5932 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5933 * If the specified buffer is NULL, a temporary buffer pointer is returned.
/* Returns buf + offset, or the shared scratch buffer s_temp_buf when
 * buf is NULL - used by the parsers to run in "size calculation only"
 * mode without a real output buffer.
 */
5935 static char *qed_get_buf_ptr(void *buf, u32 offset)
5937 return buf ? (char *)buf + offset : s_temp_buf;
5940 /* Reads a param from the specified buffer. Returns the number of dwords read.
5941 * If the returned str_param is NULL, the param is numeric and its value is
5942 * returned in num_param.
5943 * Otherwise, the param is a string and its pointer is returned in str_param.
/* Reads one dump parameter: a NUL-terminated name, a type byte, then
 * either a NUL-terminated string value or a dword-aligned numeric value.
 * Returns the number of dwords consumed.
 * NOTE(review): the guarding "if (offset & 0x3)" before each alignment
 * adjustment appears to be elided from this view - the bare
 * "offset += (4 - (offset & 0x3))" lines are not the complete logic.
 */
5945 static u32 qed_read_param(u32 *dump_buf,
5946 const char **param_name,
5947 const char **param_str_val, u32 *param_num_val)
5949 char *char_buf = (char *)dump_buf;
5952 /* Extract param name */
5953 *param_name = char_buf;
5954 offset += strlen(*param_name) + 1;
5956 /* Check param type */
5957 if (*(char_buf + offset++)) {
/* Non-zero type byte: string param, consume string + terminator */
5959 *param_str_val = char_buf + offset;
5961 offset += strlen(*param_str_val) + 1;
5963 offset += (4 - (offset & 0x3));
/* Zero type byte: numeric param stored as an aligned dword */
5966 *param_str_val = NULL;
5968 offset += (4 - (offset & 0x3));
5969 *param_num_val = *(u32 *)(char_buf + offset);
5973 return (u32)offset / 4;
5976 /* Reads a section header from the specified buffer.
5977 * Returns the number of dwords read.
/* Reads a section header from the specified buffer.
 * A section header is encoded as a regular dump parameter whose name is
 * the section name and whose numeric value is the parameter count.
 * Returns the number of dwords read.
 */
5979 static u32 qed_read_section_hdr(u32 *dump_buf,
5980 const char **section_name,
5981 u32 *num_section_params)
5983 const char *param_str_val;
/* Fix: "&param_str_val" was mangled by HTML-entity decoding ("&para;") */
5985 return qed_read_param(dump_buf,
5986 section_name, &param_str_val, num_section_params);
5989 /* Reads section params from the specified buffer and prints them to the results
5990 * buffer. Returns the number of dwords read.
/* Reads num_section_params parameters from dump_buf and prints each as
 * "name: value" into results_buf (string params verbatim, numeric params
 * as decimal; "fw-timestamp" is skipped per the strcmp filter).
 * Returns the number of dwords consumed; the printed byte count is
 * stored in *num_chars_printed.
 */
5992 static u32 qed_print_section_params(u32 *dump_buf,
5993 u32 num_section_params,
5994 char *results_buf, u32 *num_chars_printed)
5996 u32 i, dump_offset = 0, results_offset = 0;
5998 for (i = 0; i < num_section_params; i++) {
5999 const char *param_name, *param_str_val;
6000 u32 param_num_val = 0;
/* Fix: "&param_str_val"/"&param_num_val" were mangled by HTML-entity
 * decoding ("&para;")
 */
6002 dump_offset += qed_read_param(dump_buf + dump_offset,
6004 &param_str_val, &param_num_val);
6008 sprintf(qed_get_buf_ptr(results_buf,
6010 "%s: %s\n", param_name, param_str_val);
6011 else if (strcmp(param_name, "fw-timestamp"))
6013 sprintf(qed_get_buf_ptr(results_buf,
6015 "%s: %d\n", param_name, param_num_val);
6018 results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6021 *num_chars_printed = results_offset;
6026 /* Returns the block name that matches the specified block ID,
6027 * or NULL if not found.
/* Returns the name of the block matching block_id, looked up in the
 * BLOCKS_USER_DATA binary debug array (indexed directly by block_id).
 * Note: does not itself validate block_id or the array pointer.
 */
6029 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6030 enum block_id block_id)
6032 const struct dbg_block_user *block =
6033 (const struct dbg_block_user *)
6034 p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6036 return (const char *)block->name;
/* Returns the per-hwfn debug-tools user data stored in dbg_user_info. */
6039 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6042 return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6045 /* Parses the idle check rules and returns the number of characters printed.
6046 * In case of parsing error, returns 0.
/* Parses dumped idle-check rule results between dump_buf and
 * dump_buf_end, printing one line per failed rule (severity, message,
 * and the dumped condition/info register values) into results_buf.
 * Increments *num_errors / *num_warnings per rule severity. Returns the
 * number of characters printed, or 0 on parsing error (severity out of
 * range or buffer overrun).
 */
6048 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6052 bool print_fw_idle_chk,
6054 u32 *num_errors, u32 *num_warnings)
6056 /* Offset in results_buf in bytes */
6057 u32 results_offset = 0;
6065 /* Go over dumped results */
6066 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6068 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6069 struct dbg_idle_chk_result_hdr *hdr;
6070 const char *parsing_str, *lsi_msg;
6071 u32 parsing_str_offset;
6075 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6077 (const struct dbg_idle_chk_rule_parsing_data *)
6078 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
6080 parsing_str_offset =
6081 GET_FIELD(rule_parsing_data->data,
6082 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6084 GET_FIELD(rule_parsing_data->data,
6085 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6086 parsing_str = (const char *)
6087 p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
6089 lsi_msg = parsing_str;
/* Bail out (returning 0) on an out-of-range severity */
6092 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6095 /* Skip rule header */
6096 dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6098 /* Update errors/warnings count */
6099 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6100 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6105 /* Print rule severity */
6107 sprintf(qed_get_buf_ptr(results_buf,
6108 results_offset), "%s: ",
6109 s_idle_chk_severity_str[hdr->severity]);
6111 /* Print rule message */
/* Parsing strings are packed back-to-back, NUL separated */
6113 parsing_str += strlen(parsing_str) + 1;
6115 sprintf(qed_get_buf_ptr(results_buf,
6116 results_offset), "%s.",
6118 print_fw_idle_chk ? parsing_str : lsi_msg);
6119 parsing_str += strlen(parsing_str) + 1;
6121 /* Print register values */
6123 sprintf(qed_get_buf_ptr(results_buf,
6124 results_offset), " Registers:");
6126 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6128 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6133 (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6134 is_mem = GET_FIELD(reg_hdr->data,
6135 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6136 reg_id = GET_FIELD(reg_hdr->data,
6137 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6139 /* Skip reg header */
6140 dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6142 /* Skip register names until the required reg_id is
6145 for (; reg_id > curr_reg_id;
6147 parsing_str += strlen(parsing_str) + 1);
6150 sprintf(qed_get_buf_ptr(results_buf,
6151 results_offset), " %s",
/* Memory entries get an "[index]" suffix after the name */
6153 if (i < hdr->num_dumped_cond_regs && is_mem)
6155 sprintf(qed_get_buf_ptr(results_buf,
6157 "[%d]", hdr->mem_entry_id +
6158 reg_hdr->start_entry);
6160 sprintf(qed_get_buf_ptr(results_buf,
6161 results_offset), "=");
6162 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6164 sprintf(qed_get_buf_ptr(results_buf,
6167 if (j < reg_hdr->size - 1)
6169 sprintf(qed_get_buf_ptr
6171 results_offset), ",");
6176 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6179 /* Check if end of dump buffer was exceeded */
6180 if (dump_buf > dump_buf_end)
6183 return results_offset;
6186 /* Parses an idle check dump buffer.
6187 * If result_buf is not NULL, the idle check results are printed to it.
6188 * In any case, the required results buffer size is assigned to
6189 * parsed_results_bytes.
6190 * The parsing status is returned.
/* Parses an idle check dump buffer: reads the global_params and idle_chk
 * sections, prints FW and LSI idle-check rule results via
 * qed_parse_idle_chk_dump_rules(), and appends a summary line with the
 * error/warning counts. The required results-buffer size (including the
 * NUL terminator) is stored in *parsed_results_bytes.
 */
6192 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6194 u32 num_dumped_dwords,
6196 u32 *parsed_results_bytes,
6200 const char *section_name, *param_name, *param_str_val;
6201 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6202 u32 num_section_params = 0, num_rules;
6204 /* Offset in results_buf in bytes */
6205 u32 results_offset = 0;
6207 *parsed_results_bytes = 0;
/* Parsing strings + rule parsing data arrays are mandatory inputs */
6211 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6212 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6213 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6215 /* Read global_params section */
/* Fix: "&section_name"/"&param_name"/"&param_str_val" below were
 * mangled by HTML-entity decoding ("&sect;"/"&para;")
 */
6216 dump_buf += qed_read_section_hdr(dump_buf,
6217 &section_name, &num_section_params);
6218 if (strcmp(section_name, "global_params"))
6219 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6221 /* Print global params */
6222 dump_buf += qed_print_section_params(dump_buf,
6224 results_buf, &results_offset);
6226 /* Read idle_chk section */
6227 dump_buf += qed_read_section_hdr(dump_buf,
6228 &section_name, &num_section_params);
6229 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6230 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6231 dump_buf += qed_read_param(dump_buf,
6232 &param_name, &param_str_val, &num_rules);
6233 if (strcmp(param_name, "num_rules"))
6234 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6237 u32 rules_print_size;
6239 /* Print FW output */
6241 sprintf(qed_get_buf_ptr(results_buf,
6243 "FW_IDLE_CHECK:\n");
6245 qed_parse_idle_chk_dump_rules(p_hwfn,
/* Zero print size from the rule parser indicates a parse failure */
6256 results_offset += rules_print_size;
6257 if (!rules_print_size)
6258 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6260 /* Print LSI output */
6262 sprintf(qed_get_buf_ptr(results_buf,
6264 "\nLSI_IDLE_CHECK:\n");
6266 qed_parse_idle_chk_dump_rules(p_hwfn,
6277 results_offset += rules_print_size;
6278 if (!rules_print_size)
6279 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6282 /* Print errors/warnings count */
6285 sprintf(qed_get_buf_ptr(results_buf,
6287 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6288 *num_errors, *num_warnings);
6289 else if (*num_warnings)
6291 sprintf(qed_get_buf_ptr(results_buf,
6293 "\nIdle Check completed successfully (with %d warnings)\n",
6297 sprintf(qed_get_buf_ptr(results_buf,
6299 "\nIdle Check completed successfully\n");
6301 /* Add 1 for string NULL termination */
6302 *parsed_results_bytes = results_offset + 1;
6304 return DBG_STATUS_OK;
6307 /* Allocates and fills MCP Trace meta data based on the specified meta data
6309 * Returns debug status code.
/* Allocates and fills the MCP Trace meta data (module names and format
 * strings) from the given meta buffer image. Frees any previously loaded
 * meta first. Both halves of the image must start with NVM_MAGIC_VALUE.
 * Returns DBG_STATUS_VIRT_MEM_ALLOC_FAILED on any allocation failure.
 */
6311 static enum dbg_status
6312 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6313 const u32 *meta_buf)
6315 struct dbg_tools_user_data *dev_user_data;
6316 u32 offset = 0, signature, i;
6317 struct mcp_trace_meta *meta;
6320 dev_user_data = qed_dbg_get_user_data(p_hwfn);
6321 meta = &dev_user_data->mcp_trace_meta;
6322 meta_buf_bytes = (u8 *)meta_buf;
6324 /* Free the previous meta before loading a new one. */
6325 if (meta->is_allocated)
6326 qed_mcp_trace_free_meta_data(p_hwfn);
6328 memset(meta, 0, sizeof(*meta));
6330 /* Read first signature */
6331 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6332 if (signature != NVM_MAGIC_VALUE)
6333 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6335 /* Read no. of modules and allocate memory for their pointers */
6336 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6337 meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6340 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6342 /* Allocate and read all module strings */
6343 for (i = 0; i < meta->modules_num; i++) {
6344 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6346 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6347 if (!(*(meta->modules + i))) {
6348 /* Update number of modules to be released */
/* NOTE(review): when allocation of module i fails, modules 0..i-1 were
 * successfully allocated, so setting modules_num to i - 1 appears to
 * under-count by one and may leak the last module - verify against
 * qed_mcp_trace_free_meta_data().
 */
6349 meta->modules_num = i ? i - 1 : 0;
6350 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6353 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6354 *(meta->modules + i));
/* Force-truncate over-long module names; safe since module_len >
 * MCP_TRACE_MAX_MODULE_LEN implies the buffer has that index
 */
6355 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6356 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6359 /* Read second signature */
6360 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6361 if (signature != NVM_MAGIC_VALUE)
6362 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6364 /* Read number of formats and allocate memory for all formats */
6365 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6366 meta->formats = kcalloc(meta->formats_num,
6367 sizeof(struct mcp_trace_format),
6370 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6372 /* Allocate and read all strings */
6373 for (i = 0; i < meta->formats_num; i++) {
6374 struct mcp_trace_format *format_ptr = &meta->formats[i];
6377 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6379 format_len = GET_MFW_FIELD(format_ptr->data,
6380 MCP_TRACE_FORMAT_LEN);
6381 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6382 if (!format_ptr->format_str) {
6383 /* Update number of modules to be released */
/* NOTE(review): same potential off-by-one as modules_num above */
6384 meta->formats_num = i ? i - 1 : 0;
6385 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6388 qed_read_str_from_buf(meta_buf_bytes,
6390 format_len, format_ptr->format_str);
6393 meta->is_allocated = true;
6394 return DBG_STATUS_OK;
6397 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6398 * are printed to it. The parsing status is returned.
6400 * trace_buf - MCP trace cyclic buffer
6401 * trace_buf_size - MCP trace cyclic buffer size in bytes
6402 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6404 * data_size - size in bytes of data to parse.
6405 * parsed_buf - destination buffer for parsed data.
6406 * parsed_results_bytes - size of parsed data in bytes.
/* Parses entries from the MCP trace cyclic buffer into human-readable
 * lines ("level module message") appended to parsed_buf. Each entry is a
 * header (event id + param sizes) followed by 0..3 parameters of 1-4
 * bytes. Entries whose format index is unknown are skipped; truncated
 * entries yield DBG_STATUS_MCP_TRACE_BAD_DATA. *parsed_results_bytes
 * receives the printed size including the NUL terminator.
 */
6408 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6414 u32 *parsed_results_bytes)
6416 struct dbg_tools_user_data *dev_user_data;
6417 struct mcp_trace_meta *meta;
6418 u32 param_mask, param_shift;
6419 enum dbg_status status;
6421 dev_user_data = qed_dbg_get_user_data(p_hwfn);
6422 meta = &dev_user_data->mcp_trace_meta;
6423 *parsed_results_bytes = 0;
/* Meta data (format strings) must have been loaded first */
6425 if (!meta->is_allocated)
6426 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6428 status = DBG_STATUS_OK;
6431 struct mcp_trace_format *format_ptr;
6432 u8 format_level, format_module;
6433 u32 params[3] = { 0, 0, 0 };
6434 u32 header, format_idx, i;
6436 if (data_size < MFW_TRACE_ENTRY_SIZE)
6437 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6439 header = qed_read_from_cyclic_buf(trace_buf,
6442 MFW_TRACE_ENTRY_SIZE);
6443 data_size -= MFW_TRACE_ENTRY_SIZE;
6444 format_idx = header & MFW_TRACE_EVENTID_MASK;
6446 /* Skip message if its index doesn't exist in the meta data */
6447 if (format_idx >= meta->formats_num) {
6448 u8 format_size = (u8)GET_MFW_FIELD(header,
6449 MFW_TRACE_PRM_SIZE);
6451 if (data_size < format_size)
6452 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6454 data_offset = qed_cyclic_add(data_offset,
6457 data_size -= format_size;
6461 format_ptr = &meta->formats[format_idx];
/* Walk the up-to-3 parameter size fields packed in format_ptr->data */
6464 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6465 MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6466 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6467 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6468 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6469 /* Extract param size (0..3) */
6470 u8 param_size = (u8)((format_ptr->data & param_mask) >>
6473 /* If the param size is zero, there are no other
6479 /* Size is encoded using 2 bits, where 3 is used to
6482 if (param_size == 3)
6485 if (data_size < param_size)
6486 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6488 params[i] = qed_read_from_cyclic_buf(trace_buf,
6492 data_size -= param_size;
6495 format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6496 MCP_TRACE_FORMAT_LEVEL);
6497 format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6498 MCP_TRACE_FORMAT_MODULE);
/* NOTE(review): format_module is not range-checked against
 * meta->modules_num in the lines visible here - verify.
 */
6499 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6500 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6502 /* Print current message to results buffer */
6503 *parsed_results_bytes +=
6504 sprintf(qed_get_buf_ptr(parsed_buf,
6505 *parsed_results_bytes),
6507 s_mcp_trace_level_str[format_level],
6508 meta->modules[format_module]);
/* The meta-supplied format string consumes up to 3 params */
6509 *parsed_results_bytes +=
6510 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6511 format_ptr->format_str,
6512 params[0], params[1], params[2]);
6515 /* Add string NULL terminator */
6516 (*parsed_results_bytes)++;
6521 /* Parses an MCP Trace dump buffer.
6522 * If result_buf is not NULL, the MCP Trace results are printed to it.
6523 * In any case, the required results buffer size is assigned to
6524 * parsed_results_bytes.
6525 * The parsing status is returned.
/* Parses a full MCP Trace dump image: global_params section, trace data
 * section, and (optionally embedded) meta data section, then delegates the
 * cyclic-buffer decoding to qed_parse_mcp_trace_buf().
 * NOTE(review): tokens like "§ion_name" / "¶m_name" below are
 * HTML-entity corruption of "&section_name" / "&param_name" from the
 * extraction — fix the encoding, not the logic.
 */
6527 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6530 u32 *parsed_results_bytes,
6531 bool free_meta_data)
6533 const char *section_name, *param_name, *param_str_val;
6534 u32 data_size, trace_data_dwords, trace_meta_dwords;
6535 u32 offset, results_offset, results_buf_bytes;
6536 u32 param_num_val, num_section_params;
6537 struct mcp_trace *trace;
6538 enum dbg_status status;
6539 const u32 *meta_buf;
6542 *parsed_results_bytes = 0;
6544 /* Read global_params section */
6545 dump_buf += qed_read_section_hdr(dump_buf,
6546 §ion_name, &num_section_params);
6547 if (strcmp(section_name, "global_params"))
6548 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6550 /* Print global params */
6551 dump_buf += qed_print_section_params(dump_buf,
6553 results_buf, &results_offset);
6555 /* Read trace_data section */
6556 dump_buf += qed_read_section_hdr(dump_buf,
6557 §ion_name, &num_section_params);
6558 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6559 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6560 dump_buf += qed_read_param(dump_buf,
6561 ¶m_name, ¶m_str_val, ¶m_num_val);
6562 if (strcmp(param_name, "size"))
6563 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6564 trace_data_dwords = param_num_val;
6566 /* Prepare trace info */
6567 trace = (struct mcp_trace *)dump_buf;
/* Sanity-check the trace header before trusting its size/offsets */
6568 if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6569 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6571 trace_buf = (u8 *)dump_buf + sizeof(*trace);
6572 offset = trace->trace_oldest;
6573 data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6574 dump_buf += trace_data_dwords;
6576 /* Read meta_data section */
6577 dump_buf += qed_read_section_hdr(dump_buf,
6578 §ion_name, &num_section_params);
6579 if (strcmp(section_name, "mcp_trace_meta"))
6580 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6581 dump_buf += qed_read_param(dump_buf,
6582 ¶m_name, ¶m_str_val, ¶m_num_val);
6583 if (strcmp(param_name, "size"))
6584 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6585 trace_meta_dwords = param_num_val;
6587 /* Choose meta data buffer */
6588 if (!trace_meta_dwords) {
6589 /* Dump doesn't include meta data */
6590 struct dbg_tools_user_data *dev_user_data =
6591 qed_dbg_get_user_data(p_hwfn);
/* Fall back to a user-provided meta buffer; without one we can't parse */
6593 if (!dev_user_data->mcp_trace_user_meta_buf)
6594 return DBG_STATUS_MCP_TRACE_NO_META;
6596 meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6598 /* Dump includes meta data */
6599 meta_buf = dump_buf;
6602 /* Allocate meta data memory */
6603 status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6604 if (status != DBG_STATUS_OK)
6607 status = qed_parse_mcp_trace_buf(p_hwfn,
6613 results_buf + results_offset :
6615 &results_buf_bytes);
6616 if (status != DBG_STATUS_OK)
6620 qed_mcp_trace_free_meta_data(p_hwfn);
6622 *parsed_results_bytes = results_offset + results_buf_bytes;
6624 return DBG_STATUS_OK;
6627 /* Parses a Reg FIFO dump buffer.
6628 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6629 * In any case, the required results buffer size is assigned to
6630 * parsed_results_bytes.
6631 * The parsing status is returned.
/* Parses a Reg FIFO dump: validates the global_params and reg_fifo_data
 * sections, then decodes each fixed-size FIFO element into a formatted
 * line (address, access type, pf/vf, port, privilege, protection, master,
 * error) appended to results_buf.
 */
6633 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6635 u32 *parsed_results_bytes)
6637 const char *section_name, *param_name, *param_str_val;
6638 u32 param_num_val, num_section_params, num_elements;
6639 struct reg_fifo_element *elements;
6640 u8 i, j, err_code, vf_val;
6641 u32 results_offset = 0;
6644 /* Read global_params section */
6645 dump_buf += qed_read_section_hdr(dump_buf,
6646 §ion_name, &num_section_params);
6647 if (strcmp(section_name, "global_params"))
6648 return DBG_STATUS_REG_FIFO_BAD_DATA;
6650 /* Print global params */
6651 dump_buf += qed_print_section_params(dump_buf,
6653 results_buf, &results_offset);
6655 /* Read reg_fifo_data section */
6656 dump_buf += qed_read_section_hdr(dump_buf,
6657 §ion_name, &num_section_params);
6658 if (strcmp(section_name, "reg_fifo_data"))
6659 return DBG_STATUS_REG_FIFO_BAD_DATA;
6660 dump_buf += qed_read_param(dump_buf,
6661 ¶m_name, ¶m_str_val, ¶m_num_val);
6662 if (strcmp(param_name, "size"))
6663 return DBG_STATUS_REG_FIFO_BAD_DATA;
/* Section size must be a whole number of FIFO elements */
6664 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6665 return DBG_STATUS_REG_FIFO_BAD_DATA;
6666 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6667 elements = (struct reg_fifo_element *)dump_buf;
6669 /* Decode elements */
6670 for (i = 0; i < num_elements; i++) {
6671 const char *err_msg = NULL;
6673 /* Discover if element belongs to a VF or a PF */
6674 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6675 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6676 sprintf(vf_str, "%s", "N/A");
6678 sprintf(vf_str, "%d", vf_val);
6680 /* Find error message */
6681 err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
6682 for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
6683 if (err_code == s_reg_fifo_errors[j].err_code)
6684 err_msg = s_reg_fifo_errors[j].err_msg;
6686 /* Add parsed element to parsed buffer */
6688 sprintf(qed_get_buf_ptr(results_buf,
6690 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
6692 (u32)GET_FIELD(elements[i].data,
6693 REG_FIFO_ELEMENT_ADDRESS) *
6694 REG_FIFO_ELEMENT_ADDR_FACTOR,
6695 s_access_strs[GET_FIELD(elements[i].data,
6696 REG_FIFO_ELEMENT_ACCESS)],
6697 (u32)GET_FIELD(elements[i].data,
6698 REG_FIFO_ELEMENT_PF),
6700 (u32)GET_FIELD(elements[i].data,
6701 REG_FIFO_ELEMENT_PORT),
6702 s_privilege_strs[GET_FIELD(elements[i].data,
6703 REG_FIFO_ELEMENT_PRIVILEGE)],
6704 s_protection_strs[GET_FIELD(elements[i].data,
6705 REG_FIFO_ELEMENT_PROTECTION)],
6706 s_master_strs[GET_FIELD(elements[i].data,
6707 REG_FIFO_ELEMENT_MASTER)],
6708 err_msg ? err_msg : "unknown error code");
6711 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6713 "fifo contained %d elements", num_elements);
6715 /* Add 1 for string NULL termination */
6716 *parsed_results_bytes = results_offset + 1;
6718 return DBG_STATUS_OK;
/* Decodes a single IGU FIFO element into a printable line.
 * Extracts command/address/source/error fields from dword0 and dword1/2,
 * maps the command address to a known IGU address range, formats any
 * address- and write-data-specific detail strings, and appends the result
 * to results_buf at *results_offset.
 */
6721 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6724 u32 *results_offset)
6726 const struct igu_fifo_addr_data *found_addr = NULL;
6727 u8 source, err_type, i, is_cleanup;
6728 char parsed_addr_data[32];
6729 char parsed_wr_data[256];
6730 u32 wr_data, prod_cons;
6731 bool is_wr_cmd, is_pf;
6735 /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6738 dword12 = ((u64)element->dword2 << 32) | element->dword1;
6739 is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6740 is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6741 cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6742 source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6743 err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
/* Bounds-check table indices derived from hardware data before use */
6745 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6746 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6747 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6748 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6750 /* Find address data */
6751 for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6752 const struct igu_fifo_addr_data *curr_addr =
6753 &s_igu_fifo_addr_data[i];
6755 if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6756 curr_addr->end_addr)
6757 found_addr = curr_addr;
6761 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6763 /* Prepare parsed address data */
6764 switch (found_addr->type) {
6765 case IGU_ADDR_TYPE_MSIX_MEM:
6766 sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6768 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6769 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6770 sprintf(parsed_addr_data,
6771 " SB = 0x%x", cmd_addr - found_addr->start_addr);
6774 parsed_addr_data[0] = '\0';
6778 parsed_wr_data[0] = '\0';
6782 /* Prepare parsed write data */
6783 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6784 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6785 is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6787 if (source == IGU_SRC_ATTN) {
6788 sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
6791 u8 cleanup_val, cleanup_type;
6795 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6798 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6800 sprintf(parsed_wr_data,
6801 "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6802 cleanup_val ? "set" : "clear",
6805 u8 update_flag, en_dis_int_for_sb, segment;
6808 update_flag = GET_FIELD(wr_data,
6809 IGU_FIFO_WR_DATA_UPDATE_FLAG);
6812 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6813 segment = GET_FIELD(wr_data,
6814 IGU_FIFO_WR_DATA_SEGMENT);
6815 timer_mask = GET_FIELD(wr_data,
6816 IGU_FIFO_WR_DATA_TIMER_MASK);
6818 sprintf(parsed_wr_data,
6819 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
6821 update_flag ? "update" : "nop",
6823 (en_dis_int_for_sb == 1 ? "disable" : "nop") :
6825 segment ? "attn" : "regular",
6830 /* Add parsed element to parsed buffer */
6831 *results_offset += sprintf(qed_get_buf_ptr(results_buf,
6833 "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
6834 element->dword2, element->dword1,
6836 is_pf ? "pf" : "vf",
6837 GET_FIELD(element->dword0,
6838 IGU_FIFO_ELEMENT_DWORD0_FID),
6839 s_igu_fifo_source_strs[source],
6840 is_wr_cmd ? "wr" : "rd",
6842 (!is_pf && found_addr->vf_desc)
6843 ? found_addr->vf_desc
6847 s_igu_fifo_error_strs[err_type]);
6849 return DBG_STATUS_OK;
6852 /* Parses an IGU FIFO dump buffer.
6853 * If result_buf is not NULL, the IGU FIFO results are printed to it.
6854 * In any case, the required results buffer size is assigned to
6855 * parsed_results_bytes.
6856 * The parsing status is returned.
/* Parses an IGU FIFO dump: validates the section headers and element
 * count, then decodes each element via qed_parse_igu_fifo_element().
 */
6858 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
6860 u32 *parsed_results_bytes)
6862 const char *section_name, *param_name, *param_str_val;
6863 u32 param_num_val, num_section_params, num_elements;
6864 struct igu_fifo_element *elements;
6865 enum dbg_status status;
6866 u32 results_offset = 0;
6869 /* Read global_params section */
6870 dump_buf += qed_read_section_hdr(dump_buf,
6871 §ion_name, &num_section_params);
6872 if (strcmp(section_name, "global_params"))
6873 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6875 /* Print global params */
6876 dump_buf += qed_print_section_params(dump_buf,
6878 results_buf, &results_offset);
6880 /* Read igu_fifo_data section */
6881 dump_buf += qed_read_section_hdr(dump_buf,
6882 §ion_name, &num_section_params);
6883 if (strcmp(section_name, "igu_fifo_data"))
6884 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6885 dump_buf += qed_read_param(dump_buf,
6886 ¶m_name, ¶m_str_val, ¶m_num_val);
6887 if (strcmp(param_name, "size"))
6888 return DBG_STATUS_IGU_FIFO_BAD_DATA;
/* Section size must be a whole number of IGU FIFO elements */
6889 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
6890 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6891 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
6892 elements = (struct igu_fifo_element *)dump_buf;
6894 /* Decode elements */
6895 for (i = 0; i < num_elements; i++) {
6896 status = qed_parse_igu_fifo_element(&elements[i],
6899 if (status != DBG_STATUS_OK)
6903 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6905 "fifo contained %d elements", num_elements);
6907 /* Add 1 for string NULL termination */
6908 *parsed_results_bytes = results_offset + 1;
6910 return DBG_STATUS_OK;
/* Parses a Protection Override dump: validates section headers and
 * element count, then prints each override window (address, size,
 * read/write flags and protection strings) to results_buf.
 */
6913 static enum dbg_status
6914 qed_parse_protection_override_dump(u32 *dump_buf,
6916 u32 *parsed_results_bytes)
6918 const char *section_name, *param_name, *param_str_val;
6919 u32 param_num_val, num_section_params, num_elements;
6920 struct protection_override_element *elements;
6921 u32 results_offset = 0;
6924 /* Read global_params section */
6925 dump_buf += qed_read_section_hdr(dump_buf,
6926 §ion_name, &num_section_params);
6927 if (strcmp(section_name, "global_params"))
6928 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6930 /* Print global params */
6931 dump_buf += qed_print_section_params(dump_buf,
6933 results_buf, &results_offset);
6935 /* Read protection_override_data section */
6936 dump_buf += qed_read_section_hdr(dump_buf,
6937 §ion_name, &num_section_params);
6938 if (strcmp(section_name, "protection_override_data"))
6939 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6940 dump_buf += qed_read_param(dump_buf,
6941 ¶m_name, ¶m_str_val, ¶m_num_val);
6942 if (strcmp(param_name, "size"))
6943 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
/* Section size must be a whole number of override elements */
6944 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
6945 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6946 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6947 elements = (struct protection_override_element *)dump_buf;
6949 /* Decode elements */
6950 for (i = 0; i < num_elements; i++) {
6951 u32 address = GET_FIELD(elements[i].data,
6952 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6953 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6956 sprintf(qed_get_buf_ptr(results_buf,
6958 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6960 (u32)GET_FIELD(elements[i].data,
6961 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6962 (u32)GET_FIELD(elements[i].data,
6963 PROTECTION_OVERRIDE_ELEMENT_READ),
6964 (u32)GET_FIELD(elements[i].data,
6965 PROTECTION_OVERRIDE_ELEMENT_WRITE),
6966 s_protection_strs[GET_FIELD(elements[i].data,
6967 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6968 s_protection_strs[GET_FIELD(elements[i].data,
6969 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6972 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6974 "protection override contained %d elements",
6977 /* Add 1 for string NULL termination */
6978 *parsed_results_bytes = results_offset + 1;
6980 return DBG_STATUS_OK;
6983 /* Parses a FW Asserts dump buffer.
6984 * If result_buf is not NULL, the FW Asserts results are printed to it.
6985 * In any case, the required results buffer size is assigned to
6986 * parsed_results_bytes.
6987 * The parsing status is returned.
/* Parses a FW Asserts dump: after global_params, iterates "fw_asserts"
 * sections (one per storm, identified by a storm letter and a dword
 * count) until the "last" section marker, dumping each storm's raw
 * dwords in hex.
 */
6989 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
6991 u32 *parsed_results_bytes)
6993 u32 num_section_params, param_num_val, i, results_offset = 0;
6994 const char *param_name, *param_str_val, *section_name;
6995 bool last_section_found = false;
6997 *parsed_results_bytes = 0;
6999 /* Read global_params section */
7000 dump_buf += qed_read_section_hdr(dump_buf,
7001 §ion_name, &num_section_params);
7002 if (strcmp(section_name, "global_params"))
7003 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7005 /* Print global params */
7006 dump_buf += qed_print_section_params(dump_buf,
7008 results_buf, &results_offset);
7010 while (!last_section_found) {
7011 dump_buf += qed_read_section_hdr(dump_buf,
7013 &num_section_params);
7014 if (!strcmp(section_name, "fw_asserts")) {
7015 /* Extract params */
7016 const char *storm_letter = NULL;
7017 u32 storm_dump_size = 0;
7019 for (i = 0; i < num_section_params; i++) {
7020 dump_buf += qed_read_param(dump_buf,
7024 if (!strcmp(param_name, "storm"))
7025 storm_letter = param_str_val;
7026 else if (!strcmp(param_name, "size"))
7027 storm_dump_size = param_num_val;
7030 DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
/* Both the storm letter and a non-zero size are mandatory */
7033 if (!storm_letter || !storm_dump_size)
7034 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7038 sprintf(qed_get_buf_ptr(results_buf,
7040 "\n%sSTORM_ASSERT: size=%d\n",
7041 storm_letter, storm_dump_size);
7042 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7044 sprintf(qed_get_buf_ptr(results_buf,
7046 "%08x\n", *dump_buf);
7047 } else if (!strcmp(section_name, "last")) {
7048 last_section_found = true;
7050 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7054 /* Add 1 for string NULL termination */
7055 *parsed_results_bytes = results_offset + 1;
7057 return DBG_STATUS_OK;
7060 /***************************** Public Functions *******************************/
/* Registers the debug-data binary blob: converts each buffer header in
 * the blob into a per-type debug array via qed_set_dbg_bin_buf().
 */
7062 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7063 const u8 * const bin_ptr)
7065 struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7068 /* Convert binary data to debug arrays */
7069 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7070 qed_set_dbg_bin_buf(p_hwfn,
7071 (enum bin_dbg_buffer_type)buf_id,
7072 (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7073 buf_hdrs[buf_id].length);
7075 return DBG_STATUS_OK;
/* Allocates the zero-initialized per-hwfn debug-tools user data */
7078 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7079 void **user_data_ptr)
7081 *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7083 if (!(*user_data_ptr))
7084 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7086 return DBG_STATUS_OK;
/* Maps a dbg_status code to its human-readable string */
7089 const char *qed_dbg_get_status_str(enum dbg_status status)
7092 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
/* Public results-API wrappers: size queries and printers for idle-check
 * and MCP-trace dumps; all delegate to the static parse helpers above.
 */
7095 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7097 u32 num_dumped_dwords,
7098 u32 *results_buf_size)
7100 u32 num_errors, num_warnings;
7102 return qed_parse_idle_chk_dump(p_hwfn,
7107 &num_errors, &num_warnings);
7110 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7112 u32 num_dumped_dwords,
7117 u32 parsed_buf_size;
7119 return qed_parse_idle_chk_dump(p_hwfn,
7124 num_errors, num_warnings);
/* Stores a caller-supplied MCP trace meta buffer, used when a dump
 * carries no embedded meta data (see qed_parse_mcp_trace_dump).
 */
7127 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7128 const u32 *meta_buf)
7130 struct dbg_tools_user_data *dev_user_data =
7131 qed_dbg_get_user_data(p_hwfn);
7133 dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7136 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7138 u32 num_dumped_dwords,
7139 u32 *results_buf_size)
7141 return qed_parse_mcp_trace_dump(p_hwfn,
7142 dump_buf, NULL, results_buf_size, true);
7145 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7147 u32 num_dumped_dwords,
7150 u32 parsed_buf_size;
7152 return qed_parse_mcp_trace_dump(p_hwfn,
7154 results_buf, &parsed_buf_size, true);
/* Continuation variant: keeps meta data allocated (free_meta_data=false)
 * so subsequent trace chunks can be parsed without re-allocation.
 */
7157 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7161 u32 parsed_buf_size;
7163 return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7164 &parsed_buf_size, false);
7167 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7169 u32 num_dumped_bytes,
7172 u32 parsed_results_bytes;
7174 return qed_parse_mcp_trace_buf(p_hwfn,
7179 results_buf, &parsed_results_bytes);
7182 /* Frees the specified MCP Trace meta data */
7183 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7185 struct dbg_tools_user_data *dev_user_data;
7186 struct mcp_trace_meta *meta;
7189 dev_user_data = qed_dbg_get_user_data(p_hwfn);
7190 meta = &dev_user_data->mcp_trace_meta;
/* Nothing to free if meta data was never allocated */
7191 if (!meta->is_allocated)
7194 /* Release modules */
7195 if (meta->modules) {
7196 for (i = 0; i < meta->modules_num; i++)
7197 kfree(meta->modules[i]);
7198 kfree(meta->modules);
7201 /* Release formats */
7202 if (meta->formats) {
/* Each format owns its format_str allocation; free those first */
7203 for (i = 0; i < meta->formats_num; i++)
7204 kfree(meta->formats[i].format_str);
7205 kfree(meta->formats);
/* Mark meta data as released so a double-free is a no-op */
7208 meta->is_allocated = false;
/* Public results-API wrappers for reg-FIFO, IGU-FIFO, protection-override
 * and FW-asserts dumps. The *_buf_size variants pass a NULL results
 * buffer so only the required size is computed; the *_print variants
 * actually format into results_buf.
 */
7211 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7213 u32 num_dumped_dwords,
7214 u32 *results_buf_size)
7216 return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7219 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7221 u32 num_dumped_dwords,
7224 u32 parsed_buf_size;
7226 return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7229 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7231 u32 num_dumped_dwords,
7232 u32 *results_buf_size)
7234 return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7237 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7239 u32 num_dumped_dwords,
7242 u32 parsed_buf_size;
7244 return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7248 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7250 u32 num_dumped_dwords,
7251 u32 *results_buf_size)
7253 return qed_parse_protection_override_dump(dump_buf,
7254 NULL, results_buf_size);
7257 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7259 u32 num_dumped_dwords,
7262 u32 parsed_buf_size;
7264 return qed_parse_protection_override_dump(dump_buf,
7269 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7271 u32 num_dumped_dwords,
7272 u32 *results_buf_size)
7274 return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7277 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7279 u32 num_dumped_dwords,
7282 u32 parsed_buf_size;
7284 return qed_parse_fw_asserts_dump(dump_buf,
7285 results_buf, &parsed_buf_size);
/* Parses attention (interrupt/parity) results for a block: walks each
 * register with non-zero attention status, maps set status bits to their
 * attention names via the binary debug arrays, and logs one line per
 * asserted bit (block, type, name, status address, bit, masked flag).
 */
7288 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7289 struct dbg_attn_block_result *results)
7291 const u32 *block_attn_name_offsets;
7292 const char *attn_name_base;
7293 const char *block_name;
7294 enum dbg_attn_type attn_type;
7297 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7298 attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7299 block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
7301 return DBG_STATUS_INVALID_ARGS;
/* The name/index/string debug arrays must all be loaded (via
 * qed_dbg_user_set_bin_ptr) before attention names can be resolved.
 */
7303 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7304 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7305 !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7306 return DBG_STATUS_DBG_ARRAY_NOT_SET;
7308 block_attn_name_offsets =
7309 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
7310 results->names_offset;
7312 attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
7314 /* Go over registers with a non-zero attention status */
7315 for (i = 0; i < num_regs; i++) {
7316 struct dbg_attn_bit_mapping *bit_mapping;
7317 struct dbg_attn_reg_result *reg_result;
7318 u8 num_reg_attn, bit_idx = 0;
7320 reg_result = &results->reg_results[i];
7321 num_reg_attn = GET_FIELD(reg_result->data,
7322 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7323 bit_mapping = (struct dbg_attn_bit_mapping *)
7324 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
7325 reg_result->block_attn_offset;
7327 /* Go over attention status bits */
7328 for (j = 0; j < num_reg_attn; j++, bit_idx++) {
7329 u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7330 DBG_ATTN_BIT_MAPPING_VAL);
7331 const char *attn_name, *attn_type_str, *masked_str;
7332 u32 attn_name_offset;
7335 /* Check if bit mask should be advanced (due to unused
7338 if (GET_FIELD(bit_mapping[j].data,
7339 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7340 bit_idx += (u8)attn_idx_val;
7344 /* Check current bit index */
7345 if (!(reg_result->sts_val & BIT(bit_idx)))
7348 /* An attention bit with value=1 was found
7349 * Find attention name
7352 block_attn_name_offsets[attn_idx_val];
7353 attn_name = attn_name_base + attn_name_offset;
7356 ATTN_TYPE_INTERRUPT ? "Interrupt" :
7358 masked_str = reg_result->mask_val & BIT(bit_idx) ?
7360 sts_addr = GET_FIELD(reg_result->data,
7361 DBG_ATTN_REG_RESULT_STS_ADDRESS);
/* sts_addr is in dwords; multiply by 4 for a byte address */
7363 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7364 block_name, attn_type_str, attn_name,
7365 sts_addr * 4, bit_idx, masked_str);
7369 return DBG_STATUS_OK;
/* Serializes debug-feature operations across callers */
7372 static DEFINE_MUTEX(qed_dbg_lock);
7374 /* Wrapper for unifying the idle_chk and mcp_trace api */
7375 static enum dbg_status
7376 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7378 u32 num_dumped_dwords,
/* Error/warning counts are discarded here; only the printed text is
 * needed by the generic feature API.
 */
7381 u32 num_errors, num_warnnings;
7383 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7384 results_buf, &num_errors,
7388 /* Feature meta data lookup table */
/* Each entry binds a feature name to its four operations:
 * get_size (HSI size query), perform_dump, print_results (formatter,
 * NULL when the feature has no text form, e.g. grc/ilt), and
 * results_buf_size (formatted-size query).
 */
7391 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7392 struct qed_ptt *p_ptt, u32 *size);
7393 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7394 struct qed_ptt *p_ptt, u32 *dump_buf,
7395 u32 buf_size, u32 *dumped_dwords);
7396 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7397 u32 *dump_buf, u32 num_dumped_dwords,
7399 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7401 u32 num_dumped_dwords,
7402 u32 *results_buf_size);
7403 } qed_features_lookup[] = {
7405 "grc", qed_dbg_grc_get_dump_buf_size,
7406 qed_dbg_grc_dump, NULL, NULL}, {
7408 qed_dbg_idle_chk_get_dump_buf_size,
7409 qed_dbg_idle_chk_dump,
7410 qed_print_idle_chk_results_wrapper,
7411 qed_get_idle_chk_results_buf_size}, {
7413 qed_dbg_mcp_trace_get_dump_buf_size,
7414 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7415 qed_get_mcp_trace_results_buf_size}, {
7417 qed_dbg_reg_fifo_get_dump_buf_size,
7418 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7419 qed_get_reg_fifo_results_buf_size}, {
7421 qed_dbg_igu_fifo_get_dump_buf_size,
7422 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7423 qed_get_igu_fifo_results_buf_size}, {
7424 "protection_override",
7425 qed_dbg_protection_override_get_dump_buf_size,
7426 qed_dbg_protection_override_dump,
7427 qed_print_protection_override_results,
7428 qed_get_protection_override_results_buf_size}, {
7430 qed_dbg_fw_asserts_get_dump_buf_size,
7431 qed_dbg_fw_asserts_dump,
7432 qed_print_fw_asserts_results,
7433 qed_get_fw_asserts_results_buf_size}, {
7435 qed_dbg_ilt_get_dump_buf_size,
7436 qed_dbg_ilt_dump, NULL, NULL},};
/* Prints a formatted feature text buffer to the kernel log in
 * 80-character chunks: pr_notice for the first chunk, pr_cont for the
 * rest so the output stays on contiguous log lines.
 */
7438 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7440 u32 i, precision = 80;
7445 pr_notice("\n%.*s", precision, p_text_buf);
7446 for (i = precision; i < text_size; i += precision)
7447 pr_cont("%.*s", precision, p_text_buf + i);
7451 #define QED_RESULTS_BUF_MIN_SIZE 16
7452 /* Generic function for decoding debug feature info */
/* Converts a feature's raw binary dump into formatted text: queries the
 * formatted size, allocates a dword-aligned text buffer, runs the
 * feature's print_results callback into it, then replaces the feature's
 * dump_buf with the text buffer (unless a binary dump was requested).
 */
7453 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7454 enum qed_dbg_features feature_idx)
7456 struct qed_dbg_feature *feature =
7457 &p_hwfn->cdev->dbg_features[feature_idx];
7458 u32 text_size_bytes, null_char_pos, i;
7462 /* Check if feature supports formatting capability */
7463 if (!qed_features_lookup[feature_idx].results_buf_size)
7464 return DBG_STATUS_OK;
7466 /* Obtain size of formatted output */
7467 rc = qed_features_lookup[feature_idx].
7468 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7469 feature->dumped_dwords, &text_size_bytes);
7470 if (rc != DBG_STATUS_OK)
7473 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
7474 null_char_pos = text_size_bytes - 1;
7475 text_size_bytes = (text_size_bytes + 3) & ~0x3;
7477 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7478 DP_NOTICE(p_hwfn->cdev,
7479 "formatted size of feature was too small %d. Aborting\n",
7481 return DBG_STATUS_INVALID_ARGS;
7484 /* Allocate temp text buf */
7485 text_buf = vzalloc(text_size_bytes);
7487 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7489 /* Decode feature opcodes to string on temp buf */
7490 rc = qed_features_lookup[feature_idx].
7491 print_results(p_hwfn, (u32 *)feature->dump_buf,
7492 feature->dumped_dwords, text_buf);
7493 if (rc != DBG_STATUS_OK) {
7498 /* Replace the original null character with a '\n' character.
7499 * The bytes that were added as a result of the dword alignment are also
7500 * padded with '\n' characters.
7502 for (i = null_char_pos; i < text_size_bytes; i++)
7505 /* Dump printable feature to log */
7506 if (p_hwfn->cdev->print_dbg_data)
7507 qed_dbg_print_feature(text_buf, text_size_bytes);
7509 /* Just return the original binary buffer if requested */
7510 if (p_hwfn->cdev->dbg_bin_dump) {
7512 return DBG_STATUS_OK;
7515 /* Free the old dump_buf and point the dump_buf to the newly allocagted
7516 * and formatted text buffer.
7518 vfree(feature->dump_buf);
7519 feature->dump_buf = text_buf;
7520 feature->buf_size = text_size_bytes;
7521 feature->dumped_dwords = text_size_bytes / 4;
7525 #define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
7527 /* Generic function for performing the dump of a debug feature. */
/* Frees any stale buffer, queries the feature's dump size, allocates a
 * buffer, performs the dump via the feature's perform_dump callback, and
 * formats the result through format_feature().
 */
7528 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7529 struct qed_ptt *p_ptt,
7530 enum qed_dbg_features feature_idx)
7532 struct qed_dbg_feature *feature =
7533 &p_hwfn->cdev->dbg_features[feature_idx];
7534 u32 buf_size_dwords;
7537 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7538 qed_features_lookup[feature_idx].name);
7540 /* Dump_buf was already allocated need to free (this can happen if dump
7541 * was called but file was never read).
7542 * We can't use the buffer as is since size may have changed.
7544 if (feature->dump_buf) {
7545 vfree(feature->dump_buf);
7546 feature->dump_buf = NULL;
7549 /* Get buffer size from hsi, allocate accordingly, and perform the
7552 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
/* NVRAM_GET_IMAGE_FAILED is tolerated here — see the comment below */
7554 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7557 if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
7558 feature->buf_size = 0;
7559 DP_NOTICE(p_hwfn->cdev,
7560 "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
7561 qed_features_lookup[feature_idx].name,
7562 buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
7564 return DBG_STATUS_OK;
7567 feature->buf_size = buf_size_dwords * sizeof(u32);
7568 feature->dump_buf = vmalloc(feature->buf_size);
7569 if (!feature->dump_buf)
7570 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7572 rc = qed_features_lookup[feature_idx].
7573 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7574 feature->buf_size / sizeof(u32),
7575 &feature->dumped_dwords);
7577 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7578 * In this case the buffer holds valid binary data, but we wont able
7579 * to parse it (since parsing relies on data in NVRAM which is only
7580 * accessible when MFW is responsive). skip the formatting but return
7581 * success so that binary data is provided.
7583 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7584 return DBG_STATUS_OK;
7586 if (rc != DBG_STATUS_OK)
7590 rc = format_feature(p_hwfn, feature_idx);
/* Thin public wrappers mapping each debug feature to the generic
 * qed_dbg_feature()/qed_dbg_feature_size() helpers.
 */
7594 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7596 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7599 int qed_dbg_grc_size(struct qed_dev *cdev)
7601 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7604 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7606 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7610 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7612 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7615 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7617 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7621 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7623 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7626 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7628 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7632 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7634 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
/* Queries the length of an NVM image via its attributes; returns the
 * qed_mcp_get_nvm_image_att() status.
 */
7637 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7638 enum qed_nvm_images image_id, u32 *length)
7640 struct qed_nvm_image_att image_att;
7644 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7648 *length = image_att.length;
/* Read NVM image @image_id into @buffer.
 * The read length is rounded up to a dword multiple. All images except
 * QED_NVM_IMAGE_NVM_META are converted in place to big-endian dwords.
 * On success *@num_dumped_bytes is the (rounded) image length in bytes.
 */
static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
			     u32 *num_dumped_bytes,
			     enum qed_nvm_images image_id)
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	*num_dumped_bytes = 0;
	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
	DP_NOTICE(p_hwfn->cdev,
		  "Collecting a debug feature [\"nvram image %d\"]\n",
	/* MCP reads operate on whole dwords. */
	len_rounded = roundup(len_rounded, sizeof(u32));
	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
	if (image_id != QED_NVM_IMAGE_NVM_META)
		for (i = 0; i < len_rounded; i += 4) {
			/* In-place dword byte swap to big-endian. */
			val = cpu_to_be32(*(u32 *)(buffer + i));
			*(u32 *)(buffer + i) = val;
	*num_dumped_bytes = len_rounded;
7689 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7690 u32 *num_dumped_bytes)
7692 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7696 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7698 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7701 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7702 u32 *num_dumped_bytes)
7704 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7708 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7710 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7713 int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7715 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
7718 int qed_dbg_ilt_size(struct qed_dev *cdev)
7720 return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
7723 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7724 u32 *num_dumped_bytes)
7726 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7730 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7732 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
/* Defines the amount of bytes allocated for recording the length of debugfs feature buffer */
7738 #define REGDUMP_HEADER_SIZE sizeof(u32)
7739 #define REGDUMP_HEADER_SIZE_SHIFT 0
7740 #define REGDUMP_HEADER_SIZE_MASK 0xffffff
7741 #define REGDUMP_HEADER_FEATURE_SHIFT 24
7742 #define REGDUMP_HEADER_FEATURE_MASK 0x1f
7743 #define REGDUMP_HEADER_BIN_DUMP_SHIFT 29
7744 #define REGDUMP_HEADER_BIN_DUMP_MASK 0x1
7745 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
7746 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
7747 #define REGDUMP_HEADER_ENGINE_SHIFT 31
7748 #define REGDUMP_HEADER_ENGINE_MASK 0x1
7749 #define REGDUMP_MAX_SIZE 0x1000000
7750 #define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
/* Feature IDs written into the FEATURE field of each regdump section
 * header (see qed_calc_regdump_header()).
 */
enum debug_print_features {
	PROTECTION_OVERRIDE = 5,
/* Build the header word that precedes each feature section in the regdump
 * buffer. Packs the section size, feature ID, a binary-dump marker, the
 * omit-engine flag and the engine number using SET_FIELD().
 */
static u32 qed_calc_regdump_header(struct qed_dev *cdev,
				   enum debug_print_features feature,
				   int engine, u32 feature_size, u8 omit_engine)
	SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
	/* The SIZE field is only REGDUMP_HEADER_SIZE_MASK (24 bits) wide;
	 * warn when feature_size does not fit, as the recorded length would
	 * then be wrong.
	 */
	if (res != feature_size)
		"Feature %d is too large (size 0x%x) and will corrupt the dump\n",
		feature, feature_size);
	SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
	SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
	SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
	SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
/* Collect every debug feature into one flat buffer.
 *
 * Layout: a sequence of sections, each preceded by one header word
 * (REGDUMP_HEADER_SIZE bytes) built by qed_calc_regdump_header().
 * Engine-specific features (two idle_chk passes, reg/IGU FIFOs, protection
 * override, FW asserts, ILT, GRC) are collected for each HW function;
 * device-wide features (mcp_trace and the NVM images) are collected once
 * afterwards. A failed feature is logged and skipped so that the remaining
 * features are still collected.
 */
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
	u8 cur_engine, omit_engine = 0, org_engine;
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	int grc_params[MAX_DBG_GRC_PARAMS], i;
	u32 offset = 0, feature_size;
	/* Snapshot the GRC params so they can be restored just before the
	 * GRC dump (features collected above may alter them).
	 */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		grc_params[i] = dev_data->grc.param_val[i];
	if (!QED_IS_CMT(cdev))
	/* Serialize dumps and flag binary-dump mode for the duration. */
	mutex_lock(&qed_dbg_lock);
	cdev->dbg_bin_dump = true;
	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);
		/* First idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		/* Second idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
				"qed_dbg_protection_override failed. rc = %d\n",
		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, FW_ASSERTS,
						    cur_engine, feature_size,
			offset += (feature_size + REGDUMP_HEADER_SIZE);
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
		/* ILT dump - skipped when disabled or over the size cap. */
		feature_size = qed_dbg_ilt_size(cdev);
		if (!cdev->disable_ilt_dump &&
		    feature_size < ILT_DUMP_MAX_SIZE) {
			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
					 REGDUMP_HEADER_SIZE, &feature_size);
				*(u32 *)((u8 *)buffer + offset) =
				    qed_calc_regdump_header(cdev, ILT_DUMP,
				offset += feature_size + REGDUMP_HEADER_SIZE;
				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
		/* GRC dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
			dev_data->grc.param_val[i] = grc_params[i];
		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, GRC_DUMP,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
	/* Restore the originally-selected debug engine. */
	qed_set_debug_engine(cdev, org_engine);
	/* mcp_trace - device-wide, collected once. */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	/* nvm cfg1 image - -ENOENT (image absent) is not an error. */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_CFG1);
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
			"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
			QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
	/* nvm default cfg image */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
			"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
			QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
	/* nvm meta image */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
			       &feature_size, QED_NVM_IMAGE_NVM_META);
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_META, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
			"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
			QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
	/* nvm mdump image */
	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_MDUMP);
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MDUMP, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
			"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
			QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
	cdev->dbg_bin_dump = false;
	mutex_unlock(&qed_dbg_lock);
/* Compute the buffer size (in bytes) that qed_dbg_all_data() requires:
 * per-engine feature sizes plus device-wide features, each including one
 * REGDUMP_HEADER_SIZE header word. If the total would exceed
 * REGDUMP_MAX_SIZE, ILT dumping is disabled and its size subtracted.
 */
int qed_dbg_all_data_size(struct qed_dev *cdev)
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
	u8 cur_engine, org_engine;
	cdev->disable_ilt_dump = false;
	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Engine specific */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "calculating idle_chk and grcdump register length for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);
		/* idle_chk is counted twice - it is dumped twice. */
		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
		    REGDUMP_HEADER_SIZE +
		    qed_dbg_protection_override_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
		/* ILT only counts when below the per-dump size cap. */
		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
		if (ilt_len < ILT_DUMP_MAX_SIZE) {
			total_ilt_len += ilt_len;
			regs_len += ilt_len;
	qed_set_debug_engine(cdev, org_engine);
	/* Engine common - mcp_trace and NVM images are device-wide. */
	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	/* Drop ILT from the dump when the total is over the cap. */
	if (regs_len > REGDUMP_MAX_SIZE) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "Dump exceeds max size 0x%x, disable ILT dump\n",
		cdev->disable_ilt_dump = true;
		regs_len -= total_ilt_len;
/* Execute debug feature @feature on the current debug engine and copy
 * the resulting dump into @buffer. On success *@num_dumped_bytes is set
 * from the feature's dumped dword count; on dump failure it is set to 0.
 */
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	struct qed_dbg_feature *qed_feature =
		&cdev->dbg_features[feature];
	enum dbg_status dbg_rc;
	struct qed_ptt *p_ptt;
	/* PTT window is required for register access during the dump. */
	p_ptt = qed_ptt_acquire(p_hwfn);
	/* Perform the dump into the feature's internal buffer. */
	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
	if (dbg_rc != DBG_STATUS_OK) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
			   qed_dbg_get_status_str(dbg_rc));
		*num_dumped_bytes = 0;
	DP_VERBOSE(cdev, QED_MSG_DEBUG,
		   "copying debugfs feature to external buffer\n");
	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
	qed_ptt_release(p_hwfn, p_ptt);
/* Query the dump buffer size (in bytes) required for @feature.
 * The size is computed via the feature's get_size() callback, forced to 0
 * on query failure or when it exceeds MAX_DBG_FEATURE_SIZE_DWORDS, cached
 * in the feature struct and returned.
 */
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 buf_size_dwords;
	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
	if (rc != DBG_STATUS_OK)
		buf_size_dwords = 0;
	/* Feature will not be dumped if it exceeds maximum size */
	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
		buf_size_dwords = 0;
	qed_ptt_release(p_hwfn, p_ptt);
	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
	return qed_feature->buf_size;
8133 u8 qed_get_debug_engine(struct qed_dev *cdev)
8135 return cdev->engine_for_debug;
8138 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8140 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8142 cdev->engine_for_debug = engine_number;
/* One-time debug init per PF: point every HW function's debug code at the
 * debug-values section of the firmware file and default the debug engine
 * to 0.
 */
void qed_dbg_pf_init(struct qed_dev *cdev)
	const u8 *dbg_values = NULL;
	/* Debug values are after init values.
	 * The offset is the first dword of the file.
	 */
	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
	for_each_hwfn(cdev, i) {
		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
	/* Set the hwfn to be 0 as default */
	cdev->engine_for_debug = 0;
8164 void qed_dbg_pf_exit(struct qed_dev *cdev)
8166 struct qed_dbg_feature *feature = NULL;
8167 enum qed_dbg_features feature_idx;
8169 /* debug features' buffers may be allocated if debug feature was used
8170 * but dump wasn't called
8172 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8173 feature = &cdev->dbg_features[feature_idx];
8174 if (feature->dump_buf) {
8175 vfree(feature->dump_buf);
8176 feature->dump_buf = NULL;