1 // SPDX-License-Identifier: GPL-2.0-only
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015 QLogic Corporation
6 #include <linux/module.h>
7 #include <linux/vmalloc.h>
8 #include <linux/crc32.h>
14 #include "qed_reg_addr.h"
16 /* Memory groups enum */
28 MEM_GROUP_CONN_CFC_MEM,
29 MEM_GROUP_TASK_CFC_MEM,
50 /* Memory groups names */
51 static const char * const s_mem_group_names[] = {
83 /* Idle check conditions */
85 static u32 cond5(const u32 *r, const u32 *imm)
87 return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 static u32 cond7(const u32 *r, const u32 *imm)
92 return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 static u32 cond6(const u32 *r, const u32 *imm)
97 return (r[0] & imm[0]) != imm[1];
100 static u32 cond9(const u32 *r, const u32 *imm)
102 return ((r[0] & imm[0]) >> imm[1]) !=
103 (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 static u32 cond10(const u32 *r, const u32 *imm)
108 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 static u32 cond4(const u32 *r, const u32 *imm)
113 return (r[0] & ~imm[0]) != imm[1];
116 static u32 cond0(const u32 *r, const u32 *imm)
118 return (r[0] & ~r[1]) != imm[0];
121 static u32 cond1(const u32 *r, const u32 *imm)
123 return r[0] != imm[0];
126 static u32 cond11(const u32 *r, const u32 *imm)
128 return r[0] != r[1] && r[2] == imm[0];
131 static u32 cond12(const u32 *r, const u32 *imm)
133 return r[0] != r[1] && r[2] > imm[0];
136 static u32 cond3(const u32 *r, const u32 *imm)
141 static u32 cond13(const u32 *r, const u32 *imm)
143 return r[0] & imm[0];
146 static u32 cond8(const u32 *r, const u32 *imm)
148 return r[0] < (r[1] - imm[0]);
151 static u32 cond2(const u32 *r, const u32 *imm)
153 return r[0] > imm[0];
156 /* Array of Idle Check conditions */
157 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
174 /******************************* Data Types **********************************/
184 /* Chip constant definitions */
190 /* Platform constant definitions */
191 struct platform_defs {
198 /* Storm constant definitions.
199 * Addresses are in bytes, sizes are in quad-regs.
203 enum block_id block_id;
204 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
206 u32 sem_fast_mem_addr;
207 u32 sem_frame_mode_addr;
208 u32 sem_slow_enable_addr;
209 u32 sem_slow_mode_addr;
210 u32 sem_slow_mode1_conf_addr;
211 u32 sem_sync_dbg_empty_addr;
212 u32 sem_slow_dbg_empty_addr;
214 u32 cm_conn_ag_ctx_lid_size;
215 u32 cm_conn_ag_ctx_rd_addr;
216 u32 cm_conn_st_ctx_lid_size;
217 u32 cm_conn_st_ctx_rd_addr;
218 u32 cm_task_ag_ctx_lid_size;
219 u32 cm_task_ag_ctx_rd_addr;
220 u32 cm_task_st_ctx_lid_size;
221 u32 cm_task_st_ctx_rd_addr;
224 /* Block constant definitions */
227 bool exists[MAX_CHIP_IDS];
228 bool associated_to_storm;
230 /* Valid only if associated_to_storm is true */
232 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
236 u32 dbg_force_valid_addr;
237 u32 dbg_force_frame_addr;
240 /* If true, block is taken out of reset before dump */
242 enum dbg_reset_regs reset_reg;
244 /* Bit offset in reset register */
248 /* Reset register definitions */
249 struct reset_reg_defs {
251 bool exists[MAX_CHIP_IDS];
252 u32 unreset_val[MAX_CHIP_IDS];
255 struct grc_param_defs {
256 u32 default_val[MAX_CHIP_IDS];
261 u32 exclude_all_preset_val;
262 u32 crash_preset_val;
265 /* Address is in 128b units. Width is in bits. */
266 struct rss_mem_defs {
267 const char *mem_name;
268 const char *type_name;
271 u32 num_entries[MAX_CHIP_IDS];
274 struct vfc_ram_defs {
275 const char *mem_name;
276 const char *type_name;
281 struct big_ram_defs {
282 const char *instance_name;
283 enum mem_groups mem_group_id;
284 enum mem_groups ram_mem_group_id;
285 enum dbg_grc_params grc_param;
288 u32 is_256b_reg_addr;
289 u32 is_256b_bit_offset[MAX_CHIP_IDS];
290 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
294 const char *phy_name;
296 /* PHY base GRC address */
299 /* Relative address of indirect TBUS address register (bits 0..7) */
300 u32 tbus_addr_lo_addr;
302 /* Relative address of indirect TBUS address register (bits 8..10) */
303 u32 tbus_addr_hi_addr;
305 /* Relative address of indirect TBUS data register (bits 0..7) */
306 u32 tbus_data_lo_addr;
308 /* Relative address of indirect TBUS data register (bits 8..11) */
309 u32 tbus_data_hi_addr;
312 /* Split type definitions */
313 struct split_type_defs {
317 /******************************** Constants **********************************/
319 #define MAX_LCIDS 320
320 #define MAX_LTIDS 320
322 #define NUM_IOR_SETS 2
323 #define IORS_PER_SET 176
324 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
326 #define BYTES_IN_DWORD sizeof(u32)
328 /* In the macros below, size and offset are specified in bits */
329 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
330 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
331 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
332 #define FIELD_DWORD_OFFSET(type, field) \
333 (int)(FIELD_BIT_OFFSET(type, field) / 32)
334 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
335 #define FIELD_BIT_MASK(type, field) \
336 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
337 FIELD_DWORD_SHIFT(type, field))
339 #define SET_VAR_FIELD(var, type, field, val) \
341 var[FIELD_DWORD_OFFSET(type, field)] &= \
342 (~FIELD_BIT_MASK(type, field)); \
343 var[FIELD_DWORD_OFFSET(type, field)] |= \
344 (val) << FIELD_DWORD_SHIFT(type, field); \
347 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
349 for (i = 0; i < (arr_size); i++) \
350 qed_wr(dev, ptt, addr, (arr)[i]); \
353 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
355 for (i = 0; i < (arr_size); i++) \
356 (arr)[i] = qed_rd(dev, ptt, addr); \
359 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
360 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
362 /* Extra lines include a signature line + optional latency events line */
363 #define NUM_EXTRA_DBG_LINES(block_desc) \
364 (1 + ((block_desc)->has_latency_events ? 1 : 0))
365 #define NUM_DBG_LINES(block_desc) \
366 ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
368 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
369 #define RAM_LINES_TO_BYTES(lines) \
370 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
372 #define REG_DUMP_LEN_SHIFT 24
373 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
374 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
376 #define IDLE_CHK_RULE_SIZE_DWORDS \
377 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
379 #define IDLE_CHK_RESULT_HDR_DWORDS \
380 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
382 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
383 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
385 #define PAGE_MEM_DESC_SIZE_DWORDS \
386 BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
388 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
390 /* The sizes and offsets below are specified in bits */
391 #define VFC_CAM_CMD_STRUCT_SIZE 64
392 #define VFC_CAM_CMD_ROW_OFFSET 48
393 #define VFC_CAM_CMD_ROW_SIZE 9
394 #define VFC_CAM_ADDR_STRUCT_SIZE 16
395 #define VFC_CAM_ADDR_OP_OFFSET 0
396 #define VFC_CAM_ADDR_OP_SIZE 4
397 #define VFC_CAM_RESP_STRUCT_SIZE 256
398 #define VFC_RAM_ADDR_STRUCT_SIZE 16
399 #define VFC_RAM_ADDR_OP_OFFSET 0
400 #define VFC_RAM_ADDR_OP_SIZE 2
401 #define VFC_RAM_ADDR_ROW_OFFSET 2
402 #define VFC_RAM_ADDR_ROW_SIZE 10
403 #define VFC_RAM_RESP_STRUCT_SIZE 256
405 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
406 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
407 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
408 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
409 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
410 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
412 #define NUM_VFC_RAM_TYPES 4
414 #define VFC_CAM_NUM_ROWS 512
416 #define VFC_OPCODE_CAM_RD 14
417 #define VFC_OPCODE_RAM_RD 0
419 #define NUM_RSS_MEM_TYPES 5
421 #define NUM_BIG_RAM_TYPES 3
422 #define BIG_RAM_NAME_LEN 3
424 #define NUM_PHY_TBUS_ADDRESSES 2048
425 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
427 #define RESET_REG_UNRESET_OFFSET 4
429 #define STALL_DELAY_MS 500
431 #define STATIC_DEBUG_LINE_DWORDS 9
433 #define NUM_COMMON_GLOBAL_PARAMS 8
435 #define FW_IMG_MAIN 1
437 #define REG_FIFO_ELEMENT_DWORDS 2
438 #define REG_FIFO_DEPTH_ELEMENTS 32
439 #define REG_FIFO_DEPTH_DWORDS \
440 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
442 #define IGU_FIFO_ELEMENT_DWORDS 4
443 #define IGU_FIFO_DEPTH_ELEMENTS 64
444 #define IGU_FIFO_DEPTH_DWORDS \
445 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
447 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
448 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
449 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
450 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
451 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
453 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
455 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
457 #define EMPTY_FW_VERSION_STR "???_???_???_???"
458 #define EMPTY_FW_IMAGE_STR "???????????????"
460 /***************************** Constant Arrays *******************************/
468 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
470 /* Chip constant definitions array */
471 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
472 {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
473 {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
476 /* Storm constant definitions array */
477 static struct storm_defs s_storm_defs[] = {
480 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
481 DBG_BUS_CLIENT_RBCT}, true,
482 TSEM_REG_FAST_MEMORY,
483 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
484 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
485 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
486 TCM_REG_CTX_RBC_ACCS,
487 4, TCM_REG_AGG_CON_CTX,
488 16, TCM_REG_SM_CON_CTX,
489 2, TCM_REG_AGG_TASK_CTX,
490 4, TCM_REG_SM_TASK_CTX},
494 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
495 DBG_BUS_CLIENT_RBCM}, false,
496 MSEM_REG_FAST_MEMORY,
497 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
498 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
499 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
500 MCM_REG_CTX_RBC_ACCS,
501 1, MCM_REG_AGG_CON_CTX,
502 10, MCM_REG_SM_CON_CTX,
503 2, MCM_REG_AGG_TASK_CTX,
504 7, MCM_REG_SM_TASK_CTX},
508 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
509 DBG_BUS_CLIENT_RBCU}, false,
510 USEM_REG_FAST_MEMORY,
511 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
512 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
513 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
514 UCM_REG_CTX_RBC_ACCS,
515 2, UCM_REG_AGG_CON_CTX,
516 13, UCM_REG_SM_CON_CTX,
517 3, UCM_REG_AGG_TASK_CTX,
518 3, UCM_REG_SM_TASK_CTX},
522 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
523 DBG_BUS_CLIENT_RBCX}, false,
524 XSEM_REG_FAST_MEMORY,
525 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
526 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
527 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
528 XCM_REG_CTX_RBC_ACCS,
529 9, XCM_REG_AGG_CON_CTX,
530 15, XCM_REG_SM_CON_CTX,
536 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
537 DBG_BUS_CLIENT_RBCY}, false,
538 YSEM_REG_FAST_MEMORY,
539 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
540 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
541 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
542 YCM_REG_CTX_RBC_ACCS,
543 2, YCM_REG_AGG_CON_CTX,
544 3, YCM_REG_SM_CON_CTX,
545 2, YCM_REG_AGG_TASK_CTX,
546 12, YCM_REG_SM_TASK_CTX},
550 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
551 DBG_BUS_CLIENT_RBCS}, true,
552 PSEM_REG_FAST_MEMORY,
553 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
554 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
555 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
556 PCM_REG_CTX_RBC_ACCS,
558 10, PCM_REG_SM_CON_CTX,
563 /* Block definitions array */
565 static struct block_defs block_grc_defs = {
567 {true, true, true}, false, 0,
568 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
569 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
570 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
571 GRC_REG_DBG_FORCE_FRAME,
572 true, false, DBG_RESET_REG_MISC_PL_UA, 1
575 static struct block_defs block_miscs_defs = {
576 "miscs", {true, true, true}, false, 0,
577 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
579 false, false, MAX_DBG_RESET_REGS, 0
582 static struct block_defs block_misc_defs = {
583 "misc", {true, true, true}, false, 0,
584 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
586 false, false, MAX_DBG_RESET_REGS, 0
589 static struct block_defs block_dbu_defs = {
590 "dbu", {true, true, true}, false, 0,
591 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
593 false, false, MAX_DBG_RESET_REGS, 0
596 static struct block_defs block_pglue_b_defs = {
598 {true, true, true}, false, 0,
599 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
600 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
601 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
602 PGLUE_B_REG_DBG_FORCE_FRAME,
603 true, false, DBG_RESET_REG_MISCS_PL_HV, 1
606 static struct block_defs block_cnig_defs = {
608 {true, true, true}, false, 0,
609 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
610 DBG_BUS_CLIENT_RBCW},
611 CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
612 CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
613 CNIG_REG_DBG_FORCE_FRAME_K2_E5,
614 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
617 static struct block_defs block_cpmu_defs = {
618 "cpmu", {true, true, true}, false, 0,
619 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
621 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
624 static struct block_defs block_ncsi_defs = {
626 {true, true, true}, false, 0,
627 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
628 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
629 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
630 NCSI_REG_DBG_FORCE_FRAME,
631 true, false, DBG_RESET_REG_MISCS_PL_HV, 5
634 static struct block_defs block_opte_defs = {
635 "opte", {true, true, false}, false, 0,
636 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
638 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
641 static struct block_defs block_bmb_defs = {
643 {true, true, true}, false, 0,
644 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
645 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
646 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
647 BMB_REG_DBG_FORCE_FRAME,
648 true, false, DBG_RESET_REG_MISCS_PL_UA, 7
651 static struct block_defs block_pcie_defs = {
653 {true, true, true}, false, 0,
654 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
655 DBG_BUS_CLIENT_RBCH},
656 PCIE_REG_DBG_COMMON_SELECT_K2_E5,
657 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
658 PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
659 PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
660 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
661 false, false, MAX_DBG_RESET_REGS, 0
664 static struct block_defs block_mcp_defs = {
665 "mcp", {true, true, true}, false, 0,
666 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
668 false, false, MAX_DBG_RESET_REGS, 0
671 static struct block_defs block_mcp2_defs = {
673 {true, true, true}, false, 0,
674 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
675 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
676 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
677 MCP2_REG_DBG_FORCE_FRAME,
678 false, false, MAX_DBG_RESET_REGS, 0
681 static struct block_defs block_pswhst_defs = {
683 {true, true, true}, false, 0,
684 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
685 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
686 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
687 PSWHST_REG_DBG_FORCE_FRAME,
688 true, false, DBG_RESET_REG_MISC_PL_HV, 0
691 static struct block_defs block_pswhst2_defs = {
693 {true, true, true}, false, 0,
694 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
695 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
696 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
697 PSWHST2_REG_DBG_FORCE_FRAME,
698 true, false, DBG_RESET_REG_MISC_PL_HV, 0
701 static struct block_defs block_pswrd_defs = {
703 {true, true, true}, false, 0,
704 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
705 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
706 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
707 PSWRD_REG_DBG_FORCE_FRAME,
708 true, false, DBG_RESET_REG_MISC_PL_HV, 2
711 static struct block_defs block_pswrd2_defs = {
713 {true, true, true}, false, 0,
714 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
715 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
716 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
717 PSWRD2_REG_DBG_FORCE_FRAME,
718 true, false, DBG_RESET_REG_MISC_PL_HV, 2
721 static struct block_defs block_pswwr_defs = {
723 {true, true, true}, false, 0,
724 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
725 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
726 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
727 PSWWR_REG_DBG_FORCE_FRAME,
728 true, false, DBG_RESET_REG_MISC_PL_HV, 3
731 static struct block_defs block_pswwr2_defs = {
732 "pswwr2", {true, true, true}, false, 0,
733 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
735 true, false, DBG_RESET_REG_MISC_PL_HV, 3
738 static struct block_defs block_pswrq_defs = {
740 {true, true, true}, false, 0,
741 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
742 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
743 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
744 PSWRQ_REG_DBG_FORCE_FRAME,
745 true, false, DBG_RESET_REG_MISC_PL_HV, 1
748 static struct block_defs block_pswrq2_defs = {
750 {true, true, true}, false, 0,
751 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
752 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
753 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
754 PSWRQ2_REG_DBG_FORCE_FRAME,
755 true, false, DBG_RESET_REG_MISC_PL_HV, 1
758 static struct block_defs block_pglcs_defs = {
760 {true, true, true}, false, 0,
761 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
762 DBG_BUS_CLIENT_RBCH},
763 PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
764 PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
765 PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
766 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
769 static struct block_defs block_ptu_defs = {
771 {true, true, true}, false, 0,
772 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
773 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
774 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
775 PTU_REG_DBG_FORCE_FRAME,
776 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
779 static struct block_defs block_dmae_defs = {
781 {true, true, true}, false, 0,
782 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
783 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
784 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
785 DMAE_REG_DBG_FORCE_FRAME,
786 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
789 static struct block_defs block_tcm_defs = {
791 {true, true, true}, true, DBG_TSTORM_ID,
792 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
793 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
794 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
795 TCM_REG_DBG_FORCE_FRAME,
796 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
799 static struct block_defs block_mcm_defs = {
801 {true, true, true}, true, DBG_MSTORM_ID,
802 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
803 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
804 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
805 MCM_REG_DBG_FORCE_FRAME,
806 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
809 static struct block_defs block_ucm_defs = {
811 {true, true, true}, true, DBG_USTORM_ID,
812 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
813 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
814 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
815 UCM_REG_DBG_FORCE_FRAME,
816 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
819 static struct block_defs block_xcm_defs = {
821 {true, true, true}, true, DBG_XSTORM_ID,
822 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
823 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
824 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
825 XCM_REG_DBG_FORCE_FRAME,
826 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
829 static struct block_defs block_ycm_defs = {
831 {true, true, true}, true, DBG_YSTORM_ID,
832 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
833 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
834 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
835 YCM_REG_DBG_FORCE_FRAME,
836 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
839 static struct block_defs block_pcm_defs = {
841 {true, true, true}, true, DBG_PSTORM_ID,
842 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
843 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
844 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
845 PCM_REG_DBG_FORCE_FRAME,
846 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
849 static struct block_defs block_qm_defs = {
851 {true, true, true}, false, 0,
852 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
853 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
854 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
855 QM_REG_DBG_FORCE_FRAME,
856 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
859 static struct block_defs block_tm_defs = {
861 {true, true, true}, false, 0,
862 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
863 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
864 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
865 TM_REG_DBG_FORCE_FRAME,
866 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
869 static struct block_defs block_dorq_defs = {
871 {true, true, true}, false, 0,
872 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
873 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
874 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
875 DORQ_REG_DBG_FORCE_FRAME,
876 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
879 static struct block_defs block_brb_defs = {
881 {true, true, true}, false, 0,
882 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
883 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
884 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
885 BRB_REG_DBG_FORCE_FRAME,
886 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
889 static struct block_defs block_src_defs = {
891 {true, true, true}, false, 0,
892 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
893 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
894 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
895 SRC_REG_DBG_FORCE_FRAME,
896 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
899 static struct block_defs block_prs_defs = {
901 {true, true, true}, false, 0,
902 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
903 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
904 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
905 PRS_REG_DBG_FORCE_FRAME,
906 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
909 static struct block_defs block_tsdm_defs = {
911 {true, true, true}, true, DBG_TSTORM_ID,
912 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
913 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
914 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
915 TSDM_REG_DBG_FORCE_FRAME,
916 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
919 static struct block_defs block_msdm_defs = {
921 {true, true, true}, true, DBG_MSTORM_ID,
922 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
923 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
924 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
925 MSDM_REG_DBG_FORCE_FRAME,
926 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
929 static struct block_defs block_usdm_defs = {
931 {true, true, true}, true, DBG_USTORM_ID,
932 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
933 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
934 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
935 USDM_REG_DBG_FORCE_FRAME,
936 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
939 static struct block_defs block_xsdm_defs = {
941 {true, true, true}, true, DBG_XSTORM_ID,
942 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
943 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
944 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
945 XSDM_REG_DBG_FORCE_FRAME,
946 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
949 static struct block_defs block_ysdm_defs = {
951 {true, true, true}, true, DBG_YSTORM_ID,
952 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
953 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
954 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
955 YSDM_REG_DBG_FORCE_FRAME,
956 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
959 static struct block_defs block_psdm_defs = {
961 {true, true, true}, true, DBG_PSTORM_ID,
962 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
963 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
964 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
965 PSDM_REG_DBG_FORCE_FRAME,
966 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
969 static struct block_defs block_tsem_defs = {
971 {true, true, true}, true, DBG_TSTORM_ID,
972 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
973 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
974 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
975 TSEM_REG_DBG_FORCE_FRAME,
976 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
979 static struct block_defs block_msem_defs = {
981 {true, true, true}, true, DBG_MSTORM_ID,
982 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
983 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
984 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
985 MSEM_REG_DBG_FORCE_FRAME,
986 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
989 static struct block_defs block_usem_defs = {
991 {true, true, true}, true, DBG_USTORM_ID,
992 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
993 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
994 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
995 USEM_REG_DBG_FORCE_FRAME,
996 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
999 static struct block_defs block_xsem_defs = {
1001 {true, true, true}, true, DBG_XSTORM_ID,
1002 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1003 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1004 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1005 XSEM_REG_DBG_FORCE_FRAME,
1006 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1009 static struct block_defs block_ysem_defs = {
1011 {true, true, true}, true, DBG_YSTORM_ID,
1012 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1013 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1014 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1015 YSEM_REG_DBG_FORCE_FRAME,
1016 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1019 static struct block_defs block_psem_defs = {
1021 {true, true, true}, true, DBG_PSTORM_ID,
1022 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1023 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1024 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1025 PSEM_REG_DBG_FORCE_FRAME,
1026 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1029 static struct block_defs block_rss_defs = {
1031 {true, true, true}, false, 0,
1032 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1033 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1034 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1035 RSS_REG_DBG_FORCE_FRAME,
1036 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1039 static struct block_defs block_tmld_defs = {
1041 {true, true, true}, false, 0,
1042 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1043 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1044 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1045 TMLD_REG_DBG_FORCE_FRAME,
1046 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1049 static struct block_defs block_muld_defs = {
1051 {true, true, true}, false, 0,
1052 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1053 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1054 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1055 MULD_REG_DBG_FORCE_FRAME,
1056 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1059 static struct block_defs block_yuld_defs = {
1061 {true, true, false}, false, 0,
1062 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1063 MAX_DBG_BUS_CLIENTS},
1064 YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1065 YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1066 YULD_REG_DBG_FORCE_FRAME_BB_K2,
1067 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1071 static struct block_defs block_xyld_defs = {
1073 {true, true, true}, false, 0,
1074 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1075 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1076 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1077 XYLD_REG_DBG_FORCE_FRAME,
1078 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1081 static struct block_defs block_ptld_defs = {
1083 {false, false, true}, false, 0,
1084 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1085 PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1086 PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1087 PTLD_REG_DBG_FORCE_FRAME_E5,
1088 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1092 static struct block_defs block_ypld_defs = {
1094 {false, false, true}, false, 0,
1095 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1096 YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1097 YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1098 YPLD_REG_DBG_FORCE_FRAME_E5,
1099 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1103 static struct block_defs block_prm_defs = {
1105 {true, true, true}, false, 0,
1106 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1107 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1108 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1109 PRM_REG_DBG_FORCE_FRAME,
1110 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1113 static struct block_defs block_pbf_pb1_defs = {
1115 {true, true, true}, false, 0,
1116 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1117 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1118 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1119 PBF_PB1_REG_DBG_FORCE_FRAME,
1120 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1124 static struct block_defs block_pbf_pb2_defs = {
1126 {true, true, true}, false, 0,
1127 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1128 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1129 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1130 PBF_PB2_REG_DBG_FORCE_FRAME,
1131 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1135 static struct block_defs block_rpb_defs = {
1137 {true, true, true}, false, 0,
1138 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1139 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1140 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1141 RPB_REG_DBG_FORCE_FRAME,
1142 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1145 static struct block_defs block_btb_defs = {
1147 {true, true, true}, false, 0,
1148 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1149 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1150 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1151 BTB_REG_DBG_FORCE_FRAME,
1152 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1155 static struct block_defs block_pbf_defs = {
1157 {true, true, true}, false, 0,
1158 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1159 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1160 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1161 PBF_REG_DBG_FORCE_FRAME,
1162 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1165 static struct block_defs block_rdif_defs = {
1167 {true, true, true}, false, 0,
1168 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1169 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1170 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1171 RDIF_REG_DBG_FORCE_FRAME,
1172 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1175 static struct block_defs block_tdif_defs = {
1177 {true, true, true}, false, 0,
1178 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1179 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1180 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1181 TDIF_REG_DBG_FORCE_FRAME,
1182 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1185 static struct block_defs block_cdu_defs = {
1187 {true, true, true}, false, 0,
1188 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1189 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1190 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1191 CDU_REG_DBG_FORCE_FRAME,
1192 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1195 static struct block_defs block_ccfc_defs = {
1197 {true, true, true}, false, 0,
1198 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1199 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1200 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1201 CCFC_REG_DBG_FORCE_FRAME,
1202 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1205 static struct block_defs block_tcfc_defs = {
1207 {true, true, true}, false, 0,
1208 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1209 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1210 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1211 TCFC_REG_DBG_FORCE_FRAME,
1212 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1215 static struct block_defs block_igu_defs = {
1217 {true, true, true}, false, 0,
1218 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1219 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1220 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1221 IGU_REG_DBG_FORCE_FRAME,
1222 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1225 static struct block_defs block_cau_defs = {
1227 {true, true, true}, false, 0,
1228 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1229 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1230 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1231 CAU_REG_DBG_FORCE_FRAME,
1232 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
/* Debug-bus block descriptors, continued. Entries whose client IDs are all
 * MAX_DBG_BUS_CLIENTS have no debug bus hookup; entries with
 * MAX_DBG_RESET_REGS have no reset bit. The *_K2_E5 / *_E5 register macros
 * indicate the block only exists on the newer chips, matching the per-chip
 * existence flags (e.g. {false, true, true}).
 * NOTE(review): several entries here are also missing their name-string and
 * closing-brace lines in this excerpt.
 */
1235 static struct block_defs block_rgfs_defs = {
1236 "rgfs", {false, false, true}, false, 0,
1237 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1239 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1242 static struct block_defs block_rgsrc_defs = {
1244 {false, false, true}, false, 0,
1245 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1246 RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1247 RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1248 RGSRC_REG_DBG_FORCE_FRAME_E5,
1249 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1253 static struct block_defs block_tgfs_defs = {
1254 "tgfs", {false, false, true}, false, 0,
1255 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1257 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1260 static struct block_defs block_tgsrc_defs = {
1262 {false, false, true}, false, 0,
1263 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1264 TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1265 TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1266 TGSRC_REG_DBG_FORCE_FRAME_E5,
1267 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1271 static struct block_defs block_umac_defs = {
1273 {true, true, true}, false, 0,
1274 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1275 DBG_BUS_CLIENT_RBCZ},
1276 UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1277 UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1278 UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1279 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1282 static struct block_defs block_xmac_defs = {
1283 "xmac", {true, false, false}, false, 0,
1284 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1286 false, false, MAX_DBG_RESET_REGS, 0
1289 static struct block_defs block_dbg_defs = {
1290 "dbg", {true, true, true}, false, 0,
1291 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1293 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1296 static struct block_defs block_nig_defs = {
1298 {true, true, true}, false, 0,
1299 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1300 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1301 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1302 NIG_REG_DBG_FORCE_FRAME,
1303 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1306 static struct block_defs block_wol_defs = {
1308 {false, true, true}, false, 0,
1309 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1310 WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1311 WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1312 WOL_REG_DBG_FORCE_FRAME_K2_E5,
1313 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1316 static struct block_defs block_bmbn_defs = {
1318 {false, true, true}, false, 0,
1319 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1320 DBG_BUS_CLIENT_RBCB},
1321 BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1322 BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1323 BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1324 false, false, MAX_DBG_RESET_REGS, 0
1327 static struct block_defs block_ipc_defs = {
1328 "ipc", {true, true, true}, false, 0,
1329 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1331 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1334 static struct block_defs block_nwm_defs = {
1336 {false, true, true}, false, 0,
1337 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1338 NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1339 NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1340 NWM_REG_DBG_FORCE_FRAME_K2_E5,
1341 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1344 static struct block_defs block_nws_defs = {
1346 {false, true, true}, false, 0,
1347 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1348 NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1349 NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1350 NWS_REG_DBG_FORCE_FRAME_K2_E5,
1351 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1354 static struct block_defs block_ms_defs = {
1356 {false, true, true}, false, 0,
1357 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1358 MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1359 MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1360 MS_REG_DBG_FORCE_FRAME_K2_E5,
1361 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1364 static struct block_defs block_phy_pcie_defs = {
1366 {false, true, true}, false, 0,
1367 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1368 DBG_BUS_CLIENT_RBCH},
1369 PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1370 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1371 PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1372 PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1373 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1374 false, false, MAX_DBG_RESET_REGS, 0
1377 static struct block_defs block_led_defs = {
1378 "led", {false, true, true}, false, 0,
1379 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1381 true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1384 static struct block_defs block_avs_wrap_defs = {
1385 "avs_wrap", {false, true, false}, false, 0,
1386 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1388 true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1391 static struct block_defs block_pxpreqbus_defs = {
1392 "pxpreqbus", {false, false, false}, false, 0,
1393 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1395 false, false, MAX_DBG_RESET_REGS, 0
1398 static struct block_defs block_misc_aeu_defs = {
1399 "misc_aeu", {true, true, true}, false, 0,
1400 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1402 false, false, MAX_DBG_RESET_REGS, 0
1405 static struct block_defs block_bar0_map_defs = {
1406 "bar0_map", {true, true, true}, false, 0,
1407 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1409 false, false, MAX_DBG_RESET_REGS, 0
/* Lookup table mapping a block id to its descriptor; indexed directly by
 * enum block_id in the loops below (e.g. qed_update_blocks_reset_state).
 * NOTE(review): most of the MAX_BLOCK_ID entries and the closing brace are
 * missing from this excerpt; entry order must match enum block_id exactly.
 */
1412 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1417 &block_pglue_b_defs,
1427 &block_pswhst2_defs,
1469 &block_pbf_pb1_defs,
1470 &block_pbf_pb2_defs,
1495 &block_phy_pcie_defs,
1497 &block_avs_wrap_defs,
1498 &block_pxpreqbus_defs,
1499 &block_misc_aeu_defs,
1500 &block_bar0_map_defs,
/* Platform descriptors, indexed by dev_data->platform_id (only "asic" is
 * populated; the rest are placeholders). The .name field is dumped as the
 * "platform" global param below.
 * NOTE(review): the three numeric fields are presumably delay/timeout
 * factors -- confirm against struct platform_defs (declared earlier).
 */
1503 static struct platform_defs s_platform_defs[] = {
1504 {"asic", 1, 256, 32768},
1505 {"reserved", 0, 0, 0},
1506 {"reserved2", 0, 0, 0},
1507 {"reserved3", 0, 0, 0}
/* Default/limit table for every GRC dump parameter, indexed by
 * enum dbg_grc_params (one entry per DBG_GRC_PARAM_* comment below).
 * NOTE(review): field order follows struct grc_param_defs -- roughly
 * {per-chip defaults}, min, max, two flags, and two preset values; confirm
 * exact field meanings against the struct definition (not in this excerpt).
 */
1510 static struct grc_param_defs s_grc_param_defs[] = {
1511 /* DBG_GRC_PARAM_DUMP_TSTORM */
1512 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1514 /* DBG_GRC_PARAM_DUMP_MSTORM */
1515 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1517 /* DBG_GRC_PARAM_DUMP_USTORM */
1518 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1520 /* DBG_GRC_PARAM_DUMP_XSTORM */
1521 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1523 /* DBG_GRC_PARAM_DUMP_YSTORM */
1524 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1526 /* DBG_GRC_PARAM_DUMP_PSTORM */
1527 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1529 /* DBG_GRC_PARAM_DUMP_REGS */
1530 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1532 /* DBG_GRC_PARAM_DUMP_RAM */
1533 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1535 /* DBG_GRC_PARAM_DUMP_PBUF */
1536 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1538 /* DBG_GRC_PARAM_DUMP_IOR */
1539 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1541 /* DBG_GRC_PARAM_DUMP_VFC */
1542 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1544 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1545 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1547 /* DBG_GRC_PARAM_DUMP_ILT */
1548 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1550 /* DBG_GRC_PARAM_DUMP_RSS */
1551 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1553 /* DBG_GRC_PARAM_DUMP_CAU */
1554 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1556 /* DBG_GRC_PARAM_DUMP_QM */
1557 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1559 /* DBG_GRC_PARAM_DUMP_MCP */
1560 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1562 /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1563 {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1565 /* DBG_GRC_PARAM_DUMP_CFC */
1566 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1568 /* DBG_GRC_PARAM_DUMP_IGU */
1569 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1571 /* DBG_GRC_PARAM_DUMP_BRB */
1572 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1574 /* DBG_GRC_PARAM_DUMP_BTB */
1575 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1577 /* DBG_GRC_PARAM_DUMP_BMB */
1578 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1580 /* DBG_GRC_PARAM_DUMP_NIG */
1581 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1583 /* DBG_GRC_PARAM_DUMP_MULD */
1584 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1586 /* DBG_GRC_PARAM_DUMP_PRS */
1587 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1589 /* DBG_GRC_PARAM_DUMP_DMAE */
1590 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1592 /* DBG_GRC_PARAM_DUMP_TM */
1593 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1595 /* DBG_GRC_PARAM_DUMP_SDM */
1596 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1598 /* DBG_GRC_PARAM_DUMP_DIF */
1599 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1601 /* DBG_GRC_PARAM_DUMP_STATIC */
1602 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1604 /* DBG_GRC_PARAM_UNSTALL */
1605 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1607 /* DBG_GRC_PARAM_NUM_LCIDS */
1608 {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1609 MAX_LCIDS, MAX_LCIDS},
1611 /* DBG_GRC_PARAM_NUM_LTIDS */
1612 {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1613 MAX_LTIDS, MAX_LTIDS},
1615 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1616 {{0, 0, 0}, 0, 1, true, false, 0, 0},
1618 /* DBG_GRC_PARAM_CRASH */
1619 {{0, 0, 0}, 0, 1, true, false, 0, 0},
1621 /* DBG_GRC_PARAM_PARITY_SAFE */
1622 {{0, 0, 0}, 0, 1, false, false, 1, 0},
1624 /* DBG_GRC_PARAM_DUMP_CM */
1625 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1627 /* DBG_GRC_PARAM_DUMP_PHY */
1628 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1630 /* DBG_GRC_PARAM_NO_MCP */
1631 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1633 /* DBG_GRC_PARAM_NO_FW_VER */
1634 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1636 /* DBG_GRC_PARAM_RESERVED3 */
1637 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1639 /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
1640 {{0, 1, 1}, 0, 1, false, false, 0, 0},
1642 /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
1643 {{1, 1, 1}, 0, 1, false, false, 0, 0},
1645 /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
1646 {{1, 1, 1}, 0, 1, false, false, 0, 0},
1648 /* DBG_GRC_PARAM_DUMP_CAU_EXT */
1649 {{0, 0, 0}, 0, 1, false, false, 0, 1}
/* RSS memory descriptors: {mem name, type name, address, entry width, ...}.
 * NOTE(review): the per-chip entry-count initializer line (e.g.
 * {16384, 26624, 32768}) is missing from all but the last entry in this
 * excerpt -- restore from the full source file.
 */
1652 static struct rss_mem_defs s_rss_mem_defs[] = {
1653 { "rss_mem_cid", "rss_cid", 0, 32,
1656 { "rss_mem_key_msb", "rss_key", 1024, 256,
1659 { "rss_mem_key_lsb", "rss_key", 2048, 64,
1662 { "rss_mem_info", "rss_info", 3072, 16,
1665 { "rss_mem_ind", "rss_ind", 4096, 16,
1666 {16384, 26624, 32768} }
/* VFC RAM regions: {mem name, type name, base row, row count}. Regions are
 * contiguous: tt1 [0,512), mtt2 [512,640), stt2 [640,672), ro_vect [672,704).
 */
1669 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1670 {"vfc_ram_tt1", "vfc_ram", 0, 512},
1671 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
1672 {"vfc_ram_stt2", "vfc_ram", 640, 32},
1673 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
/* Big RAM descriptors (BRB/BTB/BMB), one per NUM_BIG_RAM_TYPES. The memory
 * group ids and GRC param are consulted by qed_grc_is_mem_included(); the
 * ADDRESS/DATA register pair gives indirect access to the RAM contents.
 * NOTE(review): the {0,0,0}-style triple after the 256B-enable register and
 * the final size triple are presumably per-chip values -- confirm against
 * struct big_ram_defs.
 */
1676 static struct big_ram_defs s_big_ram_defs[] = {
1677 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1678 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1679 MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1680 {153600, 180224, 282624} },
1682 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1683 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1684 MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1685 {92160, 117760, 168960} },
1687 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1688 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1689 MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1690 {36864, 36864, 36864} }
/* Reset register descriptors, indexed by enum dbg_reset_regs (see the
 * DBG_RESET_REG_* comments). Per entry: register address, per-chip
 * existence flags, and per-chip unreset values that are ORed into the
 * register by qed_grc_unreset_blocks().
 */
1693 static struct reset_reg_defs s_reset_regs_defs[] = {
1694 /* DBG_RESET_REG_MISCS_PL_UA */
1695 { MISCS_REG_RESET_PL_UA,
1696 {true, true, true}, {0x0, 0x0, 0x0} },
1698 /* DBG_RESET_REG_MISCS_PL_HV */
1699 { MISCS_REG_RESET_PL_HV,
1700 {true, true, true}, {0x0, 0x400, 0x600} },
1702 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1703 { MISCS_REG_RESET_PL_HV_2_K2_E5,
1704 {false, true, true}, {0x0, 0x0, 0x0} },
1706 /* DBG_RESET_REG_MISC_PL_UA */
1707 { MISC_REG_RESET_PL_UA,
1708 {true, true, true}, {0x0, 0x0, 0x0} },
1710 /* DBG_RESET_REG_MISC_PL_HV */
1711 { MISC_REG_RESET_PL_HV,
1712 {true, true, true}, {0x0, 0x0, 0x0} },
1714 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1715 { MISC_REG_RESET_PL_PDA_VMAIN_1,
1716 {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1718 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1719 { MISC_REG_RESET_PL_PDA_VMAIN_2,
1720 {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1722 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1723 { MISC_REG_RESET_PL_PDA_VAUX,
1724 {true, true, true}, {0x2, 0x2, 0x2} },
/* PHY descriptors: {name, base address, TBUS address lo/hi registers,
 * TBUS data lo/hi registers} -- the register pairs select a TBUS address
 * and read back its data, as the _ADDR_7_0/_15_8 and _DATA_7_0/_11_8
 * macro names indicate. Both PCIe PHYs share one register layout and
 * differ only in base address.
 */
1727 static struct phy_defs s_phy_defs[] = {
1728 {"nw_phy", NWS_REG_NWS_CMU_K2,
1729 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1730 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1731 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1732 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1733 {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1734 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1735 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1736 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1737 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1738 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1739 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1740 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1741 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1742 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1743 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1744 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1745 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1746 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1747 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
/* Split-type descriptors, indexed by enum split_type.
 * NOTE(review): the initializer lines for each SPLIT_TYPE_* entry (and the
 * closing brace) are missing from this excerpt -- restore from the full
 * source file.
 */
1750 static struct split_type_defs s_split_type_defs[] = {
1751 /* SPLIT_TYPE_NONE */
1754 /* SPLIT_TYPE_PORT */
1760 /* SPLIT_TYPE_PORT_PF */
1767 /**************************** Private Functions ******************************/
1769 /* Reads and returns a single dword from the specified unaligned buffer */
1770 static u32 qed_read_unaligned_dword(u8 *buf)
/* memcpy avoids the undefined behavior of a misaligned u32 load. */
1774 memcpy((u8 *)&dword, buf, sizeof(dword));
1778 /* Sets the value of the specified GRC param */
1779 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
1780 enum dbg_grc_params grc_param, u32 val)
1782 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Raw store into the per-hwfn param array; no range validation here. */
1784 dev_data->grc.param_val[grc_param] = val;
1787 /* Returns the value of the specified GRC param */
1788 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1789 enum dbg_grc_params grc_param)
1791 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Accessor counterpart of qed_grc_set_param(). */
1793 return dev_data->grc.param_val[grc_param];
1796 /* Initializes the GRC parameters */
1797 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1799 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* One-shot: defaults are applied only on the first call, so user-set
 * params survive later re-entry (guarded by params_initialized).
 */
1801 if (!dev_data->grc.params_initialized) {
1802 qed_dbg_grc_set_params_default(p_hwfn);
1803 dev_data->grc.params_initialized = 1;
1807 /* Initializes debug data for the specified device */
1808 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1809 struct qed_ptt *p_ptt)
1811 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1812 u8 num_pfs = 0, max_pfs_per_port = 0;
/* Idempotent: initialization runs at most once per hwfn. */
1814 if (dev_data->initialized)
1815 return DBG_STATUS_OK;
/* Detect chip type; K2 and BB-B0 are supported, anything else fails
 * with DBG_STATUS_UNKNOWN_CHIP (the bare return below -- NOTE(review):
 * the surrounding else-branch lines are missing from this excerpt).
 */
1818 if (QED_IS_K2(p_hwfn->cdev)) {
1819 dev_data->chip_id = CHIP_K2;
1820 dev_data->mode_enable[MODE_K2] = 1;
1821 dev_data->num_vfs = MAX_NUM_VFS_K2;
1822 num_pfs = MAX_NUM_PFS_K2;
1823 max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
1824 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1825 dev_data->chip_id = CHIP_BB;
1826 dev_data->mode_enable[MODE_BB] = 1;
1827 dev_data->num_vfs = MAX_NUM_VFS_BB;
1828 num_pfs = MAX_NUM_PFS_BB;
1829 max_pfs_per_port = MAX_NUM_PFS_BB;
1831 return DBG_STATUS_UNKNOWN_CHIP;
1835 dev_data->platform_id = PLATFORM_ASIC;
1836 dev_data->mode_enable[MODE_ASIC] = 1;
/* Enable the ports-per-engine mode matching MISC_REG_PORT_MODE
 * (case labels are missing from this excerpt).
 */
1839 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
1841 dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
1844 dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
1847 dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
/* BB with CNIG port mode 2 runs in 100G mode, which forces one port. */
1852 if (dev_data->chip_id == CHIP_BB &&
1853 qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
1854 dev_data->mode_enable[MODE_100G] = 1;
1856 /* Set number of ports */
1857 if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1858 dev_data->mode_enable[MODE_100G])
1859 dev_data->num_ports = 1;
1860 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1861 dev_data->num_ports = 2;
1862 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1863 dev_data->num_ports = 4;
1865 /* Set number of PFs per port */
1866 dev_data->num_pfs_per_port = min_t(u32,
1867 num_pfs / dev_data->num_ports,
1870 /* Initializes the GRC parameters */
1871 qed_dbg_grc_init_params(p_hwfn);
1873 dev_data->use_dmae = true;
1874 dev_data->initialized = 1;
1876 return DBG_STATUS_OK;
/* Returns the debug bus block descriptor for the given block id, indexing
 * into the flat dbg_bus_blocks array (the stride expression is truncated
 * in this excerpt -- presumably block_id * chips + chip_id; confirm).
 */
1879 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1880 enum block_id block_id)
1882 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1884 return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1889 /* Reads the FW info structure for the specified Storm from the chip,
1890 * and writes it to the specified fw_info pointer.
1892 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1893 struct qed_ptt *p_ptt,
1894 u8 storm_id, struct fw_info *fw_info)
1896 struct storm_defs *storm = &s_storm_defs[storm_id];
1897 struct fw_info_location fw_info_location;
/* Zero both structs so the caller sees all-zero fw_info if the
 * location read below turns out to be invalid.
 */
1900 memset(&fw_info_location, 0, sizeof(fw_info_location));
1901 memset(fw_info, 0, sizeof(*fw_info));
1903 /* Read first the address that points to fw_info location.
1904 * The address is located in the last line of the Storm RAM.
1906 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1907 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1908 sizeof(fw_info_location);
1909 dest = (u32 *)&fw_info_location;
/* GRC reads are dword-wide; copy the location struct dword by dword. */
1911 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1912 i++, addr += BYTES_IN_DWORD)
1913 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1915 /* Read FW version info from Storm RAM */
/* Sanity-check the advertised size before copying (the upper-bound
 * expression is truncated in this excerpt).
 */
1916 if (fw_info_location.size > 0 && fw_info_location.size <=
1918 addr = fw_info_location.grc_addr;
1919 dest = (u32 *)fw_info;
1920 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1921 i++, addr += BYTES_IN_DWORD)
1922 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1926 /* Dumps the specified string to the specified buffer.
1927 * Returns the dumped size in bytes.
1929 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1932 strcpy(dump_buf, str)
/* +1 accounts for the NUL terminator that strcpy also writes. */
1934 return (u32)strlen(str) + 1;
1937 /* Dumps zeros to align the specified buffer to dwords.
1938 * Returns the dumped size in bytes.
1940 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1942 u8 offset_in_dword, align_size;
/* Bytes needed to reach the next dword boundary; 0 if already aligned. */
1944 offset_in_dword = (u8)(byte_offset & 0x3);
1945 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1947 if (dump && align_size)
1948 memset(dump_buf, 0, align_size);
1953 /* Writes the specified string param to the specified buffer.
1954 * Returns the dumped size in dwords.
1956 static u32 qed_dump_str_param(u32 *dump_buf,
1958 const char *param_name, const char *param_val)
1960 char *char_buf = (char *)dump_buf;
1963 /* Dump param name */
1964 offset += qed_dump_str(char_buf + offset, dump, param_name);
1966 /* Indicate a string param value */
/* Type byte after the name: 1 = string (vs 0 in qed_dump_num_param). */
1968 *(char_buf + offset) = 1;
1971 /* Dump param value */
1972 offset += qed_dump_str(char_buf + offset, dump, param_val);
1974 /* Align buffer to next dword */
1975 offset += qed_dump_align(char_buf + offset, dump, offset);
1977 return BYTES_TO_DWORDS(offset);
1980 /* Writes the specified numeric param to the specified buffer.
1981 * Returns the dumped size in dwords.
1983 static u32 qed_dump_num_param(u32 *dump_buf,
1984 bool dump, const char *param_name, u32 param_val)
1986 char *char_buf = (char *)dump_buf;
1989 /* Dump param name */
1990 offset += qed_dump_str(char_buf + offset, dump, param_name);
1992 /* Indicate a numeric param value */
/* Type byte after the name: 0 = numeric (vs 1 in qed_dump_str_param). */
1994 *(char_buf + offset) = 0;
1997 /* Align buffer to next dword */
1998 offset += qed_dump_align(char_buf + offset, dump, offset);
2000 /* Dump param value (and change offset from bytes to dwords) */
2001 offset = BYTES_TO_DWORDS(offset);
2003 *(dump_buf + offset) = param_val;
2009 /* Reads the FW version and writes it as a param to the specified buffer.
2010 * Returns the dumped size in dwords.
2012 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
2013 struct qed_ptt *p_ptt,
2014 u32 *dump_buf, bool dump)
2016 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
2017 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
2018 struct fw_info fw_info = { {0}, {0} };
/* Skip the chip read in dry-run mode or when the user disabled FW
 * version dumping; the "empty" placeholder strings are dumped instead.
 */
2021 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2022 /* Read FW info from chip */
2023 qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
2025 /* Create FW version/image strings */
/* snprintf < 0 is an encoding error, not truncation; log and continue
 * with whatever landed in the buffer.
 */
2026 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
2027 "%d_%d_%d_%d", fw_info.ver.num.major,
2028 fw_info.ver.num.minor, fw_info.ver.num.rev,
2029 fw_info.ver.num.eng) < 0)
2031 "Unexpected debug error: invalid FW version string\n");
2032 switch (fw_info.ver.image_id) {
2034 strcpy(fw_img_str, "main");
2037 strcpy(fw_img_str, "unknown");
2042 /* Dump FW version, image and timestamp */
2043 offset += qed_dump_str_param(dump_buf + offset,
2044 dump, "fw-version", fw_ver_str);
2045 offset += qed_dump_str_param(dump_buf + offset,
2046 dump, "fw-image", fw_img_str);
2047 offset += qed_dump_num_param(dump_buf + offset,
2049 "fw-timestamp", fw_info.ver.timestamp);
2054 /* Reads the MFW version and writes it as a param to the specified buffer.
2055 * Returns the dumped size in dwords.
2057 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2058 struct qed_ptt *p_ptt,
2059 u32 *dump_buf, bool dump)
2061 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2064 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2065 u32 global_section_offsize, global_section_addr, mfw_ver;
2066 u32 public_data_addr, global_section_offsize_addr;
2068 /* Find MCP public data GRC address. Needs to be ORed with
2069 * MCP_REG_SCRATCH due to a HW bug.
2071 public_data_addr = qed_rd(p_hwfn,
2073 MISC_REG_SHARED_MEM_ADDR) |
2076 /* Find MCP public global section offset */
2077 global_section_offsize_addr = public_data_addr +
2078 offsetof(struct mcp_public_data,
2080 sizeof(offsize_t) * PUBLIC_GLOBAL;
2081 global_section_offsize = qed_rd(p_hwfn, p_ptt,
2082 global_section_offsize_addr);
/* Offset is packed into the low bits of the offsize word and is in
 * units of 4 bytes, hence the mask and multiply.
 */
2083 global_section_addr =
2085 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2087 /* Read MFW version from MCP public global section */
2088 mfw_ver = qed_rd(p_hwfn, p_ptt,
2089 global_section_addr +
2090 offsetof(struct public_global, mfw_ver));
2092 /* Dump MFW version param */
/* Version string is the four bytes of mfw_ver, most significant first. */
2093 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2094 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2095 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2097 "Unexpected debug error: invalid MFW version string\n");
2100 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2103 /* Writes a section header to the specified buffer.
2104 * Returns the dumped size in dwords.
2106 static u32 qed_dump_section_hdr(u32 *dump_buf,
2107 bool dump, const char *name, u32 num_params)
/* A section header is just a numeric param: name -> param count. */
2109 return qed_dump_num_param(dump_buf, dump, name, num_params);
2112 /* Writes the common global params to the specified buffer.
2113 * Returns the dumped size in dwords.
2115 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2116 struct qed_ptt *p_ptt,
2119 u8 num_specific_global_params)
2121 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2125 /* Dump global params section header */
/* Header advertises the common params plus the caller's extra ones so
 * parsers know how many params follow.
 */
2126 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2127 offset += qed_dump_section_hdr(dump_buf + offset,
2128 dump, "global_params", num_params);
2131 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2132 offset += qed_dump_mfw_ver_param(p_hwfn,
2133 p_ptt, dump_buf + offset, dump);
2134 offset += qed_dump_num_param(dump_buf + offset,
2135 dump, "tools-version", TOOLS_VERSION);
2136 offset += qed_dump_str_param(dump_buf + offset,
2139 s_chip_defs[dev_data->chip_id].name);
2140 offset += qed_dump_str_param(dump_buf + offset,
2143 s_platform_defs[dev_data->platform_id].
2146 qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2152 /* Writes the "last" section (including CRC) to the specified buffer at the
2153 * given offset. Returns the dumped size in dwords.
2155 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2157 u32 start_offset = offset;
2159 /* Dump CRC section header */
2160 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2162 /* Calculate CRC32 and add it to the dword after the "last" section */
2164 *(dump_buf + offset) = ~crc32(0xffffffff,
2166 DWORDS_TO_BYTES(offset));
2170 return offset - start_offset;
2173 /* Update blocks reset state */
2174 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2175 struct qed_ptt *p_ptt)
2177 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2178 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2181 /* Read reset registers */
/* Registers that don't exist on this chip stay 0 in reg_val. */
2182 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2183 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2184 reg_val[i] = qed_rd(p_hwfn,
2185 p_ptt, s_reset_regs_defs[i].addr)
2187 /* Check if blocks are in reset */
/* A cleared bit in the reset register means the block is in reset. */
2188 for (i = 0; i < MAX_BLOCK_ID; i++) {
2189 struct block_defs *block = s_block_defs[i];
2191 dev_data->block_in_reset[i] = block->has_reset_bit &&
2192 !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2196 /* Enable / disable the Debug block */
2197 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2198 struct qed_ptt *p_ptt, bool enable)
2200 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2203 /* Resets the Debug block */
2204 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2205 struct qed_ptt *p_ptt)
2207 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2208 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2210 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2211 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
/* Pulse the DBG block's reset bit: clear it (assert reset), then
 * restore the original register value (deassert).
 */
2213 old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2215 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2216 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
/* Sets the debug bus framing mode register to the given mode. */
2219 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2220 struct qed_ptt *p_ptt,
2221 enum dbg_bus_frame_modes mode)
2223 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2226 /* Enable / disable Debug Bus clients according to the specified mask
2227 * (1 = enable, 0 = disable).
2229 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2230 struct qed_ptt *p_ptt, u32 client_mask)
2232 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
/* Recursively evaluates the modes tree at *modes_buf_offset: a node is
 * either an operator (NOT/OR/AND) applied to subtree(s), or a leaf mode id
 * checked against dev_data->mode_enable[]. Advances *modes_buf_offset past
 * the consumed subtree.
 */
2235 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2237 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2242 /* Get next element from modes tree buffer */
2243 ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2244 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2247 case INIT_MODE_OP_NOT:
2248 return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2249 case INIT_MODE_OP_OR:
2250 case INIT_MODE_OP_AND:
/* Evaluate both operands first so the offset always advances past
 * the full subtree, regardless of the first operand's value.
 */
2251 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2252 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2253 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2254 arg2) : (arg1 && arg2);
/* Leaf: values >= MAX_INIT_MODE_OPS encode a mode id. */
2256 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2260 /* Returns true if the specified entity (indicated by GRC param) should be
2261 * included in the dump, false otherwise.
2263 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2264 enum dbg_grc_params grc_param)
2266 return qed_grc_get_param(p_hwfn, grc_param) > 0;
2269 /* Returns true of the specified Storm should be included in the dump, false
/* Relies on the storm GRC params occupying the first enum slots so the
 * storm id can be cast directly to its dump-enable param.
 */
2272 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2273 enum dbg_storms storm)
2275 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2278 /* Returns true if the specified memory should be included in the dump, false
2281 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2282 enum block_id block_id, u8 mem_group_id)
2284 struct block_defs *block = s_block_defs[block_id];
2287 /* Check Storm match */
2288 if (block->associated_to_storm &&
2289 !qed_grc_is_storm_included(p_hwfn,
2290 (enum dbg_storms)block->storm_id))
/* Big RAM groups (BRB/BTB/BMB) are controlled by their own GRC params
 * and take precedence over the generic group switch below.
 */
2293 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2294 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2296 if (mem_group_id == big_ram->mem_group_id ||
2297 mem_group_id == big_ram->ram_mem_group_id)
2298 return qed_grc_is_included(p_hwfn, big_ram->grc_param);
/* Map each remaining memory group to its dump-enable GRC param. */
2301 switch (mem_group_id) {
2302 case MEM_GROUP_PXP_ILT:
2303 case MEM_GROUP_PXP_MEM:
2304 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2306 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2307 case MEM_GROUP_PBUF:
2308 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2309 case MEM_GROUP_CAU_MEM:
2310 case MEM_GROUP_CAU_SB:
2311 case MEM_GROUP_CAU_PI:
2312 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2313 case MEM_GROUP_QM_MEM:
2314 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2315 case MEM_GROUP_CFC_MEM:
2316 case MEM_GROUP_CONN_CFC_MEM:
2317 case MEM_GROUP_TASK_CFC_MEM:
/* CFC memories are needed for both the CFC dump and CM context dump. */
2318 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2319 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2320 case MEM_GROUP_IGU_MEM:
2321 case MEM_GROUP_IGU_MSIX:
2322 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2323 case MEM_GROUP_MULD_MEM:
2324 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2325 case MEM_GROUP_PRS_MEM:
2326 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2327 case MEM_GROUP_DMAE_MEM:
2328 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2329 case MEM_GROUP_TM_MEM:
2330 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2331 case MEM_GROUP_SDM_MEM:
2332 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2333 case MEM_GROUP_TDIF_CTX:
2334 case MEM_GROUP_RDIF_CTX:
2335 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2336 case MEM_GROUP_CM_MEM:
2337 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2339 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2345 /* Stalls all Storms */
2346 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2347 struct qed_ptt *p_ptt, bool stall)
2352 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2353 if (!qed_grc_is_storm_included(p_hwfn,
2354 (enum dbg_storms)storm_id))
2357 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2358 SEM_FAST_REG_STALL_0_BB_K2;
2359 qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2362 msleep(STALL_DELAY_MS);
2365 /* Takes all blocks out of reset */
2366 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2367 struct qed_ptt *p_ptt)
2369 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2370 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2373 /* Fill reset regs values */
2374 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2375 struct block_defs *block = s_block_defs[block_id];
2377 if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2379 reg_val[block->reset_reg] |=
2380 BIT(block->reset_bit_offset);
2383 /* Write reset registers */
2384 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2385 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2389 s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2394 s_reset_regs_defs[i].addr +
2395 RESET_REG_UNRESET_OFFSET, reg_val[i]);
2399 /* Returns the attention block data of the specified block */
2400 static const struct dbg_attn_block_type_data *
2401 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2403 const struct dbg_attn_block *base_attn_block_arr =
2404 (const struct dbg_attn_block *)
2405 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2407 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2410 /* Returns the attention registers of the specified block */
2411 static const struct dbg_attn_reg *
2412 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2415 const struct dbg_attn_block_type_data *block_type_data =
2416 qed_get_block_attn_data(block_id, attn_type);
2418 *num_attn_regs = block_type_data->num_regs;
2420 return &((const struct dbg_attn_reg *)
2421 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2425 /* For each block, clear the status of all parities */
2426 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2427 struct qed_ptt *p_ptt)
2429 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2430 const struct dbg_attn_reg *attn_reg_arr;
2431 u8 reg_idx, num_attn_regs;
2434 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2435 if (dev_data->block_in_reset[block_id])
2438 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2442 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2443 const struct dbg_attn_reg *reg_data =
2444 &attn_reg_arr[reg_idx];
2445 u16 modes_buf_offset;
2449 eval_mode = GET_FIELD(reg_data->mode.data,
2450 DBG_MODE_HDR_EVAL_MODE) > 0;
2452 GET_FIELD(reg_data->mode.data,
2453 DBG_MODE_HDR_MODES_BUF_OFFSET);
2455 /* If Mode match: clear parity status */
2457 qed_is_mode_match(p_hwfn, &modes_buf_offset))
2458 qed_rd(p_hwfn, p_ptt,
2459 DWORDS_TO_BYTES(reg_data->
2465 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2466 * The following parameters are dumped:
2467 * - count: no. of dumped entries
2468 * - split_type: split type
2469 * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
2470 * - param_name: user parameter value (dumped only if param_name != NULL
2471 * and param_val != NULL).
2473 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2475 u32 num_reg_entries,
2476 enum init_split_types split_type,
2478 const char *param_name, const char *param_val)
2481 (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
2484 offset += qed_dump_section_hdr(dump_buf + offset,
2485 dump, "grc_regs", num_params);
2486 offset += qed_dump_num_param(dump_buf + offset,
2487 dump, "count", num_reg_entries);
2488 offset += qed_dump_str_param(dump_buf + offset,
2490 s_split_type_defs[split_type].name);
2491 if (split_type != SPLIT_TYPE_NONE)
2492 offset += qed_dump_num_param(dump_buf + offset,
2493 dump, "id", split_id);
2494 if (param_name && param_val)
2495 offset += qed_dump_str_param(dump_buf + offset,
2496 dump, param_name, param_val);
2501 /* Reads the specified registers into the specified buffer.
2502 * The addr and len arguments are specified in dwords.
2504 void qed_read_regs(struct qed_hwfn *p_hwfn,
2505 struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2509 for (i = 0; i < len; i++)
2510 buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2513 /* Dumps the GRC registers in the specified address range.
2514 * Returns the dumped size in dwords.
2515 * The addr and len arguments are specified in dwords.
2517 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2518 struct qed_ptt *p_ptt,
2520 bool dump, u32 addr, u32 len, bool wide_bus,
2521 enum init_split_types split_type,
2524 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2525 u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
2530 /* Print log if needed */
2531 dev_data->num_regs_read += len;
2532 if (dev_data->num_regs_read >=
2533 s_platform_defs[dev_data->platform_id].log_thresh) {
2536 "Dumping %d registers...\n",
2537 dev_data->num_regs_read);
2538 dev_data->num_regs_read = 0;
2541 switch (split_type) {
2542 case SPLIT_TYPE_PORT:
2548 case SPLIT_TYPE_PORT_PF:
2549 port_id = split_id / dev_data->num_pfs_per_port;
2550 pf_id = port_id + dev_data->num_ports *
2551 (split_id % dev_data->num_pfs_per_port);
2560 /* Try reading using DMAE */
2561 if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
2562 (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2564 if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2565 (u64)(uintptr_t)(dump_buf), len, NULL))
2567 dev_data->use_dmae = 0;
2570 "Failed reading from chip using DMAE, using GRC instead\n");
2573 /* If not read using DMAE, read using GRC */
2576 if (split_type != dev_data->pretend.split_type || split_id !=
2577 dev_data->pretend.split_id) {
2578 switch (split_type) {
2579 case SPLIT_TYPE_PORT:
2580 qed_port_pretend(p_hwfn, p_ptt, port_id);
2583 fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2584 qed_fid_pretend(p_hwfn, p_ptt, fid);
2586 case SPLIT_TYPE_PORT_PF:
2587 fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2588 qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2591 fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
2592 (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
2593 qed_fid_pretend(p_hwfn, p_ptt, fid);
2599 dev_data->pretend.split_type = (u8)split_type;
2600 dev_data->pretend.split_id = split_id;
2603 /* Read registers using GRC */
2604 qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2609 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2610 * The addr and len arguments are specified in dwords.
2612 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2613 bool dump, u32 addr, u32 len)
2616 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2621 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2622 * The addr and len arguments are specified in dwords.
2624 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2625 struct qed_ptt *p_ptt,
2627 bool dump, u32 addr, u32 len, bool wide_bus,
2628 enum init_split_types split_type, u8 split_id)
2632 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2633 offset += qed_grc_dump_addr_range(p_hwfn,
2636 dump, addr, len, wide_bus,
2637 split_type, split_id);
2642 /* Dumps GRC registers sequence with skip cycle.
2643 * Returns the dumped size in dwords.
2644 * - addr: start GRC address in dwords
2645 * - total_len: total no. of dwords to dump
2646 * - read_len: no. consecutive dwords to read
2647 * - skip_len: no. of dwords to skip (and fill with zeros)
2649 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2650 struct qed_ptt *p_ptt,
2655 u32 read_len, u32 skip_len)
2657 u32 offset = 0, reg_offset = 0;
2659 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2662 return offset + total_len;
2664 while (reg_offset < total_len) {
2665 u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2667 offset += qed_grc_dump_addr_range(p_hwfn,
2670 dump, addr, curr_len, false,
2671 SPLIT_TYPE_NONE, 0);
2672 reg_offset += curr_len;
2675 if (reg_offset < total_len) {
2676 curr_len = min_t(u32, skip_len, total_len - skip_len);
2677 memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2679 reg_offset += curr_len;
2687 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2688 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2689 struct qed_ptt *p_ptt,
2690 struct dbg_array input_regs_arr,
2693 enum init_split_types split_type,
2695 bool block_enable[MAX_BLOCK_ID],
2696 u32 *num_dumped_reg_entries)
2698 u32 i, offset = 0, input_offset = 0;
2699 bool mode_match = true;
2701 *num_dumped_reg_entries = 0;
2703 while (input_offset < input_regs_arr.size_in_dwords) {
2704 const struct dbg_dump_cond_hdr *cond_hdr =
2705 (const struct dbg_dump_cond_hdr *)
2706 &input_regs_arr.ptr[input_offset++];
2707 u16 modes_buf_offset;
2710 /* Check mode/block */
2711 eval_mode = GET_FIELD(cond_hdr->mode.data,
2712 DBG_MODE_HDR_EVAL_MODE) > 0;
2715 GET_FIELD(cond_hdr->mode.data,
2716 DBG_MODE_HDR_MODES_BUF_OFFSET);
2717 mode_match = qed_is_mode_match(p_hwfn,
2721 if (!mode_match || !block_enable[cond_hdr->block_id]) {
2722 input_offset += cond_hdr->data_size;
2726 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2727 const struct dbg_dump_reg *reg =
2728 (const struct dbg_dump_reg *)
2729 &input_regs_arr.ptr[input_offset];
2733 addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2734 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2735 wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2736 offset += qed_grc_dump_reg_entry(p_hwfn,
2743 split_type, split_id);
2744 (*num_dumped_reg_entries)++;
2751 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2752 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2753 struct qed_ptt *p_ptt,
2754 struct dbg_array input_regs_arr,
2757 bool block_enable[MAX_BLOCK_ID],
2758 enum init_split_types split_type,
2760 const char *param_name,
2761 const char *param_val)
2763 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2764 enum init_split_types hdr_split_type = split_type;
2765 u32 num_dumped_reg_entries, offset;
2766 u8 hdr_split_id = split_id;
2768 /* In PORT_PF split type, print a port split header */
2769 if (split_type == SPLIT_TYPE_PORT_PF) {
2770 hdr_split_type = SPLIT_TYPE_PORT;
2771 hdr_split_id = split_id / dev_data->num_pfs_per_port;
2774 /* Calculate register dump header size (and skip it for now) */
2775 offset = qed_grc_dump_regs_hdr(dump_buf,
2779 hdr_split_id, param_name, param_val);
2781 /* Dump registers */
2782 offset += qed_grc_dump_regs_entries(p_hwfn,
2790 &num_dumped_reg_entries);
2792 /* Write register dump header */
2793 if (dump && num_dumped_reg_entries > 0)
2794 qed_grc_dump_regs_hdr(dump_buf,
2796 num_dumped_reg_entries,
2798 hdr_split_id, param_name, param_val);
2800 return num_dumped_reg_entries > 0 ? offset : 0;
2803 /* Dumps registers according to the input registers array. Returns the dumped
2806 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2807 struct qed_ptt *p_ptt,
2810 bool block_enable[MAX_BLOCK_ID],
2811 const char *param_name, const char *param_val)
2813 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2814 u32 offset = 0, input_offset = 0;
2816 while (input_offset <
2817 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2818 const struct dbg_dump_split_hdr *split_hdr;
2819 struct dbg_array curr_input_regs_arr;
2820 enum init_split_types split_type;
2821 u16 split_count = 0;
2822 u32 split_data_size;
2826 (const struct dbg_dump_split_hdr *)
2827 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2829 GET_FIELD(split_hdr->hdr,
2830 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2832 GET_FIELD(split_hdr->hdr,
2833 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2834 curr_input_regs_arr.ptr =
2835 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2836 curr_input_regs_arr.size_in_dwords = split_data_size;
2838 switch (split_type) {
2839 case SPLIT_TYPE_NONE:
2842 case SPLIT_TYPE_PORT:
2843 split_count = dev_data->num_ports;
2846 case SPLIT_TYPE_PORT_PF:
2847 split_count = dev_data->num_ports *
2848 dev_data->num_pfs_per_port;
2851 split_count = dev_data->num_vfs;
2857 for (split_id = 0; split_id < split_count; split_id++)
2858 offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2859 curr_input_regs_arr,
2867 input_offset += split_data_size;
2870 /* Cancel pretends (pretend to original PF) */
2872 fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2873 qed_fid_pretend(p_hwfn, p_ptt, fid);
2874 dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2875 dev_data->pretend.split_id = 0;
2881 /* Dump reset registers. Returns the dumped size in dwords. */
2882 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2883 struct qed_ptt *p_ptt,
2884 u32 *dump_buf, bool dump)
2886 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2887 u32 i, offset = 0, num_regs = 0;
2889 /* Calculate header size */
2890 offset += qed_grc_dump_regs_hdr(dump_buf,
2892 SPLIT_TYPE_NONE, 0, NULL, NULL);
2894 /* Write reset registers */
2895 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2896 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2899 offset += qed_grc_dump_reg_entry(p_hwfn,
2904 (s_reset_regs_defs[i].addr), 1,
2905 false, SPLIT_TYPE_NONE, 0);
2911 qed_grc_dump_regs_hdr(dump_buf,
2912 true, num_regs, SPLIT_TYPE_NONE,
2918 /* Dump registers that are modified during GRC Dump and therefore must be
2919 * dumped first. Returns the dumped size in dwords.
2921 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2922 struct qed_ptt *p_ptt,
2923 u32 *dump_buf, bool dump)
2925 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2926 u32 block_id, offset = 0, num_reg_entries = 0;
2927 const struct dbg_attn_reg *attn_reg_arr;
2928 u8 storm_id, reg_idx, num_attn_regs;
2930 /* Calculate header size */
2931 offset += qed_grc_dump_regs_hdr(dump_buf,
2932 false, 0, SPLIT_TYPE_NONE,
2935 /* Write parity registers */
2936 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2937 if (dev_data->block_in_reset[block_id] && dump)
2940 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2944 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2945 const struct dbg_attn_reg *reg_data =
2946 &attn_reg_arr[reg_idx];
2947 u16 modes_buf_offset;
2952 eval_mode = GET_FIELD(reg_data->mode.data,
2953 DBG_MODE_HDR_EVAL_MODE) > 0;
2955 GET_FIELD(reg_data->mode.data,
2956 DBG_MODE_HDR_MODES_BUF_OFFSET);
2958 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2961 /* Mode match: read & dump registers */
2962 addr = reg_data->mask_address;
2963 offset += qed_grc_dump_reg_entry(p_hwfn,
2969 SPLIT_TYPE_NONE, 0);
2970 addr = GET_FIELD(reg_data->data,
2971 DBG_ATTN_REG_STS_ADDRESS);
2972 offset += qed_grc_dump_reg_entry(p_hwfn,
2978 SPLIT_TYPE_NONE, 0);
2979 num_reg_entries += 2;
2983 /* Write Storm stall status registers */
2984 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2985 struct storm_defs *storm = &s_storm_defs[storm_id];
2988 if (dev_data->block_in_reset[storm->block_id] && dump)
2992 BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2993 SEM_FAST_REG_STALLED);
2994 offset += qed_grc_dump_reg_entry(p_hwfn,
3000 false, SPLIT_TYPE_NONE, 0);
3006 qed_grc_dump_regs_hdr(dump_buf,
3008 num_reg_entries, SPLIT_TYPE_NONE,
3014 /* Dumps registers that can't be represented in the debug arrays */
3015 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
3016 struct qed_ptt *p_ptt,
3017 u32 *dump_buf, bool dump)
3019 u32 offset = 0, addr;
3021 offset += qed_grc_dump_regs_hdr(dump_buf,
3022 dump, 2, SPLIT_TYPE_NONE, 0,
3025 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
3028 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
3029 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
3034 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
3037 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
3039 qed_grc_dump_reg_entry_skip(p_hwfn,
3044 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
3051 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3052 * dwords. The following parameters are dumped:
3053 * - name: dumped only if it's not NULL.
3054 * - addr: in dwords, dumped only if name is NULL.
3055 * - len: in dwords, always dumped.
3056 * - width: dumped if it's not zero.
3057 * - packed: dumped only if it's not false.
3058 * - mem_group: always dumped.
3059 * - is_storm: true only if the memory is related to a Storm.
3060 * - storm_letter: valid only if is_storm is true.
3063 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3071 const char *mem_group,
3072 bool is_storm, char storm_letter)
3080 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3087 /* Dump section header */
3088 offset += qed_dump_section_hdr(dump_buf + offset,
3089 dump, "grc_mem", num_params);
3094 strcpy(buf, "?STORM_");
3095 buf[0] = storm_letter;
3096 strcpy(buf + strlen(buf), name);
3101 offset += qed_dump_str_param(dump_buf + offset,
3105 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3107 offset += qed_dump_num_param(dump_buf + offset,
3108 dump, "addr", addr_in_bytes);
3112 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3114 /* Dump bit width */
3116 offset += qed_dump_num_param(dump_buf + offset,
3117 dump, "width", bit_width);
3121 offset += qed_dump_num_param(dump_buf + offset,
3126 strcpy(buf, "?STORM_");
3127 buf[0] = storm_letter;
3128 strcpy(buf + strlen(buf), mem_group);
3130 strcpy(buf, mem_group);
3133 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3138 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3139 * Returns the dumped size in dwords.
3140 * The addr and len arguments are specified in dwords.
3142 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3143 struct qed_ptt *p_ptt,
3152 const char *mem_group,
3153 bool is_storm, char storm_letter)
3157 offset += qed_grc_dump_mem_hdr(p_hwfn,
3165 mem_group, is_storm, storm_letter);
3166 offset += qed_grc_dump_addr_range(p_hwfn,
3169 dump, addr, len, wide_bus,
3170 SPLIT_TYPE_NONE, 0);
3175 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3176 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3177 struct qed_ptt *p_ptt,
3178 struct dbg_array input_mems_arr,
3179 u32 *dump_buf, bool dump)
3181 u32 i, offset = 0, input_offset = 0;
3182 bool mode_match = true;
3184 while (input_offset < input_mems_arr.size_in_dwords) {
3185 const struct dbg_dump_cond_hdr *cond_hdr;
3186 u16 modes_buf_offset;
3190 cond_hdr = (const struct dbg_dump_cond_hdr *)
3191 &input_mems_arr.ptr[input_offset++];
3192 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3194 /* Check required mode */
3195 eval_mode = GET_FIELD(cond_hdr->mode.data,
3196 DBG_MODE_HDR_EVAL_MODE) > 0;
3199 GET_FIELD(cond_hdr->mode.data,
3200 DBG_MODE_HDR_MODES_BUF_OFFSET);
3201 mode_match = qed_is_mode_match(p_hwfn,
3206 input_offset += cond_hdr->data_size;
3210 for (i = 0; i < num_entries;
3211 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3212 const struct dbg_dump_mem *mem =
3213 (const struct dbg_dump_mem *)
3214 &input_mems_arr.ptr[input_offset];
3215 u8 mem_group_id = GET_FIELD(mem->dword0,
3216 DBG_DUMP_MEM_MEM_GROUP_ID);
3217 bool is_storm = false, mem_wide_bus;
3218 enum dbg_grc_params grc_param;
3219 char storm_letter = 'a';
3220 enum block_id block_id;
3221 u32 mem_addr, mem_len;
3223 if (mem_group_id >= MEM_GROUPS_NUM) {
3224 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3228 block_id = (enum block_id)cond_hdr->block_id;
3229 if (!qed_grc_is_mem_included(p_hwfn,
3234 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3235 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3236 mem_wide_bus = GET_FIELD(mem->dword1,
3237 DBG_DUMP_MEM_WIDE_BUS);
3239 /* Update memory length for CCFC/TCFC memories
3240 * according to number of LCIDs/LTIDs.
3242 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3243 if (mem_len % MAX_LCIDS) {
3245 "Invalid CCFC connection memory size\n");
3249 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3250 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3251 (mem_len / MAX_LCIDS);
3252 } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3253 if (mem_len % MAX_LTIDS) {
3255 "Invalid TCFC task memory size\n");
3259 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3260 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3261 (mem_len / MAX_LTIDS);
3264 /* If memory is associated with Storm, update Storm
3268 [cond_hdr->block_id]->associated_to_storm) {
3271 s_storm_defs[s_block_defs
3272 [cond_hdr->block_id]->
3277 offset += qed_grc_dump_mem(p_hwfn,
3287 s_mem_group_names[mem_group_id],
3296 /* Dumps GRC memories according to the input array dump_mem.
3297 * Returns the dumped size in dwords.
3299 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3300 struct qed_ptt *p_ptt,
3301 u32 *dump_buf, bool dump)
3303 u32 offset = 0, input_offset = 0;
3305 while (input_offset <
3306 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3307 const struct dbg_dump_split_hdr *split_hdr;
3308 struct dbg_array curr_input_mems_arr;
3309 enum init_split_types split_type;
3310 u32 split_data_size;
3312 split_hdr = (const struct dbg_dump_split_hdr *)
3313 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3315 GET_FIELD(split_hdr->hdr,
3316 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3318 GET_FIELD(split_hdr->hdr,
3319 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3320 curr_input_mems_arr.ptr =
3321 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3322 curr_input_mems_arr.size_in_dwords = split_data_size;
3324 if (split_type == SPLIT_TYPE_NONE)
3325 offset += qed_grc_dump_mem_entries(p_hwfn,
3327 curr_input_mems_arr,
3332 "Dumping split memories is currently not supported\n");
3334 input_offset += split_data_size;
3340 /* Dumps GRC context data for the specified Storm.
3341 * Returns the dumped size in dwords.
3342 * The lid_size argument is specified in quad-regs.
3344 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3345 struct qed_ptt *p_ptt,
3354 struct storm_defs *storm = &s_storm_defs[storm_id];
3355 u32 i, lid, total_size, offset = 0;
3360 lid_size *= BYTES_IN_DWORD;
3361 total_size = num_lids * lid_size;
3363 offset += qed_grc_dump_mem_hdr(p_hwfn,
3370 false, name, true, storm->letter);
3373 return offset + total_size;
3375 /* Dump context data */
3376 for (lid = 0; lid < num_lids; lid++) {
3377 for (i = 0; i < lid_size; i++, offset++) {
3379 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3380 *(dump_buf + offset) = qed_rd(p_hwfn,
3381 p_ptt, rd_reg_addr);
3388 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3389 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3390 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3392 enum dbg_grc_params grc_param;
3396 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3397 struct storm_defs *storm = &s_storm_defs[storm_id];
3399 if (!qed_grc_is_storm_included(p_hwfn,
3400 (enum dbg_storms)storm_id))
3403 /* Dump Conn AG context size */
3404 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3406 qed_grc_dump_ctx_data(p_hwfn,
3411 qed_grc_get_param(p_hwfn,
3413 storm->cm_conn_ag_ctx_lid_size,
3414 storm->cm_conn_ag_ctx_rd_addr,
3417 /* Dump Conn ST context size */
3418 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3420 qed_grc_dump_ctx_data(p_hwfn,
3425 qed_grc_get_param(p_hwfn,
3427 storm->cm_conn_st_ctx_lid_size,
3428 storm->cm_conn_st_ctx_rd_addr,
3431 /* Dump Task AG context size */
3432 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3434 qed_grc_dump_ctx_data(p_hwfn,
3439 qed_grc_get_param(p_hwfn,
3441 storm->cm_task_ag_ctx_lid_size,
3442 storm->cm_task_ag_ctx_rd_addr,
3445 /* Dump Task ST context size */
3446 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3448 qed_grc_dump_ctx_data(p_hwfn,
3453 qed_grc_get_param(p_hwfn,
3455 storm->cm_task_st_ctx_lid_size,
3456 storm->cm_task_st_ctx_rd_addr,
3463 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3464 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3465 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3467 char buf[10] = "IOR_SET_?";
3468 u32 addr, offset = 0;
3469 u8 storm_id, set_id;
3471 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3472 struct storm_defs *storm = &s_storm_defs[storm_id];
3474 if (!qed_grc_is_storm_included(p_hwfn,
3475 (enum dbg_storms)storm_id))
3478 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3479 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3480 SEM_FAST_REG_STORM_REG_FILE) +
3481 IOR_SET_OFFSET(set_id);
3482 if (strlen(buf) > 0)
3483 buf[strlen(buf) - 1] = '0' + set_id;
3484 offset += qed_grc_dump_mem(p_hwfn,
3503 /* Dump VFC CAM. Returns the dumped size in dwords. */
3504 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3505 struct qed_ptt *p_ptt,
3506 u32 *dump_buf, bool dump, u8 storm_id)
3508 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3509 struct storm_defs *storm = &s_storm_defs[storm_id];
3510 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3511 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3512 u32 row, i, offset = 0;
3514 offset += qed_grc_dump_mem_hdr(p_hwfn,
3521 false, "vfc_cam", true, storm->letter);
3524 return offset + total_size;
3526 /* Prepare CAM address */
3527 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3529 for (row = 0; row < VFC_CAM_NUM_ROWS;
3530 row++, offset += VFC_CAM_RESP_DWORDS) {
3531 /* Write VFC CAM command */
3532 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3535 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3536 cam_cmd, VFC_CAM_CMD_DWORDS);
3538 /* Write VFC CAM address */
3541 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3542 cam_addr, VFC_CAM_ADDR_DWORDS);
3544 /* Read VFC CAM read response */
3547 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3548 dump_buf + offset, VFC_CAM_RESP_DWORDS);
3554 /* Dump VFC RAM. Returns the dumped size in dwords. */
3555 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3556 struct qed_ptt *p_ptt,
3559 u8 storm_id, struct vfc_ram_defs *ram_defs)
3561 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3562 struct storm_defs *storm = &s_storm_defs[storm_id];
3563 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3564 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3565 u32 row, i, offset = 0;
3567 offset += qed_grc_dump_mem_hdr(p_hwfn,
3575 ram_defs->type_name,
3576 true, storm->letter);
3578 /* Prepare RAM address */
3579 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3582 return offset + total_size;
3584 for (row = ram_defs->base_row;
3585 row < ram_defs->base_row + ram_defs->num_rows;
3586 row++, offset += VFC_RAM_RESP_DWORDS) {
3587 /* Write VFC RAM command */
3590 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3591 ram_cmd, VFC_RAM_CMD_DWORDS);
3593 /* Write VFC RAM address */
3594 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3597 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3598 ram_addr, VFC_RAM_ADDR_DWORDS);
3600 /* Read VFC RAM read response */
3603 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3604 dump_buf + offset, VFC_RAM_RESP_DWORDS);
3610 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3611 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3612 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3614 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3618 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3619 if (!qed_grc_is_storm_included(p_hwfn,
3620 (enum dbg_storms)storm_id) ||
3621 !s_storm_defs[storm_id].has_vfc ||
3622 (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3627 offset += qed_grc_dump_vfc_cam(p_hwfn,
3633 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3634 offset += qed_grc_dump_vfc_ram(p_hwfn,
3639 &s_vfc_ram_defs[i]);
3645 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3646 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3647 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3649 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3653 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3654 u32 rss_addr, num_entries, total_dwords;
3655 struct rss_mem_defs *rss_defs;
3656 u32 addr, num_dwords_to_read;
3659 rss_defs = &s_rss_mem_defs[rss_mem_id];
3660 rss_addr = rss_defs->addr;
3661 num_entries = rss_defs->num_entries[dev_data->chip_id];
3662 total_dwords = (num_entries * rss_defs->entry_width) / 32;
3663 packed = (rss_defs->entry_width == 16);
3665 offset += qed_grc_dump_mem_hdr(p_hwfn,
3671 rss_defs->entry_width,
3673 rss_defs->type_name, false, 0);
3677 offset += total_dwords;
3681 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3682 while (total_dwords) {
3683 num_dwords_to_read = min_t(u32,
3684 RSS_REG_RSS_RAM_DATA_SIZE,
3686 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3687 offset += qed_grc_dump_addr_range(p_hwfn,
3694 SPLIT_TYPE_NONE, 0);
3695 total_dwords -= num_dwords_to_read;
3703 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3704 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3705 struct qed_ptt *p_ptt,
3706 u32 *dump_buf, bool dump, u8 big_ram_id)
3708 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3709 u32 block_size, ram_size, offset = 0, reg_val, i;
3710 char mem_name[12] = "???_BIG_RAM";
3711 char type_name[8] = "???_RAM";
3712 struct big_ram_defs *big_ram;
3714 big_ram = &s_big_ram_defs[big_ram_id];
3715 ram_size = big_ram->ram_size[dev_data->chip_id];
3717 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3718 block_size = reg_val &
3719 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3722 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3723 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3725 /* Dump memory header */
3726 offset += qed_grc_dump_mem_hdr(p_hwfn,
3733 false, type_name, false, 0);
3735 /* Read and dump Big RAM data */
3737 return offset + ram_size;
3740 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3744 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3745 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3746 len = BRB_REG_BIG_RAM_DATA_SIZE;
3747 offset += qed_grc_dump_addr_range(p_hwfn,
3753 false, SPLIT_TYPE_NONE, 0);
3759 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3760 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3762 bool block_enable[MAX_BLOCK_ID] = { 0 };
3763 u32 offset = 0, addr;
3764 bool halted = false;
3767 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3768 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3770 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3773 /* Dump MCP scratchpad */
3774 offset += qed_grc_dump_mem(p_hwfn,
3779 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3780 MCP_REG_SCRATCH_SIZE_BB_K2,
3781 false, 0, false, "MCP", false, 0);
3783 /* Dump MCP cpu_reg_file */
3784 offset += qed_grc_dump_mem(p_hwfn,
3789 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3790 MCP_REG_CPU_REG_FILE_SIZE,
3791 false, 0, false, "MCP", false, 0);
3793 /* Dump MCP registers */
3794 block_enable[BLOCK_MCP] = true;
3795 offset += qed_grc_dump_registers(p_hwfn,
3798 dump, block_enable, "block", "MCP");
3800 /* Dump required non-MCP registers */
3801 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3802 dump, 1, SPLIT_TYPE_NONE, 0,
3804 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3805 offset += qed_grc_dump_reg_entry(p_hwfn,
3811 false, SPLIT_TYPE_NONE, 0);
3814 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3815 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3820 /* Dumps the tbus indirect memory for all PHYs. */
3821 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3822 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3824 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
/* Iterate over every PHY described in the static s_phy_defs table. */
3828 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3829 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3830 struct phy_defs *phy_defs;
/* Compute the four tbus access registers (address lo/hi, data lo/hi)
 * for this PHY from its base address. */
3833 phy_defs = &s_phy_defs[phy_id];
3834 addr_lo_addr = phy_defs->base_addr +
3835 phy_defs->tbus_addr_lo_addr;
3836 addr_hi_addr = phy_defs->base_addr +
3837 phy_defs->tbus_addr_hi_addr;
3838 data_lo_addr = phy_defs->base_addr +
3839 phy_defs->tbus_data_lo_addr;
3840 data_hi_addr = phy_defs->base_addr +
3841 phy_defs->tbus_data_hi_addr;
/* Build the "tbus_<phy>" memory name; snprintf < 0 indicates a
 * formatting error (truncation handling is in elided lines). */
3843 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3844 phy_defs->phy_name) < 0)
3846 "Unexpected debug error: invalid PHY memory name\n");
3848 offset += qed_grc_dump_mem_hdr(p_hwfn,
3853 PHY_DUMP_SIZE_DWORDS,
3854 16, true, mem_name, false, 0);
/* When not dumping, only account for the size. */
3857 offset += PHY_DUMP_SIZE_DWORDS;
/* Read the tbus indirect memory byte by byte: program the hi address,
 * then sweep all 256 lo addresses, reading lo and hi data bytes. */
3861 bytes_buf = (u8 *)(dump_buf + offset);
3862 for (tbus_hi_offset = 0;
3863 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3865 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3866 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3869 p_ptt, addr_lo_addr, tbus_lo_offset);
3870 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3873 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3879 offset += PHY_DUMP_SIZE_DWORDS;
/* Configures the debug line for the specified block: programs the block's
 * select, enable, shift, force-valid and force-frame debug registers.
 * NOTE(review): some parameters (line_id, enable_mask, right_shift) are
 * declared on lines elided from this excerpt.
 */
3885 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3886 struct qed_ptt *p_ptt,
3887 enum block_id block_id,
3891 u8 force_valid_mask, u8 force_frame_mask)
3893 struct block_defs *block = s_block_defs[block_id];
3895 qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3896 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3897 qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3898 qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3899 qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3902 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3903 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3904 struct qed_ptt *p_ptt,
3905 u32 *dump_buf, bool dump)
3907 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3908 u32 block_id, line_id, offset = 0;
3910 /* Don't dump static debug if a debug bus recording is in progress */
3911 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3915 /* Disable all blocks debug output */
3916 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3917 struct block_defs *block = s_block_defs[block_id];
/* Blocks with no debug-bus client on this chip are skipped. */
3919 if (block->dbg_client_id[dev_data->chip_id] !=
3920 MAX_DBG_BUS_CLIENTS)
3921 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
/* Put the debug bus into a known state: reset the DBG block, select
 * 8HW/0ST framing, target the internal buffer, and enable it. */
3925 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3926 qed_bus_set_framing_mode(p_hwfn,
3927 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3929 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3930 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3931 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3934 /* Dump all static debug lines for each relevant block */
3935 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3936 struct block_defs *block = s_block_defs[block_id];
3937 struct dbg_bus_block *block_desc;
3938 u32 block_dwords, addr, len;
/* Skip blocks that have no debug-bus client on this chip. */
3941 if (block->dbg_client_id[dev_data->chip_id] ==
3942 MAX_DBG_BUS_CLIENTS)
3945 block_desc = get_dbg_bus_block_desc(p_hwfn,
3946 (enum block_id)block_id);
3947 block_dwords = NUM_DBG_LINES(block_desc) *
3948 STATIC_DEBUG_LINE_DWORDS;
3950 /* Dump static section params */
3951 offset += qed_grc_dump_mem_hdr(p_hwfn,
3957 32, false, "STATIC", false, 0);
/* Size-only pass (dump == false): just account for the block. */
3960 offset += block_dwords;
3964 /* If all lines are invalid - dump zeros */
3965 if (dev_data->block_in_reset[block_id]) {
3966 memset(dump_buf + offset, 0,
3967 DWORDS_TO_BYTES(block_dwords));
3968 offset += block_dwords;
3972 /* Enable block's client */
3973 dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3974 qed_bus_enable_clients(p_hwfn,
3976 BIT(dbg_client_id));
/* Select each debug line in turn and read its calendar output. */
3978 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3979 len = STATIC_DEBUG_LINE_DWORDS;
3980 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3982 /* Configure debug line ID */
3983 qed_config_dbg_line(p_hwfn,
3985 (enum block_id)block_id,
3986 (u8)line_id, 0xf, 0, 0, 0);
3988 /* Read debug line info */
3989 offset += qed_grc_dump_addr_range(p_hwfn,
3995 true, SPLIT_TYPE_NONE,
3999 /* Disable block's client and debug output */
4000 qed_bus_enable_clients(p_hwfn, p_ptt, 0)
4001 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
/* Final cleanup: disable the DBG block and all clients. */
4005 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
4006 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
4012 /* Performs GRC Dump to the specified buffer.
4013 * Returns the dumped size in dwords.
4015 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
4016 struct qed_ptt *p_ptt,
4018 bool dump, u32 *num_dumped_dwords)
4020 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4021 bool parities_masked = false;
4025 *num_dumped_dwords = 0;
4026 dev_data->num_regs_read = 0;
4028 /* Update reset state */
4030 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4032 /* Dump global params */
4033 offset += qed_dump_common_global_params(p_hwfn,
4035 dump_buf + offset, dump, 4);
4036 offset += qed_dump_str_param(dump_buf + offset,
4037 dump, "dump-type", "grc-dump");
4038 offset += qed_dump_num_param(dump_buf + offset,
4041 qed_grc_get_param(p_hwfn,
4042 DBG_GRC_PARAM_NUM_LCIDS));
4043 offset += qed_dump_num_param(dump_buf + offset,
4046 qed_grc_get_param(p_hwfn,
4047 DBG_GRC_PARAM_NUM_LTIDS));
4048 offset += qed_dump_num_param(dump_buf + offset,
4049 dump, "num-ports", dev_data->num_ports);
4051 /* Dump reset registers (dumped before taking blocks out of reset ) */
4052 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4053 offset += qed_grc_dump_reset_regs(p_hwfn,
4055 dump_buf + offset, dump);
4057 /* Take all blocks out of reset (using reset registers) */
4059 qed_grc_unreset_blocks(p_hwfn, p_ptt);
4060 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4063 /* Disable all parities using MFW command */
4065 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4066 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
/* If masking failed and PARITY_SAFE is requested, abort rather
 * than risk parity side effects during the dump. */
4067 if (!parities_masked) {
4069 "Failed to mask parities using MFW\n");
4070 if (qed_grc_get_param
4071 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4072 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4076 /* Dump modified registers (dumped before modifying them) */
4077 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4078 offset += qed_grc_dump_modified_regs(p_hwfn,
4080 dump_buf + offset, dump);
/* Stall the Storm processors before reading IORs/VFC so their state
 * is stable (unstalled again at the end of the dump). */
4084 (qed_grc_is_included(p_hwfn,
4085 DBG_GRC_PARAM_DUMP_IOR) ||
4086 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4087 qed_grc_stall_storms(p_hwfn, p_ptt, true);
4090 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4091 bool block_enable[MAX_BLOCK_ID];
4093 /* Dump all blocks except MCP */
4094 for (i = 0; i < MAX_BLOCK_ID; i++)
4095 block_enable[i] = true;
4096 block_enable[BLOCK_MCP] = false;
4097 offset += qed_grc_dump_registers(p_hwfn,
4102 block_enable, NULL, NULL);
4104 /* Dump special registers */
4105 offset += qed_grc_dump_special_regs(p_hwfn,
4107 dump_buf + offset, dump);
4111 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
/* MCP dump (includes its own halt/resume handling). */
4114 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4115 offset += qed_grc_dump_mcp(p_hwfn,
4116 p_ptt, dump_buf + offset, dump);
4119 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4120 offset += qed_grc_dump_ctx(p_hwfn,
4121 p_ptt, dump_buf + offset, dump);
4123 /* Dump RSS memories */
4124 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4125 offset += qed_grc_dump_rss(p_hwfn,
4126 p_ptt, dump_buf + offset, dump);
/* Dump each Big RAM type whose GRC param is enabled. */
4129 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4130 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4131 offset += qed_grc_dump_big_ram(p_hwfn,
4137 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4138 offset += qed_grc_dump_iors(p_hwfn,
4139 p_ptt, dump_buf + offset, dump);
4142 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4143 offset += qed_grc_dump_vfc(p_hwfn,
4144 p_ptt, dump_buf + offset, dump);
/* PHY tbus dump is only supported on K2 ASIC. */
4147 if (qed_grc_is_included(p_hwfn,
4148 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4149 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4150 offset += qed_grc_dump_phy(p_hwfn,
4151 p_ptt, dump_buf + offset, dump);
4153 /* Dump static debug data (only if not during debug bus recording) */
4154 if (qed_grc_is_included(p_hwfn,
4155 DBG_GRC_PARAM_DUMP_STATIC) &&
4156 (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
4157 offset += qed_grc_dump_static_debug(p_hwfn,
4159 dump_buf + offset, dump);
4161 /* Dump last section */
4162 offset += qed_dump_last_section(dump_buf, offset, dump);
4165 /* Unstall storms */
4166 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4167 qed_grc_stall_storms(p_hwfn, p_ptt, false);
4169 /* Clear parity status */
4170 qed_grc_clear_all_prty(p_hwfn, p_ptt);
4172 /* Enable all parities using MFW command */
4173 if (parities_masked)
4174 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4177 *num_dumped_dwords = offset;
4179 return DBG_STATUS_OK;
4182 /* Writes the specified failing Idle Check rule to the specified buffer.
4183 * Returns the dumped size in dwords.
4185 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4186 struct qed_ptt *p_ptt,
4191 const struct dbg_idle_chk_rule *rule,
4192 u16 fail_entry_id, u32 *cond_reg_values)
4194 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4195 const struct dbg_idle_chk_cond_reg *cond_regs;
4196 const struct dbg_idle_chk_info_reg *info_regs;
4197 u32 i, next_reg_offset = 0, offset = 0;
4198 struct dbg_idle_chk_result_hdr *hdr;
4199 const union dbg_idle_chk_reg *regs;
/* Locate this rule's condition and info register descriptors within the
 * binary debug array. NOTE(review): the "®s" below looks like mojibake
 * for "&regs" introduced by text extraction — confirm against the
 * original source before relying on these lines. */
4202 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4203 regs = &((const union dbg_idle_chk_reg *)
4204 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4205 cond_regs = ®s[0].cond_reg;
4206 info_regs = ®s[rule->num_cond_regs].info_reg;
4208 /* Dump rule data */
4210 memset(hdr, 0, sizeof(*hdr));
4211 hdr->rule_id = rule_id;
4212 hdr->mem_entry_id = fail_entry_id;
4213 hdr->severity = rule->severity;
4214 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4217 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4219 /* Dump condition register values */
4220 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4221 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4222 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4224 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4225 (dump_buf + offset);
4227 /* Write register header */
4229 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4234 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4235 memset(reg_hdr, 0, sizeof(*reg_hdr));
4236 reg_hdr->start_entry = reg->start_entry;
4237 reg_hdr->size = reg->entry_size;
/* IS_MEM marks registers that are actually memories (multiple
 * entries or a non-zero start entry). */
4238 SET_FIELD(reg_hdr->data,
4239 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4240 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4241 SET_FIELD(reg_hdr->data,
4242 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4244 /* Write register values */
4245 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4246 dump_buf[offset] = cond_reg_values[next_reg_offset];
4249 /* Dump info register values */
4250 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4251 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4254 /* Check if register's block is in reset */
4256 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4260 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4261 if (block_id >= MAX_BLOCK_ID) {
4262 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4266 if (!dev_data->block_in_reset[block_id]) {
4267 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4268 bool wide_bus, eval_mode, mode_match = true;
4269 u16 modes_buf_offset;
4272 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4273 (dump_buf + offset);
/* Info registers may be gated on a chip/mode expression;
 * evaluate it before dumping. */
4276 eval_mode = GET_FIELD(reg->mode.data,
4277 DBG_MODE_HDR_EVAL_MODE) > 0;
4280 GET_FIELD(reg->mode.data,
4281 DBG_MODE_HDR_MODES_BUF_OFFSET);
4283 qed_is_mode_match(p_hwfn,
4290 addr = GET_FIELD(reg->data,
4291 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4292 wide_bus = GET_FIELD(reg->data,
4293 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4295 /* Write register header */
4296 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4297 hdr->num_dumped_info_regs++;
4298 memset(reg_hdr, 0, sizeof(*reg_hdr));
4299 reg_hdr->size = reg->size;
/* Info register IDs continue after the condition register IDs. */
4300 SET_FIELD(reg_hdr->data,
4301 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4302 rule->num_cond_regs + reg_id);
4304 /* Write register values */
4305 offset += qed_grc_dump_addr_range(p_hwfn,
4310 reg->size, wide_bus,
4311 SPLIT_TYPE_NONE, 0);
4318 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4320 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4321 u32 *dump_buf, bool dump,
4322 const struct dbg_idle_chk_rule *input_rules,
4323 u32 num_input_rules, u32 *num_failing_rules)
4325 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4326 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4331 *num_failing_rules = 0;
4333 for (i = 0; i < num_input_rules; i++) {
4334 const struct dbg_idle_chk_cond_reg *cond_regs;
4335 const struct dbg_idle_chk_rule *rule;
4336 const union dbg_idle_chk_reg *regs;
4337 u16 num_reg_entries = 1;
4338 bool check_rule = true;
4339 const u32 *imm_values;
/* Resolve this rule's registers and immediate values from the
 * binary debug arrays. NOTE(review): "®s" below appears to be
 * mojibake for "&regs" — confirm against the original source. */
4341 rule = &input_rules[i];
4342 regs = &((const union dbg_idle_chk_reg *)
4343 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4345 cond_regs = ®s[0].cond_reg;
4346 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4349 /* Check if all condition register blocks are out of reset, and
4350 * find maximal number of entries (all condition registers that
4351 * are memories must have the same size, which is > 1).
4353 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4356 GET_FIELD(cond_regs[reg_id].data,
4357 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4359 if (block_id >= MAX_BLOCK_ID) {
4360 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4364 check_rule = !dev_data->block_in_reset[block_id];
4365 if (cond_regs[reg_id].num_entries > num_reg_entries)
4366 num_reg_entries = cond_regs[reg_id].num_entries;
/* When the rule cannot be checked (a block is in reset) and this
 * is a size-only pass, conservatively account for a full failure
 * dump of every entry. */
4369 if (!check_rule && dump)
4373 u32 entry_dump_size =
4374 qed_idle_chk_dump_failure(p_hwfn,
4383 offset += num_reg_entries * entry_dump_size;
4384 (*num_failing_rules) += num_reg_entries;
4388 /* Go over all register entries (number of entries is the same
4389 * for all condition registers).
4391 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4392 u32 next_reg_offset = 0;
4394 /* Read current entry of all condition registers */
4395 for (reg_id = 0; reg_id < rule->num_cond_regs;
4397 const struct dbg_idle_chk_cond_reg *reg =
4399 u32 padded_entry_size, addr;
4402 /* Find GRC address (if it's a memory, the
4403 * address of the specific entry is calculated).
4405 addr = GET_FIELD(reg->data,
4406 DBG_IDLE_CHK_COND_REG_ADDRESS);
4408 GET_FIELD(reg->data,
4409 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
/* Memory entries are padded to a power of two;
 * offset the address by the entry index. */
4410 if (reg->num_entries > 1 ||
4411 reg->start_entry > 0) {
4413 reg->entry_size > 1 ?
4414 roundup_pow_of_two(reg->entry_size) :
4416 addr += (reg->start_entry + entry_id) *
4420 /* Read registers */
4421 if (next_reg_offset + reg->entry_size >=
4422 IDLE_CHK_MAX_ENTRIES_SIZE) {
4424 "idle check registers entry is too large\n");
4429 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4435 SPLIT_TYPE_NONE, 0);
4438 /* Call rule condition function.
4439 * If returns true, it's a failure.
4441 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4443 offset += qed_idle_chk_dump_failure(p_hwfn,
4451 (*num_failing_rules)++;
4459 /* Performs Idle Check Dump to the specified buffer.
4460 * Returns the dumped size in dwords.
4462 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4463 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4465 u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4466 u32 num_failing_rules = 0;
4468 /* Dump global params */
4469 offset += qed_dump_common_global_params(p_hwfn,
4471 dump_buf + offset, dump, 1);
4472 offset += qed_dump_str_param(dump_buf + offset,
4473 dump, "dump-type", "idle-chk");
4475 /* Dump idle check section header with a single parameter */
4476 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
/* Remember where "num_rules" was written; the real count is only known
 * after all rules are processed, so it is patched in at the end. */
4477 num_failing_rules_offset = offset;
4478 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
/* Walk the binary idle-check rules array; each chunk starts with a
 * condition header followed by its rules. */
4480 while (input_offset <
4481 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4482 const struct dbg_idle_chk_cond_hdr *cond_hdr =
4483 (const struct dbg_idle_chk_cond_hdr *)
4484 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4486 bool eval_mode, mode_match = true;
4487 u32 curr_failing_rules;
4488 u16 modes_buf_offset;
/* Skip rule chunks whose chip/mode expression doesn't match. */
4491 eval_mode = GET_FIELD(cond_hdr->mode.data,
4492 DBG_MODE_HDR_EVAL_MODE) > 0;
4495 GET_FIELD(cond_hdr->mode.data,
4496 DBG_MODE_HDR_MODES_BUF_OFFSET);
4497 mode_match = qed_is_mode_match(p_hwfn,
4503 qed_idle_chk_dump_rule_entries(p_hwfn,
4507 (const struct dbg_idle_chk_rule *)
4508 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4510 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4511 &curr_failing_rules);
4512 num_failing_rules += curr_failing_rules;
4515 input_offset += cond_hdr->data_size;
4518 /* Overwrite num_rules parameter */
4520 qed_dump_num_param(dump_buf + num_failing_rules_offset,
4521 dump, "num_rules", num_failing_rules);
4523 /* Dump last section */
4524 offset += qed_dump_last_section(dump_buf, offset, dump);
4529 /* Finds the meta data image in NVRAM */
4530 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4531 struct qed_ptt *p_ptt,
4533 u32 *nvram_offset_bytes,
4534 u32 *nvram_size_bytes)
4536 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4537 struct mcp_file_att file_att;
4540 /* Call NVRAM get file command */
4541 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4543 DRV_MSG_CODE_NVM_GET_FILE_ATT,
4547 &ret_txn_size, (u32 *)&file_att);
4549 /* Check response */
4551 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4552 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4554 /* Update return values */
4555 *nvram_offset_bytes = file_att.nvm_start_addr;
4556 *nvram_size_bytes = file_att.len;
4560 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4561 image_type, *nvram_offset_bytes, *nvram_size_bytes);
4563 /* Check alignment */
/* The image must be dword-aligned in size since it is later read into
 * a u32 buffer. */
4564 if (*nvram_size_bytes & 0x3)
4565 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4567 return DBG_STATUS_OK;
4570 /* Reads data from NVRAM */
4571 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4572 struct qed_ptt *p_ptt,
4573 u32 nvram_offset_bytes,
4574 u32 nvram_size_bytes, u32 *ret_buf)
4576 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4577 s32 bytes_left = nvram_size_bytes;
4578 u32 read_offset = 0;
4582 "nvram_read: reading image of size %d bytes from NVRAM\n",
/* Read in chunks of at most MCP_DRV_NVM_BUF_LEN bytes, advancing
 * read_offset until the whole image is copied. */
4588 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4590 /* Call NVRAM read command */
4591 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4592 DRV_MSG_CODE_NVM_READ_NVRAM,
4593 (nvram_offset_bytes +
4596 DRV_MB_PARAM_NVM_LEN_OFFSET),
4597 &ret_mcp_resp, &ret_mcp_param,
4599 (u32 *)((u8 *)ret_buf + read_offset)))
4600 return DBG_STATUS_NVRAM_READ_FAILED;
4602 /* Check response */
4603 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4604 return DBG_STATUS_NVRAM_READ_FAILED;
4606 /* Update read offset */
4607 read_offset += ret_read_size;
4608 bytes_left -= ret_read_size;
4609 } while (bytes_left > 0);
4611 return DBG_STATUS_OK;
4614 /* Get info on the MCP Trace data in the scratchpad:
4615 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4616 * - trace_data_size (OUT): trace data size in bytes (without the header)
4618 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4619 struct qed_ptt *p_ptt,
4620 u32 *trace_data_grc_addr,
4621 u32 *trace_data_size)
4623 u32 spad_trace_offsize, signature;
4625 /* Read trace section offsize structure from MCP scratchpad */
4626 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4628 /* Extract trace section address from offsize (in scratchpad) */
4629 *trace_data_grc_addr =
4630 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4632 /* Read signature from MCP trace section */
/* The signature validates that the scratchpad really contains an MFW
 * trace section before trusting the size field. */
4633 signature = qed_rd(p_hwfn, p_ptt,
4634 *trace_data_grc_addr +
4635 offsetof(struct mcp_trace, signature));
4637 if (signature != MFW_TRACE_SIGNATURE)
4638 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4640 /* Read trace size from MCP trace section */
4641 *trace_data_size = qed_rd(p_hwfn,
4643 *trace_data_grc_addr +
4644 offsetof(struct mcp_trace, size));
4646 return DBG_STATUS_OK;
4649 /* Reads MCP trace meta data image from NVRAM
4650 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4651 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4652 * loaded from file).
4653 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4655 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4656 struct qed_ptt *p_ptt,
4657 u32 trace_data_size_bytes,
4658 u32 *running_bundle_id,
4659 u32 *trace_meta_offset,
4660 u32 *trace_meta_size)
4662 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4664 /* Read MCP trace section offsize structure from MCP scratchpad */
4665 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4667 /* Find running bundle ID */
/* The running bundle ID lives right after the trace data in the
 * scratchpad trace section; only values 0 and 1 are valid. */
4669 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4670 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4671 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4672 if (*running_bundle_id > 1)
4673 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4675 /* Find image in NVRAM */
/* The bundle ID selects which of the two MFW trace images to read. */
4677 (*running_bundle_id ==
4678 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4679 return qed_find_nvram_image(p_hwfn,
4682 trace_meta_offset, trace_meta_size);
4685 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4686 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4687 struct qed_ptt *p_ptt,
4688 u32 nvram_offset_in_bytes,
4689 u32 size_in_bytes, u32 *buf)
4691 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4692 enum dbg_status status;
4695 /* Read meta data from NVRAM */
4696 status = qed_nvram_read(p_hwfn,
4698 nvram_offset_in_bytes, size_in_bytes, buf);
4699 if (status != DBG_STATUS_OK)
4702 /* Extract and check first signature */
/* The meta image is framed by two NVM_MAGIC_VALUE signatures: one at
 * the start and one after the module-name list. Both must match. */
4703 signature = qed_read_unaligned_dword(byte_buf);
4704 byte_buf += sizeof(signature);
4705 if (signature != NVM_MAGIC_VALUE)
4706 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4708 /* Extract number of modules */
4709 modules_num = *(byte_buf++);
4711 /* Skip all modules */
/* Each module record is a one-byte length followed by that many bytes. */
4712 for (i = 0; i < modules_num; i++) {
4713 module_len = *(byte_buf++);
4714 byte_buf += module_len;
4717 /* Extract and check second signature */
4718 signature = qed_read_unaligned_dword(byte_buf);
4719 byte_buf += sizeof(signature);
4720 if (signature != NVM_MAGIC_VALUE)
4721 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4723 return DBG_STATUS_OK;
4726 /* Dump MCP Trace */
4727 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4728 struct qed_ptt *p_ptt,
4730 bool dump, u32 *num_dumped_dwords)
4732 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4733 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4734 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4735 enum dbg_status status;
4739 *num_dumped_dwords = 0;
4741 mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4743 /* Get trace data info */
4744 status = qed_mcp_trace_get_data_info(p_hwfn,
4746 &trace_data_grc_addr,
4747 &trace_data_size_bytes);
4748 if (status != DBG_STATUS_OK)
4751 /* Dump global params */
4752 offset += qed_dump_common_global_params(p_hwfn,
4754 dump_buf + offset, dump, 1);
4755 offset += qed_dump_str_param(dump_buf + offset,
4756 dump, "dump-type", "mcp-trace");
4758 /* Halt MCP while reading from scratchpad so the read data will be
4759 * consistent. if halt fails, MCP trace is taken anyway, with a small
4760 * risk that it may be corrupt.
4762 if (dump && mcp_access) {
4763 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4765 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4768 /* Find trace data size */
/* Size includes the mcp_trace header, rounded up to whole dwords. */
4769 trace_data_size_dwords =
4770 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4773 /* Dump trace data section header and param */
4774 offset += qed_dump_section_hdr(dump_buf + offset,
4775 dump, "mcp_trace_data", 1);
4776 offset += qed_dump_num_param(dump_buf + offset,
4777 dump, "size", trace_data_size_dwords);
4779 /* Read trace data from scratchpad into dump buffer */
4780 offset += qed_grc_dump_addr_range(p_hwfn,
4784 BYTES_TO_DWORDS(trace_data_grc_addr),
4785 trace_data_size_dwords, false,
4786 SPLIT_TYPE_NONE, 0);
4788 /* Resume MCP (only if halt succeeded) */
4789 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4790 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4792 /* Dump trace meta section header */
4793 offset += qed_dump_section_hdr(dump_buf + offset,
4794 dump, "mcp_trace_meta", 1);
4796 /* If MCP Trace meta size parameter was set, use it.
4797 * Otherwise, read trace meta.
4798 * trace_meta_size_bytes is dword-aligned.
4800 trace_meta_size_bytes =
4801 qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4802 if ((!trace_meta_size_bytes || dump) && mcp_access) {
4803 status = qed_mcp_trace_get_meta_info(p_hwfn,
4805 trace_data_size_bytes,
4807 &trace_meta_offset_bytes,
4808 &trace_meta_size_bytes);
4809 if (status == DBG_STATUS_OK)
4810 trace_meta_size_dwords =
4811 BYTES_TO_DWORDS(trace_meta_size_bytes);
4814 /* Dump trace meta size param */
4815 offset += qed_dump_num_param(dump_buf + offset,
4816 dump, "size", trace_meta_size_dwords);
4818 /* Read trace meta image into dump buffer */
4819 if (dump && trace_meta_size_dwords)
4820 status = qed_mcp_trace_read_meta(p_hwfn,
4822 trace_meta_offset_bytes,
4823 trace_meta_size_bytes,
4825 if (status == DBG_STATUS_OK)
4826 offset += trace_meta_size_dwords;
4828 /* Dump last section */
4829 offset += qed_dump_last_section(dump_buf, offset, dump);
4831 *num_dumped_dwords = offset;
4833 /* If no mcp access, indicate that the dump doesn't contain the meta
4836 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dumps the GRC trace (reg) FIFO into dump_buf. The FIFO depth cannot be
 * queried, so elements are pulled one at a time while the valid-data
 * register reports data, bounded by REG_FIFO_DEPTH_DWORDS.
 */
4840 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4841 struct qed_ptt *p_ptt,
4843 bool dump, u32 *num_dumped_dwords)
4845 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4848 *num_dumped_dwords = 0;
4850 /* Dump global params */
4851 offset += qed_dump_common_global_params(p_hwfn,
4853 dump_buf + offset, dump, 1);
4854 offset += qed_dump_str_param(dump_buf + offset,
4855 dump, "dump-type", "reg-fifo");
4857 /* Dump fifo data section header and param. The size param is 0 for
4858 * now, and is overwritten after reading the FIFO.
4860 offset += qed_dump_section_hdr(dump_buf + offset,
4861 dump, "reg_fifo_data", 1);
4862 size_param_offset = offset;
4863 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4866 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4867 * test how much data is available, except for reading it.
4869 offset += REG_FIFO_DEPTH_DWORDS;
4873 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4874 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4876 /* Pull available data from fifo. Use DMAE since this is widebus memory
4877 * and must be accessed atomically. Test for dwords_read not passing
4878 * buffer size since more entries could be added to the buffer as we are
4881 addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4882 len = REG_FIFO_ELEMENT_DWORDS;
4883 for (dwords_read = 0;
4884 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4885 dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4886 offset += qed_grc_dump_addr_range(p_hwfn,
4892 true, SPLIT_TYPE_NONE,
4894 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4895 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
/* Patch the real size into the "size" param written earlier. */
4898 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4901 /* Dump last section */
4902 offset += qed_dump_last_section(dump_buf, offset, dump);
4904 *num_dumped_dwords = offset;
4906 return DBG_STATUS_OK;
/* Dumps the IGU error-handling FIFO into dump_buf. Mirrors
 * qed_reg_fifo_dump but reads IGU_REG_ERROR_HANDLING_MEMORY while the
 * IGU valid-data register reports data, bounded by IGU_FIFO_DEPTH_DWORDS.
 */
4910 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4911 struct qed_ptt *p_ptt,
4913 bool dump, u32 *num_dumped_dwords)
4915 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4918 *num_dumped_dwords = 0;
4920 /* Dump global params */
4921 offset += qed_dump_common_global_params(p_hwfn,
4923 dump_buf + offset, dump, 1);
4924 offset += qed_dump_str_param(dump_buf + offset,
4925 dump, "dump-type", "igu-fifo");
4927 /* Dump fifo data section header and param. The size param is 0 for
4928 * now, and is overwritten after reading the FIFO.
4930 offset += qed_dump_section_hdr(dump_buf + offset,
4931 dump, "igu_fifo_data", 1);
4932 size_param_offset = offset;
4933 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4936 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4937 * test how much data is available, except for reading it.
4939 offset += IGU_FIFO_DEPTH_DWORDS;
4943 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4944 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4946 /* Pull available data from fifo. Use DMAE since this is widebus memory
4947 * and must be accessed atomically. Test for dwords_read not passing
4948 * buffer size since more entries could be added to the buffer as we are
4951 addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4952 len = IGU_FIFO_ELEMENT_DWORDS;
4953 for (dwords_read = 0;
4954 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4955 dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4956 offset += qed_grc_dump_addr_range(p_hwfn,
4962 true, SPLIT_TYPE_NONE,
4964 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4965 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
/* Patch the real size into the "size" param written earlier. */
4968 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4971 /* Dump last section */
4972 offset += qed_dump_last_section(dump_buf, offset, dump);
4974 *num_dumped_dwords = offset;
4976 return DBG_STATUS_OK;
4979 /* Protection Override dump */
4980 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4981 struct qed_ptt *p_ptt,
4984 u32 *num_dumped_dwords)
4986 u32 size_param_offset, override_window_dwords, offset = 0, addr;
4988 *num_dumped_dwords = 0;
4990 /* Dump global params */
4991 offset += qed_dump_common_global_params(p_hwfn,
4993 dump_buf + offset, dump, 1);
4994 offset += qed_dump_str_param(dump_buf + offset,
4995 dump, "dump-type", "protection-override");
4997 /* Dump data section header and param. The size param is 0 for now,
4998 * and is overwritten after reading the data.
5000 offset += qed_dump_section_hdr(dump_buf + offset,
5001 dump, "protection_override_data", 1);
5002 size_param_offset = offset;
5003 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
/* Size-only pass: account for the worst-case depth. */
5006 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
5010 /* Add override window info to buffer */
/* The hardware reports how many override-window entries are valid;
 * only those are dumped. */
5011 override_window_dwords =
5012 qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
5013 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
5014 if (override_window_dwords) {
5015 addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
5016 offset += qed_grc_dump_addr_range(p_hwfn,
5021 override_window_dwords,
5022 true, SPLIT_TYPE_NONE, 0);
/* Patch the real size into the "size" param written earlier. */
5023 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
5024 override_window_dwords);
5027 /* Dump last section */
5028 offset += qed_dump_last_section(dump_buf, offset, dump);
5030 *num_dumped_dwords = offset;
5032 return DBG_STATUS_OK;
5035 /* Performs FW Asserts Dump to the specified buffer.
5036 * Returns the dumped size in dwords.
5038 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5039 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5041 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5042 struct fw_asserts_ram_section *asserts;
5043 char storm_letter_str[2] = "?";
5044 struct fw_info fw_info;
5048 /* Dump global params */
5049 offset += qed_dump_common_global_params(p_hwfn,
5051 dump_buf + offset, dump, 1);
5052 offset += qed_dump_str_param(dump_buf + offset,
5053 dump, "dump-type", "fw-asserts");
5055 /* Find Storm dump size */
5056 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5057 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
5058 struct storm_defs *storm = &s_storm_defs[storm_id];
5059 u32 last_list_idx, addr;
/* Skip Storms whose block is currently in reset. */
5061 if (dev_data->block_in_reset[storm->block_id])
5064 /* Read FW info for the current Storm */
5065 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5067 asserts = &fw_info.fw_asserts_section;
5069 /* Dump FW Asserts section header and params */
5070 storm_letter_str[0] = storm->letter;
5071 offset += qed_dump_section_hdr(dump_buf + offset,
5072 dump, "fw_asserts", 2);
5073 offset += qed_dump_str_param(dump_buf + offset,
5074 dump, "storm", storm_letter_str);
5075 offset += qed_dump_num_param(dump_buf + offset,
5078 asserts->list_element_dword_size);
5080 /* Read and dump FW Asserts data */
/* Size-only pass: account for one list element. */
5082 offset += asserts->list_element_dword_size;
/* Locate the asserts list in the Storm's fast-memory INT RAM and
 * compute the index of the most recent entry (the list is circular;
 * next_list_idx points at the slot to be written next). */
5086 fw_asserts_section_addr = storm->sem_fast_mem_addr +
5087 SEM_FAST_REG_INT_RAM +
5088 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5089 next_list_idx_addr = fw_asserts_section_addr +
5090 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5091 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5092 last_list_idx = (next_list_idx > 0 ?
5094 asserts->list_num_elements) - 1;
5095 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5096 asserts->list_dword_offset +
5097 last_list_idx * asserts->list_element_dword_size;
5099 qed_grc_dump_addr_range(p_hwfn, p_ptt,
5102 asserts->list_element_dword_size,
5103 false, SPLIT_TYPE_NONE, 0);
5106 /* Dump last section */
5107 offset += qed_dump_last_section(dump_buf, offset, dump);
5112 /* Dumps the specified ILT pages to the specified buffer.
5113 * Returns the dumped size in dwords.
/* Iterates ilt_pages[start_page_id .. start_page_id + num_pages - 1]
 * (parameters partially elided in this extract) and, per page, emits either
 * the page ID or the raw page memory depending on dump_page_ids.
 */
5115 static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
5119 struct phys_mem_desc *ilt_pages,
5122 u32 page_id, end_page_id, offset = 0;
5127 end_page_id = start_page_id + num_pages - 1;
5129 for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
5130 struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
5134 * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
/* Pages without a mapped virtual address are skipped (not allocated) */
5138 if (!ilt_pages[page_id].virt_addr)
5141 if (dump_page_ids) {
5142 /* Copy page ID to dump buffer */
5144 *(dump_buf + offset) = page_id;
5147 /* Copy page memory to dump buffer */
5149 memcpy(dump_buf + offset,
5150 mem_desc->virt_addr, mem_desc->size);
/* mem_desc->size is in bytes; offset is tracked in dwords */
5151 offset += BYTES_TO_DWORDS(mem_desc->size);
5158 /* Dumps a section containing the dumped ILT pages.
5159 * Returns the dumped size in dwords.
/* Emits one section (named "ilt_page_ids" or "ilt_page_mem") covering the
 * valid CDUC (connection) and CDUT (task) ILT pages for the PF and each VF.
 * The section's "size" param is written as 0 first and patched afterwards,
 * once the actual data size (offset - base_data_offset) is known.
 */
5161 static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
5164 u32 valid_conn_pf_pages,
5165 u32 valid_conn_vf_pages,
5166 struct phys_mem_desc *ilt_pages,
5169 struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
5170 u32 pf_start_line, start_page_id, offset = 0;
5171 u32 cdut_pf_init_pages, cdut_vf_init_pages;
5172 u32 cdut_pf_work_pages, cdut_vf_work_pages;
5173 u32 base_data_offset, size_param_offset;
5174 u32 cdut_pf_pages, cdut_vf_pages;
5175 const char *section_name;
5178 section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
5179 cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
5180 cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
5181 cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
5182 cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
5183 cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
5184 cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
5185 pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
5188 qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
5190 /* Dump size parameter (0 for now, overwritten with real size later) */
5191 size_param_offset = offset;
5192 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
5193 base_data_offset = offset;
5195 /* CDUC pages are ordered as follows:
5196 * - PF pages - valid section (included in PF connection type mapping)
5197 * - PF pages - invalid section (not dumped)
5198 * - For each VF in the PF:
5199 * - VF pages - valid section (included in VF connection type mapping)
5200 * - VF pages - invalid section (not dumped)
5202 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
5203 /* Dump connection PF pages */
/* Page IDs are relative to this PF's first ILT line */
5204 start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
5205 offset += qed_ilt_dump_pages_range(dump_buf + offset,
5208 valid_conn_pf_pages,
5209 ilt_pages, dump_page_ids);
5211 /* Dump connection VF pages */
5212 start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
5213 for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
5214 i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
5215 offset += qed_ilt_dump_pages_range(dump_buf + offset,
5218 valid_conn_vf_pages,
5223 /* CDUT pages are ordered as follows:
5224 * - PF init pages (not dumped)
5226 * - For each VF in the PF:
5227 * - VF init pages (not dumped)
5230 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
5231 /* Dump task PF pages */
/* Skip past the PF init pages; only work pages are dumped */
5232 start_page_id = clients[ILT_CLI_CDUT].first.val +
5233 cdut_pf_init_pages - pf_start_line;
5234 offset += qed_ilt_dump_pages_range(dump_buf + offset,
5238 ilt_pages, dump_page_ids);
5240 /* Dump task VF pages */
5241 start_page_id = clients[ILT_CLI_CDUT].first.val +
5242 cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
5243 for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
5244 i++, start_page_id += cdut_vf_pages)
5245 offset += qed_ilt_dump_pages_range(dump_buf + offset,
5253 /* Overwrite size param */
5255 qed_dump_num_param(dump_buf + size_param_offset,
5256 dump, "size", offset - base_data_offset);
5261 /* Performs ILT Dump to the specified buffer.
5262 * Returns the dumped size in dwords.
/* Layout of the produced dump:
 *   1. common global params + 22 ILT-specific params
 *   2. "num_pf_cids_per_conn_type" section
 *   3. "num_vf_cids_per_conn_type" section
 *   4. "ilt_page_desc" section (phys_mem_desc per shadow page)
 *   5. page-IDs and page-memory sections (qed_ilt_dump_pages_section)
 *   6. last section
 * When dump is false, only the required size is computed.
 */
5264 static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
5265 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5267 struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
5268 u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
5269 u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
5270 u32 num_cids_per_page, conn_ctx_size;
5271 u32 cduc_page_size, cdut_page_size;
5272 struct phys_mem_desc *ilt_pages;
/* Page sizes are encoded as a power-of-two exponent in p_size.val */
5275 cduc_page_size = 1 <<
5276 (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
5277 cdut_page_size = 1 <<
5278 (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
5279 conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
5280 num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
5281 ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
5283 /* Dump global params - 22 must match number of params below */
5284 offset += qed_dump_common_global_params(p_hwfn, p_ptt,
5285 dump_buf + offset, dump, 22);
5286 offset += qed_dump_str_param(dump_buf + offset,
5287 dump, "dump-type", "ilt-dump");
5288 offset += qed_dump_num_param(dump_buf + offset,
5290 "cduc-page-size", cduc_page_size);
5291 offset += qed_dump_num_param(dump_buf + offset,
5293 "cduc-first-page-id",
5294 clients[ILT_CLI_CDUC].first.val);
5295 offset += qed_dump_num_param(dump_buf + offset,
5297 "cduc-last-page-id",
5298 clients[ILT_CLI_CDUC].last.val);
5299 offset += qed_dump_num_param(dump_buf + offset,
5301 "cduc-num-pf-pages",
5303 [ILT_CLI_CDUC].pf_total_lines);
5304 offset += qed_dump_num_param(dump_buf + offset,
5306 "cduc-num-vf-pages",
5308 [ILT_CLI_CDUC].vf_total_lines);
5309 offset += qed_dump_num_param(dump_buf + offset,
5311 "max-conn-ctx-size",
5313 offset += qed_dump_num_param(dump_buf + offset,
5315 "cdut-page-size", cdut_page_size);
5316 offset += qed_dump_num_param(dump_buf + offset,
5318 "cdut-first-page-id",
5319 clients[ILT_CLI_CDUT].first.val);
5320 offset += qed_dump_num_param(dump_buf + offset,
5322 "cdut-last-page-id",
5323 clients[ILT_CLI_CDUT].last.val);
5324 offset += qed_dump_num_param(dump_buf + offset,
5326 "cdut-num-pf-init-pages",
5327 qed_get_cdut_num_pf_init_pages(p_hwfn));
5328 offset += qed_dump_num_param(dump_buf + offset,
5330 "cdut-num-vf-init-pages",
5331 qed_get_cdut_num_vf_init_pages(p_hwfn));
5332 offset += qed_dump_num_param(dump_buf + offset,
5334 "cdut-num-pf-work-pages",
5335 qed_get_cdut_num_pf_work_pages(p_hwfn));
5336 offset += qed_dump_num_param(dump_buf + offset,
5338 "cdut-num-vf-work-pages",
5339 qed_get_cdut_num_vf_work_pages(p_hwfn));
5340 offset += qed_dump_num_param(dump_buf + offset,
5342 "max-task-ctx-size",
5343 p_hwfn->p_cxt_mngr->task_ctx_size);
5344 offset += qed_dump_num_param(dump_buf + offset,
5347 p_hwfn->p_cxt_mngr->task_type_id);
5348 offset += qed_dump_num_param(dump_buf + offset,
5350 "first-vf-id-in-pf",
5351 p_hwfn->p_cxt_mngr->first_vf_in_pf);
5352 offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
5355 p_hwfn->p_cxt_mngr->vf_count);
5356 offset += qed_dump_num_param(dump_buf + offset,
5358 "ptr-size-bytes", sizeof(void *));
5359 offset += qed_dump_num_param(dump_buf + offset,
5362 p_hwfn->p_cxt_mngr->pf_start_line);
5363 offset += qed_dump_num_param(dump_buf + offset,
5365 "page-mem-desc-size-dwords",
5366 PAGE_MEM_DESC_SIZE_DWORDS);
5367 offset += qed_dump_num_param(dump_buf + offset,
5370 p_hwfn->p_cxt_mngr->ilt_shadow_size);
5371 /* Additional/Less parameters require matching of number in call to
5372 * dump_common_global_params()
5375 /* Dump section containing number of PF CIDs per connection type */
5376 offset += qed_dump_section_hdr(dump_buf + offset,
5377 dump, "num_pf_cids_per_conn_type", 1);
5378 offset += qed_dump_num_param(dump_buf + offset,
5379 dump, "size", NUM_OF_CONNECTION_TYPES_E4);
/* One dword per connection type; also accumulates the total PF CID count */
5380 for (conn_type = 0, valid_conn_pf_cids = 0;
5381 conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
5383 p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
5386 *(dump_buf + offset) = num_pf_cids;
5387 valid_conn_pf_cids += num_pf_cids;
5390 /* Dump section containing number of VF CIDs per connection type */
5391 offset += qed_dump_section_hdr(dump_buf + offset,
5392 dump, "num_vf_cids_per_conn_type", 1);
5393 offset += qed_dump_num_param(dump_buf + offset,
5394 dump, "size", NUM_OF_CONNECTION_TYPES_E4);
5395 for (conn_type = 0, valid_conn_vf_cids = 0;
5396 conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
5398 p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
5401 *(dump_buf + offset) = num_vf_cids;
5402 valid_conn_vf_cids += num_vf_cids;
5405 /* Dump section containing physical memory descs for each ILT page */
5406 num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
5407 offset += qed_dump_section_hdr(dump_buf + offset,
5408 dump, "ilt_page_desc", 1);
5409 offset += qed_dump_num_param(dump_buf + offset,
5412 num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
5414 /* Copy memory descriptors to dump buffer */
5418 for (page_id = 0; page_id < num_pages;
5419 page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
5420 memcpy(dump_buf + offset,
5421 &ilt_pages[page_id],
5422 DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
/* Size-only pass (dump == false): account for the descs without copying.
 * NOTE(review): the if/else structure around these two paths is partially
 * elided in this extract.
 */
5424 offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
/* Convert valid CID counts to page counts (round up to whole pages) */
5427 valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
5429 valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
5432 /* Dump ILT pages IDs */
5433 offset += qed_ilt_dump_pages_section(p_hwfn,
5436 valid_conn_pf_pages,
5437 valid_conn_vf_pages,
5440 /* Dump ILT pages memory */
5441 offset += qed_ilt_dump_pages_section(p_hwfn,
5444 valid_conn_pf_pages,
5445 valid_conn_vf_pages,
5448 /* Dump last section */
5449 offset += qed_dump_last_section(dump_buf, offset, dump);
5454 /***************************** Public Functions *******************************/
/* Registers the debug data binary blob: splits it into the per-type debug
 * arrays (s_dbg_arrays) using the bin_buffer_hdr table at its start.
 * Must be called before any dump function that checks s_dbg_arrays.
 */
5456 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5458 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5461 /* convert binary data to debug arrays */
5462 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5463 s_dbg_arrays[buf_id].ptr =
5464 (u32 *)(bin_ptr + buf_array[buf_id].offset);
5465 s_dbg_arrays[buf_id].size_in_dwords =
5466 BYTES_TO_DWORDS(buf_array[buf_id].length);
5469 return DBG_STATUS_OK;
/* Reads FW info (version, asserts section, ...) from the first Storm that is
 * not in reset. Returns whether FW info was found (return statements elided
 * in this extract).
 */
5472 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5473 struct qed_ptt *p_ptt, struct fw_info *fw_info)
5475 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5478 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5479 struct storm_defs *storm = &s_storm_defs[storm_id];
5481 /* Skip Storm if it's in reset */
5482 if (dev_data->block_in_reset[storm->block_id])
5485 /* Read FW info for the current Storm */
5486 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
/* Sets a single GRC parameter, or — when the parameter is a preset
 * (e.g. EXCLUDE_ALL / CRASH) — applies that preset's value to every
 * non-persistent GRC parameter. Validates the param index and value range
 * and rejects attempts to disable a preset.
 */
5494 enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
5495 struct qed_ptt *p_ptt,
5496 enum dbg_grc_params grc_param, u32 val)
5498 enum dbg_status status;
5501 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
5502 "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5504 status = qed_dbg_dev_init(p_hwfn, p_ptt);
5505 if (status != DBG_STATUS_OK)
5508 /* Initializes the GRC parameters (if not initialized). Needed in order
5509 * to set the default parameter values for the first time.
5511 qed_dbg_grc_init_params(p_hwfn);
/* Validate the parameter index and its allowed [min, max] range */
5513 if (grc_param >= MAX_DBG_GRC_PARAMS)
5514 return DBG_STATUS_INVALID_ARGS;
5515 if (val < s_grc_param_defs[grc_param].min ||
5516 val > s_grc_param_defs[grc_param].max)
5517 return DBG_STATUS_INVALID_ARGS;
5519 if (s_grc_param_defs[grc_param].is_preset) {
5522 /* Disabling a preset is not allowed. Call
5523 * dbg_grc_set_params_default instead.
5526 return DBG_STATUS_INVALID_ARGS;
5528 /* Update all params with the preset values */
5529 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5532 /* Skip persistent params */
5533 if (s_grc_param_defs[i].is_persistent)
5536 /* Find preset value */
5537 if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5539 s_grc_param_defs[i].exclude_all_preset_val;
5540 else if (grc_param == DBG_GRC_PARAM_CRASH)
5542 s_grc_param_defs[i].crash_preset_val;
/* Unknown preset — reject */
5544 return DBG_STATUS_INVALID_ARGS;
5546 qed_grc_set_param(p_hwfn,
5547 (enum dbg_grc_params)i, preset_val);
5550 /* Regular param - set its value */
5551 qed_grc_set_param(p_hwfn, grc_param, val);
5554 return DBG_STATUS_OK;
5557 /* Assign default GRC param values */
/* Resets every non-persistent GRC parameter to its per-chip default.
 * Called after each dump to undo parameter changes made for that dump.
 */
5558 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5560 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5563 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5564 if (!s_grc_param_defs[i].is_persistent)
5565 dev_data->grc.param_val[i] =
5566 s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the buffer size (in dwords) required for a GRC dump by running
 * qed_grc_dump in size-only mode (dump == false, NULL buffer).
 * Requires the debug binary arrays to have been registered first.
 */
5569 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5570 struct qed_ptt *p_ptt,
5573 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5577 if (status != DBG_STATUS_OK)
5580 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5581 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5582 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5583 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5584 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5585 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5587 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a GRC dump into the caller's buffer: verifies the buffer is large
 * enough (via the size-only pass), runs the real dump, then restores default
 * GRC params. *num_dumped_dwords is zeroed up front so it is valid on error.
 */
5590 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5591 struct qed_ptt *p_ptt,
5593 u32 buf_size_in_dwords,
5594 u32 *num_dumped_dwords)
5596 u32 needed_buf_size_in_dwords;
5597 enum dbg_status status;
5599 *num_dumped_dwords = 0;
5601 status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5603 &needed_buf_size_in_dwords);
5604 if (status != DBG_STATUS_OK)
5607 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5608 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5611 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5613 /* Revert GRC params to their default */
5614 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) required for an Idle Check dump.
 * The size is computed once and cached in dev_data->idle_chk (it does not
 * change between calls), so subsequent calls return the cached value.
 */
5619 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5620 struct qed_ptt *p_ptt,
5623 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5624 struct idle_chk_data *idle_chk;
5625 enum dbg_status status;
5627 idle_chk = &dev_data->idle_chk;
5630 status = qed_dbg_dev_init(p_hwfn, p_ptt);
5631 if (status != DBG_STATUS_OK)
5634 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5635 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5636 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5637 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5638 return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* First call: run a size-only pass and cache the result */
5640 if (!idle_chk->buf_size_set) {
5641 idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5642 p_ptt, NULL, false);
5643 idle_chk->buf_size_set = true;
5646 *buf_size = idle_chk->buf_size;
5648 return DBG_STATUS_OK;
/* Performs an Idle Check dump into the caller's buffer after verifying its
 * size, refreshing the blocks' reset state first. GRC params are restored
 * to defaults afterwards.
 */
5651 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5652 struct qed_ptt *p_ptt,
5654 u32 buf_size_in_dwords,
5655 u32 *num_dumped_dwords)
5657 u32 needed_buf_size_in_dwords;
5658 enum dbg_status status;
5660 *num_dumped_dwords = 0;
5662 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5664 &needed_buf_size_in_dwords);
5665 if (status != DBG_STATUS_OK)
5668 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5669 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5671 /* Update reset state */
5672 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5674 /* Idle Check Dump */
5675 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5677 /* Revert GRC params to their default */
5678 qed_dbg_grc_set_params_default(p_hwfn);
5680 return DBG_STATUS_OK;
/* Computes the buffer size (in dwords) required for an MCP Trace dump via a
 * size-only pass of qed_mcp_trace_dump.
 */
5683 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5684 struct qed_ptt *p_ptt,
5687 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5691 if (status != DBG_STATUS_OK)
5694 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an MCP Trace dump into the caller's buffer. Unlike the other dump
 * wrappers, NVRAM_GET_IMAGE_FAILED from the size pass is tolerated — the
 * trace data can still be dumped without the NVRAM meta image.
 */
5697 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5698 struct qed_ptt *p_ptt,
5700 u32 buf_size_in_dwords,
5701 u32 *num_dumped_dwords)
5703 u32 needed_buf_size_in_dwords;
5704 enum dbg_status status;
5707 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5709 &needed_buf_size_in_dwords);
5710 if (status != DBG_STATUS_OK && status !=
5711 DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5714 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5715 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5717 /* Update reset state */
5718 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5721 status = qed_mcp_trace_dump(p_hwfn,
5722 p_ptt, dump_buf, true, num_dumped_dwords);
5724 /* Revert GRC params to their default */
5725 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) required for a Reg FIFO dump via a
 * size-only pass of qed_reg_fifo_dump.
 */
5730 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5731 struct qed_ptt *p_ptt,
5734 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5738 if (status != DBG_STATUS_OK)
5741 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a Reg FIFO dump into the caller's buffer after verifying its
 * size; refreshes the blocks' reset state first and restores default GRC
 * params afterwards.
 */
5744 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5745 struct qed_ptt *p_ptt,
5747 u32 buf_size_in_dwords,
5748 u32 *num_dumped_dwords)
5750 u32 needed_buf_size_in_dwords;
5751 enum dbg_status status;
5753 *num_dumped_dwords = 0;
5755 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5757 &needed_buf_size_in_dwords);
5758 if (status != DBG_STATUS_OK)
5761 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5762 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5764 /* Update reset state */
5765 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5767 status = qed_reg_fifo_dump(p_hwfn,
5768 p_ptt, dump_buf, true, num_dumped_dwords);
5770 /* Revert GRC params to their default */
5771 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) required for an IGU FIFO dump via a
 * size-only pass of qed_igu_fifo_dump.
 */
5776 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5777 struct qed_ptt *p_ptt,
5780 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5784 if (status != DBG_STATUS_OK)
5787 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an IGU FIFO dump into the caller's buffer after verifying its
 * size; refreshes the blocks' reset state first and restores default GRC
 * params afterwards.
 */
5790 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5791 struct qed_ptt *p_ptt,
5793 u32 buf_size_in_dwords,
5794 u32 *num_dumped_dwords)
5796 u32 needed_buf_size_in_dwords;
5797 enum dbg_status status;
5799 *num_dumped_dwords = 0;
5801 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5803 &needed_buf_size_in_dwords);
5804 if (status != DBG_STATUS_OK)
5807 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5808 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5810 /* Update reset state */
5811 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5813 status = qed_igu_fifo_dump(p_hwfn,
5814 p_ptt, dump_buf, true, num_dumped_dwords);
5815 /* Revert GRC params to their default */
5816 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) required for a Protection Override
 * dump via a size-only pass of qed_protection_override_dump.
 * (Return type appears on the preceding, elided line of this extract.)
 */
5822 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5823 struct qed_ptt *p_ptt,
5826 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5830 if (status != DBG_STATUS_OK)
5833 return qed_protection_override_dump(p_hwfn,
5834 p_ptt, NULL, false, buf_size);
/* Performs a Protection Override dump into the caller's buffer after
 * verifying its size; refreshes the blocks' reset state first and restores
 * default GRC params afterwards.
 */
5837 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5838 struct qed_ptt *p_ptt,
5840 u32 buf_size_in_dwords,
5841 u32 *num_dumped_dwords)
5843 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5844 enum dbg_status status;
5846 *num_dumped_dwords = 0;
5849 qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5852 if (status != DBG_STATUS_OK)
5855 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5856 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5858 /* Update reset state */
5859 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5861 status = qed_protection_override_dump(p_hwfn,
5864 true, num_dumped_dwords);
5866 /* Revert GRC params to their default */
5867 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) required for a FW Asserts dump via a
 * size-only pass of qed_fw_asserts_dump. Refreshes the blocks' reset state
 * first, since qed_fw_asserts_dump skips Storms in reset.
 */
5872 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5873 struct qed_ptt *p_ptt,
5876 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5880 if (status != DBG_STATUS_OK)
5883 /* Update reset state */
5884 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5886 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5888 return DBG_STATUS_OK;
/* Performs a FW Asserts dump into the caller's buffer after verifying its
 * size; restores default GRC params afterwards.
 */
5891 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5892 struct qed_ptt *p_ptt,
5894 u32 buf_size_in_dwords,
5895 u32 *num_dumped_dwords)
5897 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5898 enum dbg_status status;
5900 *num_dumped_dwords = 0;
5903 qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5906 if (status != DBG_STATUS_OK)
5909 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5910 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5912 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5914 /* Revert GRC params to their default */
5915 qed_dbg_grc_set_params_default(p_hwfn);
5917 return DBG_STATUS_OK;
/* Computes the buffer size (in dwords) required for an ILT dump via a
 * size-only pass of qed_ilt_dump.
 */
5920 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5921 struct qed_ptt *p_ptt,
5924 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5928 if (status != DBG_STATUS_OK)
5931 *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
5933 return DBG_STATUS_OK;
/* Performs an ILT dump into the caller's buffer after verifying its size;
 * restores default GRC params afterwards.
 */
5936 static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
5937 struct qed_ptt *p_ptt,
5939 u32 buf_size_in_dwords,
5940 u32 *num_dumped_dwords)
5942 u32 needed_buf_size_in_dwords;
5943 enum dbg_status status;
5945 *num_dumped_dwords = 0;
5947 status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
5949 &needed_buf_size_in_dwords);
5950 if (status != DBG_STATUS_OK)
5953 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5954 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5956 *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
5958 /* Revert GRC params to their default */
5959 qed_dbg_grc_set_params_default(p_hwfn);
5961 return DBG_STATUS_OK;
/* Reads the attention registers of the given block/attention type and fills
 * `results` with one entry per register whose mode matched (and, per the
 * visible flow, whose status is relevant — the zero-status skip appears on
 * an elided line). Each result records the status address, the number of
 * attention bits, the status value and the mask value.
 */
5964 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5965 struct qed_ptt *p_ptt,
5966 enum block_id block_id,
5967 enum dbg_attn_type attn_type,
5969 struct dbg_attn_block_result *results)
5971 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5972 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5973 const struct dbg_attn_reg *attn_reg_arr;
5975 if (status != DBG_STATUS_OK)
5978 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5979 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5980 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5981 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5983 attn_reg_arr = qed_get_block_attn_regs(block_id,
5984 attn_type, &num_attn_regs);
5986 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5987 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5988 struct dbg_attn_reg_result *reg_result;
5989 u32 sts_addr, sts_val;
5990 u16 modes_buf_offset;
/* Skip registers whose mode expression does not match the current chip mode */
5994 eval_mode = GET_FIELD(reg_data->mode.data,
5995 DBG_MODE_HDR_EVAL_MODE) > 0;
5996 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5997 DBG_MODE_HDR_MODES_BUF_OFFSET);
5998 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
6001 /* Mode match - read attention status register */
/* clear_status selects the read-to-clear address variant */
6002 sts_addr = DWORDS_TO_BYTES(clear_status ?
6003 reg_data->sts_clr_address :
6004 GET_FIELD(reg_data->data,
6005 DBG_ATTN_REG_STS_ADDRESS));
6006 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
6010 /* Non-zero attention status - add to results */
6011 reg_result = &results->reg_results[num_result_regs];
6012 SET_FIELD(reg_result->data,
6013 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
6014 SET_FIELD(reg_result->data,
6015 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
6016 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
6017 reg_result->block_attn_offset = reg_data->block_attn_offset;
6018 reg_result->sts_val = sts_val;
6019 reg_result->mask_val = qed_rd(p_hwfn,
6022 (reg_data->mask_address));
/* Fill the per-block result header */
6026 results->block_id = (u8)block_id;
6027 results->names_offset =
6028 qed_get_block_attn_data(block_id, attn_type)->names_offset;
6029 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
6030 SET_FIELD(results->data,
6031 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
6033 return DBG_STATUS_OK;
6036 /******************************* Data Types **********************************/
6043 /* REG fifo element */
/* Bitfield layouts of hardware FIFO records, decoded by the user-mode dump
 * parsers below. SHIFT/MASK pairs are applied with GET_FIELD-style macros;
 * shifts above 31 (e.g. PORT at 36) imply the underlying field spans a
 * 64-bit element.
 */
6044 struct reg_fifo_element {
6046 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
6047 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
6048 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
6049 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
6050 #define REG_FIFO_ELEMENT_PF_SHIFT 24
6051 #define REG_FIFO_ELEMENT_PF_MASK 0xf
6052 #define REG_FIFO_ELEMENT_VF_SHIFT 28
6053 #define REG_FIFO_ELEMENT_VF_MASK 0xff
6054 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
6055 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
6056 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
6057 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
6058 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
6059 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
6060 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
6061 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
6062 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
6063 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
6066 /* IGU fifo element */
6067 struct igu_fifo_element {
6069 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
6070 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
6071 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
6072 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
6073 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
6074 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
6075 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
6076 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
6077 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
6078 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
6081 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
6082 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
6083 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
6084 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
/* Decoded write-command payload of an IGU FIFO element */
6088 struct igu_fifo_wr_data {
6090 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
6091 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
6092 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
6093 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
6094 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
6095 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
6096 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
6097 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
6098 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
6099 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
6100 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
6101 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
/* Decoded cleanup-command payload of an IGU FIFO element */
6104 struct igu_fifo_cleanup_wr_data {
6106 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
6107 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
6108 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
6109 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
6110 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
6111 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
6112 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
6113 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
6116 /* Protection override element */
6117 struct protection_override_element {
6119 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
6120 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
6121 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
6122 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
6123 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
6124 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
6125 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
6126 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
6127 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
6128 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
6129 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
6130 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
/* IGU FIFO command sources (enumerators elided in this extract) */
6133 enum igu_fifo_sources {
/* Classification of IGU command addresses, used when decoding FIFO dumps */
6147 enum igu_fifo_addr_types {
6148 IGU_ADDR_TYPE_MSIX_MEM,
6149 IGU_ADDR_TYPE_WRITE_PBA,
6150 IGU_ADDR_TYPE_WRITE_INT_ACK,
6151 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
6152 IGU_ADDR_TYPE_READ_INT,
6153 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
6154 IGU_ADDR_TYPE_RESERVED
/* Maps an IGU address (range fields elided here) to its type */
6157 struct igu_fifo_addr_data {
6162 enum igu_fifo_addr_types type;
/* Parsed MCP trace metadata (module/format tables; counts elided here) */
6165 struct mcp_trace_meta {
6169 struct mcp_trace_format *formats;
6173 /* Debug Tools user data */
/* Per-hwfn user-mode state: parsed trace meta plus the raw meta buffer */
6174 struct dbg_tools_user_data {
6175 struct mcp_trace_meta mcp_trace_meta;
6176 const u32 *mcp_trace_user_meta_buf;
6179 /******************************** Constants **********************************/
/* Maximum length of a single formatted results line */
6181 #define MAX_MSG_LEN 1024
6183 #define MCP_TRACE_MAX_MODULE_LEN 8
6184 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
/* Width in bits of one parameter-size field within a trace format word */
6185 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
6186 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
/* REG FIFO addresses are recorded in dwords; multiply by 4 for bytes */
6188 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
6189 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
6191 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
6193 /***************************** Constant Arrays *******************************/
/* Wrapper for a user-mode debug data array (members elided in this extract) */
6195 struct user_dbg_array {
/* User-mode copies of the debug binary arrays; populated at init time */
6201 static struct user_dbg_array
6202 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
6204 /* Block names array */
/* Maps each HW block's printable name to its block_id enum value.
 * NOTE(review): the embedded original line numbers jump in places
 * (e.g. 6227 -> 6229), so some entries are not visible in this extract;
 * the full array is expected to cover every block_id.
 */
6205 static struct block_info s_block_info_arr[] = {
6207 {"miscs", BLOCK_MISCS},
6208 {"misc", BLOCK_MISC},
6210 {"pglue_b", BLOCK_PGLUE_B},
6211 {"cnig", BLOCK_CNIG},
6212 {"cpmu", BLOCK_CPMU},
6213 {"ncsi", BLOCK_NCSI},
6214 {"opte", BLOCK_OPTE},
6216 {"pcie", BLOCK_PCIE},
6218 {"mcp2", BLOCK_MCP2},
6219 {"pswhst", BLOCK_PSWHST},
6220 {"pswhst2", BLOCK_PSWHST2},
6221 {"pswrd", BLOCK_PSWRD},
6222 {"pswrd2", BLOCK_PSWRD2},
6223 {"pswwr", BLOCK_PSWWR},
6224 {"pswwr2", BLOCK_PSWWR2},
6225 {"pswrq", BLOCK_PSWRQ},
6226 {"pswrq2", BLOCK_PSWRQ2},
6227 {"pglcs", BLOCK_PGLCS},
6229 {"dmae", BLOCK_DMAE},
6238 {"dorq", BLOCK_DORQ},
6242 {"tsdm", BLOCK_TSDM},
6243 {"msdm", BLOCK_MSDM},
6244 {"usdm", BLOCK_USDM},
6245 {"xsdm", BLOCK_XSDM},
6246 {"ysdm", BLOCK_YSDM},
6247 {"psdm", BLOCK_PSDM},
6248 {"tsem", BLOCK_TSEM},
6249 {"msem", BLOCK_MSEM},
6250 {"usem", BLOCK_USEM},
6251 {"xsem", BLOCK_XSEM},
6252 {"ysem", BLOCK_YSEM},
6253 {"psem", BLOCK_PSEM},
6255 {"tmld", BLOCK_TMLD},
6256 {"muld", BLOCK_MULD},
6257 {"yuld", BLOCK_YULD},
6258 {"xyld", BLOCK_XYLD},
6259 {"ptld", BLOCK_PTLD},
6260 {"ypld", BLOCK_YPLD},
6262 {"pbf_pb1", BLOCK_PBF_PB1},
6263 {"pbf_pb2", BLOCK_PBF_PB2},
6267 {"rdif", BLOCK_RDIF},
6268 {"tdif", BLOCK_TDIF},
6270 {"ccfc", BLOCK_CCFC},
6271 {"tcfc", BLOCK_TCFC},
6274 {"rgfs", BLOCK_RGFS},
6275 {"rgsrc", BLOCK_RGSRC},
6276 {"tgfs", BLOCK_TGFS},
6277 {"tgsrc", BLOCK_TGSRC},
6278 {"umac", BLOCK_UMAC},
6279 {"xmac", BLOCK_XMAC},
6283 {"bmbn", BLOCK_BMBN},
6288 {"phy_pcie", BLOCK_PHY_PCIE},
6290 {"avs_wrap", BLOCK_AVS_WRAP},
6291 {"pxpreqbus", BLOCK_PXPREQBUS},
6292 {"misc_aeu", BLOCK_MISC_AEU},
6293 {"bar0_map", BLOCK_BAR0_MAP}
6296 /* Status string array */
/* Human-readable description for each dbg_status code; indexed by the
 * enum value named in each entry's comment.
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_TOO_MANY_INPUTS */
	"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",

	/* DBG_STATUS_INPUT_OVERLAP */
	"Overlapping debug bus inputs",

	/* DBG_STATUS_HW_ONLY_RECORDING */
	"Cannot record Storm data since the entire recording cycle is used by HW",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
	"Filters and triggers are not allowed when recording in 64b units",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED2 */
	"Reserved debug status - shouldn't be returned",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_FILTER_BUG */
	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
	"The selected trigger dword offset wasn't enabled in the recorded HW block",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use"
6461 /* Idle check severity names array */
6462 static const char * const s_idle_chk_severity_str[] = {
6464 "Error if no traffic",
6468 /* MCP Trace level names array */
6469 static const char * const s_mcp_trace_level_str[] = {
6475 /* Access type names array */
6476 static const char * const s_access_strs[] = {
6481 /* Privilege type names array */
6482 static const char * const s_privilege_strs[] = {
6489 /* Protection type names array */
6490 static const char * const s_protection_strs[] = {
6501 /* Master type names array */
6502 static const char * const s_master_strs[] = {
6521 /* REG FIFO error messages array */
/* One string per error bit in a FIFO element's REG_FIFO_ELEMENT_ERROR
 * field; qed_parse_reg_fifo_dump() prints the string for every bit set.
 */
static const char * const s_reg_fifo_error_strs[] = {
	"address doesn't belong to any block",
	"reserved address in block or write to read-only address",
	"privilege/protection mismatch",
	"path isolation error"
6530 /* IGU FIFO sources array */
6531 static const char * const s_igu_fifo_source_strs[] = {
6545 /* IGU FIFO error messages */
/* Indexed by the IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE field; the index is
 * bounds-checked against ARRAY_SIZE() before use in
 * qed_parse_igu_fifo_element().
 */
static const char * const s_igu_fifo_error_strs[] = {
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"SB index not valid",
	"SB relative index and FID not found",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
6564 /* IGU FIFO address data */
/* Describes the IGU command address space. Each entry holds an inclusive
 * address range [start_addr, end_addr], a PF description string, an
 * optional VF-specific description (NULL when identical to the PF one),
 * and the address type that drives per-type parsing in
 * qed_parse_igu_fifo_element().
 */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL,
	 IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL,
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL,
	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6600 /******************************** Variables **********************************/
6602 /* Temporary buffer, used for print size calculations */
6603 static char s_temp_buf[MAX_MSG_LEN];
6605 /**************************** Private Functions ******************************/
/* Advances position @a by @b inside a cyclic buffer of @size bytes and
 * returns the wrapped result.
 */
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	u32 advanced = a + b;

	return advanced % size;
}
/* Moves position @a backwards by @b inside a cyclic buffer of @size bytes
 * and returns the wrapped result. Adding @size first keeps the unsigned
 * subtraction from wrapping for the expected b <= size case.
 */
static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	u32 shifted = size + a;

	return (shifted - b) % size;
}
6617 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6618 * bytes) and returns them as a dword value. the specified buffer offset is
/* Reads the specified number of bytes (up to 4) from the specified cyclic
 * buffer and returns them assembled into a dword. *offset is advanced
 * (with wrap-around) past the consumed bytes.
 */
static u32 qed_read_from_cyclic_buf(void *buf,
				    u32 *offset,
				    u32 buf_size, u8 num_bytes_to_read)
{
	u8 *bytes = (u8 *)buf;
	u32 result = 0;
	u8 idx;

	/* The buffer holds network (big endian) order and this runs on a
	 * little endian CPU: each consumed byte lands at the next higher
	 * memory address inside the result dword.
	 */
	for (idx = 0; idx < num_bytes_to_read; idx++) {
		((u8 *)&result)[idx] = bytes[*offset];
		*offset = (*offset + 1) % buf_size;	/* cyclic advance */
	}

	return result;
}
6641 /* Reads and returns the next byte from the specified buffer.
6642 * The specified buffer offset is updated.
/* Returns the byte at *@offset in @buf and advances the offset by one. */
static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
{
	u8 *bytes = (u8 *)buf;
	u8 val = bytes[*offset];

	*offset += 1;

	return val;
}
6649 /* Reads and returns the next dword from the specified buffer.
6650 * The specified buffer offset is updated.
/* Reads and returns the next dword from the specified buffer.
 * The specified buffer offset is updated (advanced by 4 bytes).
 *
 * Uses memcpy rather than a direct u32 load: *offset is not guaranteed
 * to be 4-byte aligned (callers advance it past variable-length strings,
 * e.g. in qed_mcp_trace_alloc_meta_data), and dereferencing a misaligned
 * u32 pointer is undefined behavior on strict-alignment architectures.
 */
static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
{
	u32 dword_val;

	memcpy(&dword_val, (u8 *)buf + *offset, sizeof(dword_val));
	*offset += 4;

	return dword_val;
}
6661 /* Reads the next string from the specified buffer, and copies it to the
6662 * specified pointer. The specified buffer offset is updated.
/* Copies the next string (a fixed-size field of @size bytes) from @buf
 * into @dest, guaranteeing NUL termination, and advances *@offset past
 * the whole field.
 */
static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
	const char *src = (const char *)buf + *offset;

	/* strncpy alone does not terminate when the source fills the
	 * field, so the last byte is forced to NUL afterwards.
	 */
	strncpy(dest, src, size);
	dest[size - 1] = '\0';

	/* Consume the entire fixed-size field, not just the string length */
	*offset += size;
}
6673 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6674 * If the specified buffer in NULL, a temporary buffer pointer is returned.
6676 static char *qed_get_buf_ptr(void *buf, u32 offset)
6678 return buf ? (char *)buf + offset : s_temp_buf;
6681 /* Reads a param from the specified buffer. Returns the number of dwords read.
6682 * If the returned str_param is NULL, the param is numeric and its value is
6683 * returned in num_param.
6684 * Otheriwise, the param is a string and its pointer is returned in str_param.
static u32 qed_read_param(u32 *dump_buf,
			  const char **param_name,
			  const char **param_str_val, u32 *param_num_val)
	/* Params are stored as packed NUL-terminated strings, so the dump
	 * buffer is walked byte-wise.
	 */
	char *char_buf = (char *)dump_buf;

	/* Extract param name */
	*param_name = char_buf;
	offset += strlen(*param_name) + 1;

	/* Check param type: the byte after the name is a string/numeric flag */
	if (*(char_buf + offset++)) {
		/* String param - points directly into the dump buffer */
		*param_str_val = char_buf + offset;
		offset += strlen(*param_str_val) + 1;
		/* Pad up to the next dword boundary */
		offset += (4 - (offset & 0x3));
		/* Numeric param */
		*param_str_val = NULL;
		offset += (4 - (offset & 0x3));
		*param_num_val = *(u32 *)(char_buf + offset);

	/* Convert consumed bytes to dwords */
	return (u32)offset / 4;
6717 /* Reads a section header from the specified buffer.
6718 * Returns the number of dwords read.
/* Reads a section header from the specified buffer.
 * A section header is simply a param whose string value is irrelevant,
 * so that value is read into a throwaway local and discarded.
 * Returns the number of dwords read.
 */
static u32 qed_read_section_hdr(u32 *dump_buf,
				const char **section_name,
				u32 *num_section_params)
{
	const char *unused_str_val;

	return qed_read_param(dump_buf, section_name,
			      &unused_str_val, num_section_params);
}
6730 /* Reads section params from the specified buffer and prints them to the results
6731 * buffer. Returns the number of dwords read.
static u32 qed_print_section_params(u32 *dump_buf,
				    u32 num_section_params,
				    char *results_buf, u32 *num_chars_printed)
	u32 i, dump_offset = 0, results_offset = 0;

	for (i = 0; i < num_section_params; i++) {
		const char *param_name, *param_str_val;
		u32 param_num_val = 0;

		dump_offset += qed_read_param(dump_buf + dump_offset,
					      &param_str_val, &param_num_val);
		/* String params are printed as "name: value" */
			sprintf(qed_get_buf_ptr(results_buf,
				"%s: %s\n", param_name, param_str_val);
		/* Numeric params are printed the same way, except the
		 * "fw-timestamp" param which is skipped.
		 */
		else if (strcmp(param_name, "fw-timestamp"))
			sprintf(qed_get_buf_ptr(results_buf,
				"%s: %d\n", param_name, param_num_val);

	/* Trailing separator appended after all params */
	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),

	*num_chars_printed = results_offset;
6767 static struct dbg_tools_user_data *
6768 qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
6770 return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6773 /* Parses the idle check rules and returns the number of characters printed.
6774 * In case of parsing error, returns 0.
static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
					 bool print_fw_idle_chk,
					 u32 *num_errors, u32 *num_warnings)
	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str, *lsi_msg;
		u32 parsing_str_offset;

		/* Each dumped rule starts with a result header */
		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
			(const struct dbg_idle_chk_rule_parsing_data *)
			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
		/* Offset of this rule's text inside the parsing-strings blob */
		parsing_str_offset =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		    s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
		    [parsing_str_offset];
		lsi_msg = parsing_str;

		/* A severity outside the known range means corrupt data */
		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)

		/* Skip rule header */
		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)

		/* Print rule severity */
			sprintf(qed_get_buf_ptr(results_buf,
						results_offset), "%s: ",
				s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message (FW or LSI variant per print_fw_idle_chk) */
			parsing_str += strlen(parsing_str) + 1;
			sprintf(qed_get_buf_ptr(results_buf,
						results_offset), "%s.",
				print_fw_idle_chk ? parsing_str : lsi_msg);
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
			sprintf(qed_get_buf_ptr(results_buf,
						results_offset), " Registers:");
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;

				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
			is_mem = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			reg_id = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));

			/* Skip register names until the required reg_id is
			 * reached (names are packed NUL-terminated strings).
			 */
			for (; reg_id > curr_reg_id;
			     parsing_str += strlen(parsing_str) + 1);

			/* Print the register (or memory entry) name */
				sprintf(qed_get_buf_ptr(results_buf,
							results_offset), " %s",
			if (i < hdr->num_dumped_cond_regs && is_mem)
					sprintf(qed_get_buf_ptr(results_buf,
						"[%d]", hdr->mem_entry_id +
						reg_hdr->start_entry);
				sprintf(qed_get_buf_ptr(results_buf,
							results_offset), "=");
			/* Print the dumped value dwords, comma separated */
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
					sprintf(qed_get_buf_ptr(results_buf,
				if (j < reg_hdr->size - 1)
						sprintf(qed_get_buf_ptr
							results_offset), ",");

		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)

	return results_offset;
6914 /* Parses an idle check dump buffer.
6915 * If result_buf is not NULL, the idle check results are printed to it.
6916 * In any case, the required results buffer size is assigned to
6917 * parsed_results_bytes.
6918 * The parsing status is returned.
static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
					       u32 num_dumped_dwords,
					       u32 *parsed_results_bytes,
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
	u32 num_section_params = 0, num_rules;

	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	*parsed_results_bytes = 0;

	/* Both the parsing-strings and the idle-check parsing-data debug
	 * arrays are required for translating dumped rules into text.
	 */
	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     results_buf, &results_offset);

	/* Read idle_chk section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		u32 rules_print_size;

		/* Print FW output (a zero print size signals a parse error) */
			sprintf(qed_get_buf_ptr(results_buf,
				"FW_IDLE_CHECK:\n");
			qed_parse_idle_chk_dump_rules(dump_buf,
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output */
			sprintf(qed_get_buf_ptr(results_buf,
				"\nLSI_IDLE_CHECK:\n");
			qed_parse_idle_chk_dump_rules(dump_buf,
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print errors/warnings count */
		sprintf(qed_get_buf_ptr(results_buf,
			"\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			*num_errors, *num_warnings);
	else if (*num_warnings)
		sprintf(qed_get_buf_ptr(results_buf,
			"\nIdle Check completed successfully (with %d warnings)\n",
		sprintf(qed_get_buf_ptr(results_buf,
			"\nIdle Check completed successfully\n");

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
7032 /* Allocates and fills MCP Trace meta data based on the specified meta data
7034 * Returns debug status code.
static enum dbg_status
qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
			      const u32 *meta_buf)
	struct dbg_tools_user_data *dev_user_data;
	u32 offset = 0, signature, i;
	struct mcp_trace_meta *meta;

	dev_user_data = qed_dbg_get_user_data(p_hwfn);
	meta = &dev_user_data->mcp_trace_meta;
	meta_buf_bytes = (u8 *)meta_buf;

	/* Free the previous meta before loading a new one. */
	if (meta->is_allocated)
		qed_mcp_trace_free_meta_data(p_hwfn);

	memset(meta, 0, sizeof(*meta));

	/* Read first signature */
	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Read no. of modules and allocate memory for their pointers */
	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Allocate and read all module strings */
	for (i = 0; i < meta->modules_num; i++) {
		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);

		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
		if (!(*(meta->modules + i))) {
			/* Update number of modules to be released */
			/* NOTE(review): modules[0..i-1] were successfully
			 * allocated at this point, so recording i - 1 looks
			 * like it leaks the last allocated module string -
			 * confirm against the free routine.
			 */
			meta->modules_num = i ? i - 1 : 0;
			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
				      *(meta->modules + i));
		/* Clamp over-long module names; the write below is in
		 * bounds only because module_len exceeds
		 * MCP_TRACE_MAX_MODULE_LEN in this branch.
		 */
		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';

	/* Read second signature */
	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Read number of formats and allocate memory for all formats */
	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
	meta->formats = kcalloc(meta->formats_num,
				sizeof(struct mcp_trace_format),
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Allocate and read all strings */
	for (i = 0; i < meta->formats_num; i++) {
		struct mcp_trace_format *format_ptr = &meta->formats[i];

		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
			MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
		if (!format_ptr->format_str) {
			/* Update number of modules to be released */
			/* NOTE(review): same off-by-one concern as the
			 * modules loop above.
			 */
			meta->formats_num = i ? i - 1 : 0;
			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

		qed_read_str_from_buf(meta_buf_bytes,
				      format_len, format_ptr->format_str);

	/* Meta is now valid; parsing may proceed */
	meta->is_allocated = true;
	return DBG_STATUS_OK;
7123 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
7124 * are printed to it. The parsing status is returned.
7126 * trace_buf - MCP trace cyclic buffer
7127 * trace_buf_size - MCP trace cyclic buffer size in bytes
7128 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
7130 * data_size - size in bytes of data to parse.
7131 * parsed_buf - destination buffer for parsed data.
7132 * parsed_results_bytes - size of parsed data in bytes.
static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
					       u32 *parsed_results_bytes)
	struct dbg_tools_user_data *dev_user_data;
	struct mcp_trace_meta *meta;
	u32 param_mask, param_shift;
	enum dbg_status status;

	dev_user_data = qed_dbg_get_user_data(p_hwfn);
	meta = &dev_user_data->mcp_trace_meta;
	*parsed_results_bytes = 0;

	/* Meta data must have been loaded (qed_mcp_trace_alloc_meta_data) */
	if (!meta->is_allocated)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	status = DBG_STATUS_OK;

		struct mcp_trace_format *format_ptr;
		u8 format_level, format_module;
		u32 params[3] = { 0, 0, 0 };
		u32 header, format_idx, i;

		if (data_size < MFW_TRACE_ENTRY_SIZE)
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		/* Each entry starts with a header holding the event ID and
		 * the encoded parameter sizes.
		 */
		header = qed_read_from_cyclic_buf(trace_buf,
						  MFW_TRACE_ENTRY_SIZE);
		data_size -= MFW_TRACE_ENTRY_SIZE;
		format_idx = header & MFW_TRACE_EVENTID_MASK;

		/* Skip message if its index doesn't exist in the meta data */
		if (format_idx >= meta->formats_num) {
			    (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
				 MFW_TRACE_PRM_SIZE_SHIFT);

			if (data_size < format_size)
				return DBG_STATUS_MCP_TRACE_BAD_DATA;

			data_offset = qed_cyclic_add(data_offset,
			data_size -= format_size;

		format_ptr = &meta->formats[format_idx];

		/* Walk the per-parameter size fields packed into
		 * format_ptr->data, shifting mask/shift one field at a time.
		 */
		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
			/* Extract param size (0..3) */
			u8 param_size = (u8)((format_ptr->data & param_mask) >>

			/* If the param size is zero, there are no other

			/* Size is encoded using 2 bits, where 3 is used to
			if (param_size == 3)

			if (data_size < param_size)
				return DBG_STATUS_MCP_TRACE_BAD_DATA;

			params[i] = qed_read_from_cyclic_buf(trace_buf,
			data_size -= param_size;

		format_level = (u8)((format_ptr->data &
				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
		format_module = (u8)((format_ptr->data &
				      MCP_TRACE_FORMAT_MODULE_MASK) >>
				     MCP_TRACE_FORMAT_MODULE_SHIFT);
		/* NOTE(review): format_level is range-checked here but
		 * format_module is not validated against meta->modules_num
		 * before indexing meta->modules below - confirm.
		 */
		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
			return DBG_STATUS_MCP_TRACE_BAD_DATA;

		/* Print current message to results buffer */
		*parsed_results_bytes +=
			sprintf(qed_get_buf_ptr(parsed_buf,
						*parsed_results_bytes),
				s_mcp_trace_level_str[format_level],
				meta->modules[format_module]);
		*parsed_results_bytes +=
		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
			    format_ptr->format_str,
			    params[0], params[1], params[2]);

	/* Add string NULL terminator */
	(*parsed_results_bytes)++;
7251 /* Parses an MCP Trace dump buffer.
7252 * If result_buf is not NULL, the MCP Trace results are printed to it.
7253 * In any case, the required results buffer size is assigned to
7254 * parsed_results_bytes.
7255 * The parsing status is returned.
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
						u32 *parsed_results_bytes,
						bool free_meta_data)
	const char *section_name, *param_name, *param_str_val;
	u32 data_size, trace_data_dwords, trace_meta_dwords;
	u32 offset, results_offset, results_buf_bytes;
	u32 param_num_val, num_section_params;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     results_buf, &results_offset);

	/* Read trace_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info */
	trace = (struct mcp_trace *)dump_buf;
	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* The cyclic trace data immediately follows the trace header */
	trace_buf = (u8 *)dump_buf + sizeof(*trace);
	offset = trace->trace_oldest;
	/* Number of valid bytes between oldest entry and producer */
	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data - fall back to the
		 * user-provided meta buffer (from the image file).
		 */
		struct dbg_tools_user_data *dev_user_data =
			qed_dbg_get_user_data(p_hwfn);

		if (!dev_user_data->mcp_trace_user_meta_buf)
			return DBG_STATUS_MCP_TRACE_NO_META;

		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
		/* Dump includes meta data */
		meta_buf = dump_buf;

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
	if (status != DBG_STATUS_OK)

	status = qed_parse_mcp_trace_buf(p_hwfn,
					 results_buf + results_offset :
					 &results_buf_bytes);
	if (status != DBG_STATUS_OK)

		qed_mcp_trace_free_meta_data(p_hwfn);

	/* Total = already-printed global params + parsed trace bytes */
	*parsed_results_bytes = results_offset + results_buf_bytes;

	return DBG_STATUS_OK;
7357 /* Parses a Reg FIFO dump buffer.
7358 * If result_buf is not NULL, the Reg FIFO results are printed to it.
7359 * In any case, the required results buffer size is assigned to
7360 * parsed_results_bytes.
7361 * The parsing status is returned.
static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
					       u32 *parsed_results_bytes)
	const char *section_name, *param_name, *param_str_val;
	u32 param_num_val, num_section_params, num_elements;
	struct reg_fifo_element *elements;
	/* NOTE(review): i is a u8 but is compared against num_elements
	 * (u32) below - confirm the element count can never exceed 255,
	 * otherwise the decode loop never terminates.
	 */
	u8 i, j, err_val, vf_val;
	u32 results_offset = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     results_buf, &results_offset);

	/* Read reg_fifo_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "reg_fifo_data"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	/* The data size must be a whole number of FIFO elements */
	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
		return DBG_STATUS_REG_FIFO_BAD_DATA;
	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
	elements = (struct reg_fifo_element *)dump_buf;

	/* Decode elements */
	for (i = 0; i < num_elements; i++) {
		bool err_printed = false;

		/* Discover if element belongs to a VF or a PF */
		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
			sprintf(vf_str, "%s", "N/A");
			sprintf(vf_str, "%d", vf_val);

		/* Add parsed element to parsed buffer */
			sprintf(qed_get_buf_ptr(results_buf,
				"raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
				(u32)GET_FIELD(elements[i].data,
					       REG_FIFO_ELEMENT_ADDRESS) *
				REG_FIFO_ELEMENT_ADDR_FACTOR,
				s_access_strs[GET_FIELD(elements[i].data,
							REG_FIFO_ELEMENT_ACCESS)],
				(u32)GET_FIELD(elements[i].data,
					       REG_FIFO_ELEMENT_PF),
				(u32)GET_FIELD(elements[i].data,
					       REG_FIFO_ELEMENT_PORT),
				s_privilege_strs[GET_FIELD(elements[i].data,
							   REG_FIFO_ELEMENT_PRIVILEGE)],
				s_protection_strs[GET_FIELD(elements[i].data,
							    REG_FIFO_ELEMENT_PROTECTION)],
				s_master_strs[GET_FIELD(elements[i].data,
							REG_FIFO_ELEMENT_MASTER)]);

		/* Print an error string for every error bit that is set,
		 * comma-separating consecutive errors.
		 */
		err_val = GET_FIELD(elements[i].data,
				    REG_FIFO_ELEMENT_ERROR);
		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
		     j++, err_val >>= 1) {
			if (err_val & 0x1) {
					sprintf(qed_get_buf_ptr
						results_offset), ", ");
					sprintf(qed_get_buf_ptr
						(results_buf, results_offset), "%s",
						s_reg_fifo_error_strs[j]);

		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");

	results_offset += sprintf(qed_get_buf_ptr(results_buf,
				  "fifo contained %d elements", num_elements);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  u32 *results_offset)
	const struct igu_fifo_addr_data *found_addr = NULL;
	u8 source, err_type, i, is_cleanup;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u32 wr_data, prod_cons;
	bool is_wr_cmd, is_pf;

	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
	dword12 = ((u64)element->dword2 << 32) | element->dword1;
	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);

	/* Reject source/error values that would index past the name arrays */
	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Find address data: first range containing cmd_addr wins */
	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
		const struct igu_fifo_addr_data *curr_addr =
			&s_igu_fifo_addr_data[i];

		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
		    curr_addr->end_addr)
			found_addr = curr_addr;

		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Prepare parsed address data */
	switch (found_addr->type) {
	case IGU_ADDR_TYPE_MSIX_MEM:
		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
	case IGU_ADDR_TYPE_WRITE_INT_ACK:
	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
		/* SB number is the offset within the matched address range */
		sprintf(parsed_addr_data,
			" SB = 0x%x", cmd_addr - found_addr->start_addr);
		parsed_addr_data[0] = '\0';

		parsed_wr_data[0] = '\0';

	/* Prepare parsed write data */
	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);

	if (source == IGU_SRC_ATTN) {
		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
			/* Cleanup command: decode cleanup-specific fields */
			u8 cleanup_val, cleanup_type;

				    IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
				    IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

			sprintf(parsed_wr_data,
				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
				cleanup_val ? "set" : "clear",
			/* Producer/consumer update command */
			u8 update_flag, en_dis_int_for_sb, segment;

			update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
				    IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
			segment = GET_FIELD(wr_data,
					    IGU_FIFO_WR_DATA_SEGMENT);
			timer_mask = GET_FIELD(wr_data,
					       IGU_FIFO_WR_DATA_TIMER_MASK);

			sprintf(parsed_wr_data,
				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
				update_flag ? "update" : "nop",
				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
				segment ? "attn" : "regular",

	/* Add parsed element to parsed buffer */
	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
				   element->dword2, element->dword1,
				   is_pf ? "pf" : "vf",
				   GET_FIELD(element->dword0,
					     IGU_FIFO_ELEMENT_DWORD0_FID),
				   s_igu_fifo_source_strs[source],
				   is_wr_cmd ? "wr" : "rd",
				   (!is_pf && found_addr->vf_desc)
				   ? found_addr->vf_desc
				   s_igu_fifo_error_strs[err_type]);

	return DBG_STATUS_OK;
7598 /* Parses an IGU FIFO dump buffer.
7599 * If result_buf is not NULL, the IGU FIFO results are printed to it.
7600 * In any case, the required results buffer size is assigned to
7601 * parsed_results_bytes.
7602 * The parsing status is returned.
/* Parse an IGU FIFO dump buffer: validates the "global_params" and
 * "igu_fifo_data" sections, decodes each IGU_FIFO_ELEMENT_DWORDS-sized
 * element via qed_parse_igu_fifo_element(), and reports the total required
 * results-buffer size through parsed_results_bytes.
 *
 * NOTE(review): extraction-damaged block - lines are missing and the
 * "&section_name"/"&param_*" arguments were mangled into "§.."/"¶.."
 * by an HTML-entity pass.  Kept byte-identical; restore from pristine source.
 */
7604 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7606 u32 *parsed_results_bytes)
7608 const char *section_name, *param_name, *param_str_val;
7609 u32 param_num_val, num_section_params, num_elements;
7610 struct igu_fifo_element *elements;
7611 enum dbg_status status;
7612 u32 results_offset = 0;
7615 /* Read global_params section */
7616 dump_buf += qed_read_section_hdr(dump_buf,
7617 §ion_name, &num_section_params);
7618 if (strcmp(section_name, "global_params"))
7619 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7621 /* Print global params */
7622 dump_buf += qed_print_section_params(dump_buf,
7624 results_buf, &results_offset);
7626 /* Read igu_fifo_data section */
7627 dump_buf += qed_read_section_hdr(dump_buf,
7628 §ion_name, &num_section_params);
7629 if (strcmp(section_name, "igu_fifo_data"))
7630 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7631 dump_buf += qed_read_param(dump_buf,
7632 ¶m_name, ¶m_str_val, ¶m_num_val);
7633 if (strcmp(param_name, "size"))
7634 return DBG_STATUS_IGU_FIFO_BAD_DATA;
/* Size must be a whole number of elements. */
7635 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7636 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7637 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7638 elements = (struct igu_fifo_element *)dump_buf;
7640 /* Decode elements */
7641 for (i = 0; i < num_elements; i++) {
7642 status = qed_parse_igu_fifo_element(&elements[i],
7645 if (status != DBG_STATUS_OK)
7649 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7651 "fifo contained %d elements", num_elements);
7653 /* Add 1 for string NULL termination */
7654 *parsed_results_bytes = results_offset + 1;
7656 return DBG_STATUS_OK;
/* Parse a Protection Override dump buffer: validates the "global_params" and
 * "protection_override_data" sections, then prints one formatted line per
 * protection-override window element.  The required results-buffer size is
 * always reported via parsed_results_bytes.
 *
 * NOTE(review): extraction-damaged block (missing lines, "&.." arguments
 * mangled into "§"/"¶").  Kept byte-identical.
 */
7659 static enum dbg_status
7660 qed_parse_protection_override_dump(u32 *dump_buf,
7662 u32 *parsed_results_bytes)
7664 const char *section_name, *param_name, *param_str_val;
7665 u32 param_num_val, num_section_params, num_elements;
7666 struct protection_override_element *elements;
7667 u32 results_offset = 0;
7670 /* Read global_params section */
7671 dump_buf += qed_read_section_hdr(dump_buf,
7672 §ion_name, &num_section_params);
7673 if (strcmp(section_name, "global_params"))
7674 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7676 /* Print global params */
7677 dump_buf += qed_print_section_params(dump_buf,
7679 results_buf, &results_offset);
7681 /* Read protection_override_data section */
7682 dump_buf += qed_read_section_hdr(dump_buf,
7683 §ion_name, &num_section_params);
7684 if (strcmp(section_name, "protection_override_data"))
7685 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7686 dump_buf += qed_read_param(dump_buf,
7687 ¶m_name, ¶m_str_val, ¶m_num_val);
7688 if (strcmp(param_name, "size"))
7689 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7690 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7691 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7692 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7693 elements = (struct protection_override_element *)dump_buf;
7695 /* Decode elements */
7696 for (i = 0; i < num_elements; i++) {
/* Stored address field is scaled; multiply back to a register address. */
7697 u32 address = GET_FIELD(elements[i].data,
7698 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7699 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7702 sprintf(qed_get_buf_ptr(results_buf,
7704 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7706 (u32)GET_FIELD(elements[i].data,
7707 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7708 (u32)GET_FIELD(elements[i].data,
7709 PROTECTION_OVERRIDE_ELEMENT_READ),
7710 (u32)GET_FIELD(elements[i].data,
7711 PROTECTION_OVERRIDE_ELEMENT_WRITE),
7712 s_protection_strs[GET_FIELD(elements[i].data,
7713 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7714 s_protection_strs[GET_FIELD(elements[i].data,
7715 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7718 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7720 "protection override contained %d elements",
7723 /* Add 1 for string NULL termination */
7724 *parsed_results_bytes = results_offset + 1;
7726 return DBG_STATUS_OK;
7729 /* Parses a FW Asserts dump buffer.
7730 * If result_buf is not NULL, the FW Asserts results are printed to it.
7731 * In any case, the required results buffer size is assigned to
7732 * parsed_results_bytes.
7733 * The parsing status is returned.
/* Parse a FW Asserts dump buffer: after the "global_params" section, iterates
 * "fw_asserts" sections (one per storm, identified by a "storm" letter and a
 * "size" param) until the "last" section is seen.  Each storm's raw dwords
 * are printed in hex.  parsed_results_bytes always receives the required
 * results-buffer size.
 *
 * NOTE(review): extraction-damaged block - lines missing, "&section_name"
 * mangled to "§ion_name".  Kept byte-identical.
 */
7735 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7737 u32 *parsed_results_bytes)
7739 u32 num_section_params, param_num_val, i, results_offset = 0;
7740 const char *param_name, *param_str_val, *section_name;
7741 bool last_section_found = false;
7743 *parsed_results_bytes = 0;
7745 /* Read global_params section */
7746 dump_buf += qed_read_section_hdr(dump_buf,
7747 §ion_name, &num_section_params);
7748 if (strcmp(section_name, "global_params"))
7749 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7751 /* Print global params */
7752 dump_buf += qed_print_section_params(dump_buf,
7754 results_buf, &results_offset);
7756 while (!last_section_found) {
7757 dump_buf += qed_read_section_hdr(dump_buf,
7759 &num_section_params);
7760 if (!strcmp(section_name, "fw_asserts")) {
7761 /* Extract params */
7762 const char *storm_letter = NULL;
7763 u32 storm_dump_size = 0;
7765 for (i = 0; i < num_section_params; i++) {
7766 dump_buf += qed_read_param(dump_buf,
7770 if (!strcmp(param_name, "storm"))
7771 storm_letter = param_str_val;
7772 else if (!strcmp(param_name, "size"))
7773 storm_dump_size = param_num_val;
7776 DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
/* Both params are mandatory for a valid storm section. */
7779 if (!storm_letter || !storm_dump_size)
7780 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7784 sprintf(qed_get_buf_ptr(results_buf,
7786 "\n%sSTORM_ASSERT: size=%d\n",
7787 storm_letter, storm_dump_size);
7788 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7790 sprintf(qed_get_buf_ptr(results_buf,
7792 "%08x\n", *dump_buf);
7793 } else if (!strcmp(section_name, "last")) {
7794 last_section_found = true;
7796 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7800 /* Add 1 for string NULL termination */
7801 *parsed_results_bytes = results_offset + 1;
7803 return DBG_STATUS_OK;
7806 /***************************** Public Functions *******************************/
/* Register the user-side debug binary blob: for every BIN_DBG buffer type,
 * record a pointer into bin_ptr (using the per-buffer offset from the
 * bin_buffer_hdr array at its start) and the buffer length in dwords.
 *
 * NOTE(review): extraction-damaged block (e.g. the buf_id declaration line
 * is missing).  Kept byte-identical.
 */
7808 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7810 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7813 /* Convert binary data to debug arrays */
7814 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7815 s_user_dbg_arrays[buf_id].ptr =
7816 (u32 *)(bin_ptr + buf_array[buf_id].offset);
7817 s_user_dbg_arrays[buf_id].size_in_dwords =
7818 BYTES_TO_DWORDS(buf_array[buf_id].length);
7821 return DBG_STATUS_OK;
7824 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
7826 p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
7828 if (!p_hwfn->dbg_user_info)
7829 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7831 return DBG_STATUS_OK;
7834 const char *qed_dbg_get_status_str(enum dbg_status status)
7837 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7840 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7842 u32 num_dumped_dwords,
7843 u32 *results_buf_size)
7845 u32 num_errors, num_warnings;
7847 return qed_parse_idle_chk_dump(dump_buf,
7851 &num_errors, &num_warnings);
7854 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7856 u32 num_dumped_dwords,
7861 u32 parsed_buf_size;
7863 return qed_parse_idle_chk_dump(dump_buf,
7867 num_errors, num_warnings);
7870 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7871 const u32 *meta_buf)
7873 struct dbg_tools_user_data *dev_user_data =
7874 qed_dbg_get_user_data(p_hwfn);
7876 dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7879 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7881 u32 num_dumped_dwords,
7882 u32 *results_buf_size)
7884 return qed_parse_mcp_trace_dump(p_hwfn,
7885 dump_buf, NULL, results_buf_size, true);
7888 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7890 u32 num_dumped_dwords,
7893 u32 parsed_buf_size;
7895 return qed_parse_mcp_trace_dump(p_hwfn,
7897 results_buf, &parsed_buf_size, true);
7900 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7904 u32 parsed_buf_size;
7906 return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7907 &parsed_buf_size, false);
/* Print a single MCP Trace buffer fragment into results_buf via
 * qed_parse_mcp_trace_buf(); the parsed size is discarded.
 *
 * NOTE(review): extraction-damaged block - several argument lines of the
 * qed_parse_mcp_trace_buf() call are missing.  Kept byte-identical.
 */
7910 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7912 u32 num_dumped_bytes,
7915 u32 parsed_results_bytes;
7917 return qed_parse_mcp_trace_buf(p_hwfn,
7922 results_buf, &parsed_results_bytes);
7925 /* Frees the specified MCP Trace meta data */
/* Free the per-hwfn MCP Trace meta data: every module name string, the
 * modules array, every format string, the formats array; then mark the meta
 * as not allocated.  A no-op when nothing was allocated.
 *
 * NOTE(review): extraction-damaged block (loop index declaration and some
 * closing braces missing).  Kept byte-identical.
 */
7926 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7928 struct dbg_tools_user_data *dev_user_data;
7929 struct mcp_trace_meta *meta;
7932 dev_user_data = qed_dbg_get_user_data(p_hwfn);
7933 meta = &dev_user_data->mcp_trace_meta;
7934 if (!meta->is_allocated)
7937 /* Release modules */
7938 if (meta->modules) {
7939 for (i = 0; i < meta->modules_num; i++)
7940 kfree(meta->modules[i]);
7941 kfree(meta->modules);
7944 /* Release formats */
7945 if (meta->formats) {
7946 for (i = 0; i < meta->formats_num; i++)
7947 kfree(meta->formats[i].format_str);
7948 kfree(meta->formats);
7951 meta->is_allocated = false;
7954 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7956 u32 num_dumped_dwords,
7957 u32 *results_buf_size)
7959 return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7962 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7964 u32 num_dumped_dwords,
7967 u32 parsed_buf_size;
7969 return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7972 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7974 u32 num_dumped_dwords,
7975 u32 *results_buf_size)
7977 return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7980 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7982 u32 num_dumped_dwords,
7985 u32 parsed_buf_size;
7987 return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7991 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7993 u32 num_dumped_dwords,
7994 u32 *results_buf_size)
7996 return qed_parse_protection_override_dump(dump_buf,
7997 NULL, results_buf_size);
8000 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
8002 u32 num_dumped_dwords,
8005 u32 parsed_buf_size;
8007 return qed_parse_protection_override_dump(dump_buf,
8012 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
8014 u32 num_dumped_dwords,
8015 u32 *results_buf_size)
8017 return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
8020 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
8022 u32 num_dumped_dwords,
8025 u32 parsed_buf_size;
8027 return qed_parse_fw_asserts_dump(dump_buf,
8028 results_buf, &parsed_buf_size);
/* Decode attention/parity results for one block: walks every register with a
 * non-zero attention status, maps each set status bit to its name via the
 * BIN_BUF_DBG_ATTN_* user debug arrays, and prints one line per asserted bit
 * (including whether it is masked).  Fails with DBG_STATUS_DBG_ARRAY_NOT_SET
 * when the debug arrays were not registered.
 *
 * NOTE(review): extraction-damaged block - declarations, some assignments
 * and the print call's leading lines are missing.  Kept byte-identical.
 */
8031 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
8032 struct dbg_attn_block_result *results)
8034 struct user_dbg_array *block_attn, *pstrings;
8035 const u32 *block_attn_name_offsets;
8036 enum dbg_attn_type attn_type;
8037 const char *block_name;
8040 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
8041 attn_type = (enum dbg_attn_type)
8042 GET_FIELD(results->data,
8043 DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
8044 block_name = s_block_info_arr[results->block_id].name;
/* All three debug arrays must have been set via qed_dbg_user_set_bin_ptr. */
8046 if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
8047 !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
8048 !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
8049 return DBG_STATUS_DBG_ARRAY_NOT_SET;
8051 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
8052 block_attn_name_offsets = &block_attn->ptr[results->names_offset];
8054 /* Go over registers with a non-zero attention status */
8055 for (i = 0; i < num_regs; i++) {
8056 struct dbg_attn_bit_mapping *bit_mapping;
8057 struct dbg_attn_reg_result *reg_result;
8058 u8 num_reg_attn, bit_idx = 0;
8060 reg_result = &results->reg_results[i];
8061 num_reg_attn = GET_FIELD(reg_result->data,
8062 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
8063 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
8064 bit_mapping = &((struct dbg_attn_bit_mapping *)
8065 block_attn->ptr)[reg_result->block_attn_offset];
8067 pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
8069 /* Go over attention status bits */
8070 for (j = 0; j < num_reg_attn; j++) {
8071 u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
8072 DBG_ATTN_BIT_MAPPING_VAL);
8073 const char *attn_name, *attn_type_str, *masked_str;
8074 u32 attn_name_offset, sts_addr;
8076 /* Check if bit mask should be advanced (due to unused
8079 if (GET_FIELD(bit_mapping[j].data,
8080 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
8081 bit_idx += (u8)attn_idx_val;
8085 /* Check current bit index */
8086 if (!(reg_result->sts_val & BIT(bit_idx))) {
8091 /* Find attention name */
8093 block_attn_name_offsets[attn_idx_val];
8094 attn_name = &((const char *)
8095 pstrings->ptr)[attn_name_offset];
8096 attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
8097 "Interrupt" : "Parity";
8098 masked_str = reg_result->mask_val & BIT(bit_idx) ?
8100 sts_addr = GET_FIELD(reg_result->data,
8101 DBG_ATTN_REG_RESULT_STS_ADDRESS);
8103 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
8104 block_name, attn_type_str, attn_name,
8105 sts_addr, bit_idx, masked_str);
8111 return DBG_STATUS_OK;
8114 /* Wrapper for unifying the idle_chk and mcp_trace api */
8115 static enum dbg_status
8116 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
8118 u32 num_dumped_dwords,
8121 u32 num_errors, num_warnnings;
8123 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
8124 results_buf, &num_errors,
/* Feature meta data lookup table: one row per debug feature, holding its
 * name and the four callbacks (size query, dump, optional result printer,
 * optional parsed-size query) used by the generic dump/format flow.
 * A NULL print_results/results_buf_size pair (grc, ilt) means the feature's
 * output stays binary and is never formatted.
 *
 * NOTE(review): extraction-damaged block - the "static struct {" opener and
 * several name-string lines are missing.  Kept byte-identical.
 */
8128 /* Feature meta data lookup table */
8131 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
8132 struct qed_ptt *p_ptt, u32 *size);
8133 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
8134 struct qed_ptt *p_ptt, u32 *dump_buf,
8135 u32 buf_size, u32 *dumped_dwords);
8136 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
8137 u32 *dump_buf, u32 num_dumped_dwords,
8139 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
8141 u32 num_dumped_dwords,
8142 u32 *results_buf_size);
8143 } qed_features_lookup[] = {
8145 "grc", qed_dbg_grc_get_dump_buf_size,
8146 qed_dbg_grc_dump, NULL, NULL}, {
8148 qed_dbg_idle_chk_get_dump_buf_size,
8149 qed_dbg_idle_chk_dump,
8150 qed_print_idle_chk_results_wrapper,
8151 qed_get_idle_chk_results_buf_size}, {
8153 qed_dbg_mcp_trace_get_dump_buf_size,
8154 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
8155 qed_get_mcp_trace_results_buf_size}, {
8157 qed_dbg_reg_fifo_get_dump_buf_size,
8158 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
8159 qed_get_reg_fifo_results_buf_size}, {
8161 qed_dbg_igu_fifo_get_dump_buf_size,
8162 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
8163 qed_get_igu_fifo_results_buf_size}, {
8164 "protection_override",
8165 qed_dbg_protection_override_get_dump_buf_size,
8166 qed_dbg_protection_override_dump,
8167 qed_print_protection_override_results,
8168 qed_get_protection_override_results_buf_size}, {
8170 qed_dbg_fw_asserts_get_dump_buf_size,
8171 qed_dbg_fw_asserts_dump,
8172 qed_print_fw_asserts_results,
8173 qed_get_fw_asserts_results_buf_size}, {
8175 qed_dbg_ilt_get_dump_buf_size,
8176 qed_dbg_ilt_dump, NULL, NULL},};
/* Dump a formatted text buffer to the kernel log in 80-character slices:
 * pr_notice() for the first slice, pr_cont() for the rest so they join into
 * continuous output.
 *
 * NOTE(review): extraction-damaged block (braces/guard lines missing).
 * Kept byte-identical.
 */
8178 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
8180 u32 i, precision = 80;
8185 pr_notice("\n%.*s", precision, p_text_buf);
8186 for (i = precision; i < text_size; i += precision)
8187 pr_cont("%.*s", precision, p_text_buf + i);
8191 #define QED_RESULTS_BUF_MIN_SIZE 16
8192 /* Generic function for decoding debug feature info */
/* Convert a feature's raw binary dump into formatted text in place:
 * queries the parsed size, rounds it up to a dword multiple (minimum
 * QED_RESULTS_BUF_MIN_SIZE), decodes into a temporary vzalloc'ed buffer,
 * pads the tail (from the original NUL) with '\n', optionally logs the text,
 * then replaces feature->dump_buf with the formatted buffer.
 *
 * NOTE(review): extraction-damaged block - declarations of rc/text_buf,
 * error-path lines and closing braces are missing.  Kept byte-identical.
 */
8193 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
8194 enum qed_dbg_features feature_idx)
8196 struct qed_dbg_feature *feature =
8197 &p_hwfn->cdev->dbg_params.features[feature_idx];
8198 u32 text_size_bytes, null_char_pos, i;
8202 /* Check if feature supports formatting capability */
8203 if (!qed_features_lookup[feature_idx].results_buf_size)
8204 return DBG_STATUS_OK;
8206 /* Obtain size of formatted output */
8207 rc = qed_features_lookup[feature_idx].
8208 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
8209 feature->dumped_dwords, &text_size_bytes);
8210 if (rc != DBG_STATUS_OK)
8213 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
8214 null_char_pos = text_size_bytes - 1;
8215 text_size_bytes = (text_size_bytes + 3) & ~0x3;
8217 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
8218 DP_NOTICE(p_hwfn->cdev,
8219 "formatted size of feature was too small %d. Aborting\n",
8221 return DBG_STATUS_INVALID_ARGS;
8224 /* Allocate temp text buf */
8225 text_buf = vzalloc(text_size_bytes);
8227 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
8229 /* Decode feature opcodes to string on temp buf */
8230 rc = qed_features_lookup[feature_idx].
8231 print_results(p_hwfn, (u32 *)feature->dump_buf,
8232 feature->dumped_dwords, text_buf);
8233 if (rc != DBG_STATUS_OK) {
8238 /* Replace the original null character with a '\n' character.
8239 * The bytes that were added as a result of the dword alignment are also
8240 * padded with '\n' characters.
8242 for (i = null_char_pos; i < text_size_bytes; i++)
8245 /* Dump printable feature to log */
8246 if (p_hwfn->cdev->dbg_params.print_data)
8247 qed_dbg_print_feature(text_buf, text_size_bytes);
8249 /* Free the old dump_buf and point the dump_buf to the newly allocagted
8250 * and formatted text buffer.
8252 vfree(feature->dump_buf);
8253 feature->dump_buf = text_buf;
8254 feature->buf_size = text_size_bytes;
8255 feature->dumped_dwords = text_size_bytes / 4;
8259 #define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
8261 /* Generic function for performing the dump of a debug feature. */
/* Generic dump driver for one debug feature: frees any stale dump buffer,
 * queries the required size from the HSI layer, vmalloc's the buffer,
 * performs the dump, then formats it via format_feature().
 * DBG_STATUS_NVRAM_GET_IMAGE_FAILED from the dump step is treated as success
 * (binary data is valid, only parsing would fail - see comment below).
 *
 * NOTE(review): extraction-damaged block - rc declaration, some error
 * returns and closing braces are missing.  Kept byte-identical.
 */
8262 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
8263 struct qed_ptt *p_ptt,
8264 enum qed_dbg_features feature_idx)
8266 struct qed_dbg_feature *feature =
8267 &p_hwfn->cdev->dbg_params.features[feature_idx];
8268 u32 buf_size_dwords;
8271 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
8272 qed_features_lookup[feature_idx].name);
8274 /* Dump_buf was already allocated need to free (this can happen if dump
8275 * was called but file was never read).
8276 * We can't use the buffer as is since size may have changed.
8278 if (feature->dump_buf) {
8279 vfree(feature->dump_buf);
8280 feature->dump_buf = NULL;
8283 /* Get buffer size from hsi, allocate accordingly, and perform the
8286 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
8288 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
8290 feature->buf_size = buf_size_dwords * sizeof(u32);
8291 feature->dump_buf = vmalloc(feature->buf_size);
8292 if (!feature->dump_buf)
8293 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
8295 rc = qed_features_lookup[feature_idx].
8296 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
8297 feature->buf_size / sizeof(u32),
8298 &feature->dumped_dwords);
8300 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
8301 * In this case the buffer holds valid binary data, but we wont able
8302 * to parse it (since parsing relies on data in NVRAM which is only
8303 * accessible when MFW is responsive). skip the formatting but return
8304 * success so that binary data is provided.
8306 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
8307 return DBG_STATUS_OK;
8309 if (rc != DBG_STATUS_OK)
8313 rc = format_feature(p_hwfn, feature_idx);
8317 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8319 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
8322 int qed_dbg_grc_size(struct qed_dev *cdev)
8324 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
8327 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8329 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
8333 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
8335 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
8338 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8340 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
8344 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
8346 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
8349 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8351 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
8355 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
8357 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
8360 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
8361 enum qed_nvm_images image_id, u32 *length)
8363 struct qed_nvm_image_att image_att;
8367 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
8371 *length = image_att.length;
/* Dump one NVM image into buffer: queries its length, rounds it up to a
 * dword multiple, reads the image from MCP, and byte-swaps each dword to
 * big-endian (except QED_NVM_IMAGE_NVM_META - see comment below).
 * *num_dumped_bytes is 0 on any failure.
 *
 * NOTE(review): extraction-damaged block - declarations (rc, len_rounded,
 * i, val), error-return lines and braces are missing.  Kept byte-identical.
 */
8376 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
8377 u32 *num_dumped_bytes,
8378 enum qed_nvm_images image_id)
8380 struct qed_hwfn *p_hwfn =
8381 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8386 *num_dumped_bytes = 0;
8387 rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
8391 DP_NOTICE(p_hwfn->cdev,
8392 "Collecting a debug feature [\"nvram image %d\"]\n",
8395 len_rounded = roundup(len_rounded, sizeof(u32));
8396 rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
8400 /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
8401 if (image_id != QED_NVM_IMAGE_NVM_META)
8402 for (i = 0; i < len_rounded; i += 4) {
8403 val = cpu_to_be32(*(u32 *)(buffer + i));
8404 *(u32 *)(buffer + i) = val;
8407 *num_dumped_bytes = len_rounded;
8412 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
8413 u32 *num_dumped_bytes)
8415 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
8419 int qed_dbg_protection_override_size(struct qed_dev *cdev)
8421 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
8424 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
8425 u32 *num_dumped_bytes)
8427 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
8431 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
8433 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
8436 int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8438 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
8441 int qed_dbg_ilt_size(struct qed_dev *cdev)
8443 return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
8446 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
8447 u32 *num_dumped_bytes)
8449 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
8453 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
8455 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
/* Regdump header layout: bits 0..23 = feature size, bits 24..29 = feature id,
 * bit 30 = omit-engine flag, bit 31 = engine id (see
 * qed_calc_regdump_header() below).
 *
 * NOTE(review): extraction-damaged region - part of the leading comment and
 * most debug_print_features enumerators are missing.  Kept byte-identical.
 */
8458 /* Defines the amount of bytes allocated for recording the length of debugfs
8461 #define REGDUMP_HEADER_SIZE sizeof(u32)
8462 #define REGDUMP_HEADER_SIZE_SHIFT 0
8463 #define REGDUMP_HEADER_SIZE_MASK 0xffffff
8464 #define REGDUMP_HEADER_FEATURE_SHIFT 24
8465 #define REGDUMP_HEADER_FEATURE_MASK 0x3f
8466 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
8467 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
8468 #define REGDUMP_HEADER_ENGINE_SHIFT 31
8469 #define REGDUMP_HEADER_ENGINE_MASK 0x1
8470 #define REGDUMP_MAX_SIZE 0x1000000
8471 #define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
8473 enum debug_print_features {
8479 PROTECTION_OVERRIDE = 5,
8490 static u32 qed_calc_regdump_header(enum debug_print_features feature,
8491 int engine, u32 feature_size, u8 omit_engine)
8493 /* Insert the engine, feature and mode inside the header and combine it
8494 * with feature size.
8496 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
8497 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
8498 (engine << REGDUMP_HEADER_ENGINE_SHIFT);
/* Collect every debug feature into one contiguous buffer.  Per engine:
 * two idle_chk passes, reg_fifo, igu_fifo, protection_override, fw_asserts,
 * optionally ILT (when enabled and under ILT_DUMP_MAX_SIZE), and GRC last
 * (see comment below).  Then, once: mcp_trace and the NVM images (cfg1,
 * default cfg, meta, mdump).  Each section is preceded by a
 * qed_calc_regdump_header() word.  Individual feature failures are logged
 * and skipped rather than aborting the whole dump.
 *
 * NOTE(review): extraction-damaged block - rc declaration, several closing
 * braces and some error-path lines are missing.  Kept byte-identical.
 */
8501 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
8503 u8 cur_engine, omit_engine = 0, org_engine;
8504 struct qed_hwfn *p_hwfn =
8505 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8506 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
8507 int grc_params[MAX_DBG_GRC_PARAMS], i;
8508 u32 offset = 0, feature_size;
8511 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8512 grc_params[i] = dev_data->grc.param_val[i];
8514 if (cdev->num_hwfns == 1)
8517 org_engine = qed_get_debug_engine(cdev);
8518 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8519 /* Collect idle_chks and grcDump for each hw function */
8520 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8521 "obtaining idle_chk and grcdump for current engine\n");
8522 qed_set_debug_engine(cdev, cur_engine);
8524 /* First idle_chk */
8525 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8526 REGDUMP_HEADER_SIZE, &feature_size);
8528 *(u32 *)((u8 *)buffer + offset) =
8529 qed_calc_regdump_header(IDLE_CHK, cur_engine,
8530 feature_size, omit_engine);
8531 offset += (feature_size + REGDUMP_HEADER_SIZE);
8533 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8536 /* Second idle_chk */
8537 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8538 REGDUMP_HEADER_SIZE, &feature_size);
8540 *(u32 *)((u8 *)buffer + offset) =
8541 qed_calc_regdump_header(IDLE_CHK, cur_engine,
8542 feature_size, omit_engine);
8543 offset += (feature_size + REGDUMP_HEADER_SIZE);
8545 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8549 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8550 REGDUMP_HEADER_SIZE, &feature_size);
8552 *(u32 *)((u8 *)buffer + offset) =
8553 qed_calc_regdump_header(REG_FIFO, cur_engine,
8554 feature_size, omit_engine);
8555 offset += (feature_size + REGDUMP_HEADER_SIZE);
8557 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8561 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8562 REGDUMP_HEADER_SIZE, &feature_size);
8564 *(u32 *)((u8 *)buffer + offset) =
8565 qed_calc_regdump_header(IGU_FIFO, cur_engine,
8566 feature_size, omit_engine);
8567 offset += (feature_size + REGDUMP_HEADER_SIZE);
8569 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
8572 /* protection_override dump */
8573 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8574 REGDUMP_HEADER_SIZE,
8577 *(u32 *)((u8 *)buffer + offset) =
8578 qed_calc_regdump_header(PROTECTION_OVERRIDE,
8580 feature_size, omit_engine);
8581 offset += (feature_size + REGDUMP_HEADER_SIZE);
8584 "qed_dbg_protection_override failed. rc = %d\n",
8588 /* fw_asserts dump */
8589 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8590 REGDUMP_HEADER_SIZE, &feature_size);
8592 *(u32 *)((u8 *)buffer + offset) =
8593 qed_calc_regdump_header(FW_ASSERTS, cur_engine,
8594 feature_size, omit_engine);
8595 offset += (feature_size + REGDUMP_HEADER_SIZE);
8597 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8601 feature_size = qed_dbg_ilt_size(cdev);
8602 if (!cdev->disable_ilt_dump &&
8603 feature_size < ILT_DUMP_MAX_SIZE) {
8604 rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
8605 REGDUMP_HEADER_SIZE, &feature_size);
8607 *(u32 *)((u8 *)buffer + offset) =
8608 qed_calc_regdump_header(ILT_DUMP,
8612 offset += feature_size + REGDUMP_HEADER_SIZE;
8614 DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
8619 /* GRC dump - must be last because when mcp stuck it will
8620 * clutter idle_chk, reg_fifo, ...
8622 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8623 REGDUMP_HEADER_SIZE, &feature_size);
8625 *(u32 *)((u8 *)buffer + offset) =
8626 qed_calc_regdump_header(GRC_DUMP, cur_engine,
8627 feature_size, omit_engine);
8628 offset += (feature_size + REGDUMP_HEADER_SIZE);
8630 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
8634 qed_set_debug_engine(cdev, org_engine);
8636 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8637 REGDUMP_HEADER_SIZE, &feature_size);
8639 *(u32 *)((u8 *)buffer + offset) =
8640 qed_calc_regdump_header(MCP_TRACE, cur_engine,
8641 feature_size, omit_engine);
8642 offset += (feature_size + REGDUMP_HEADER_SIZE);
8644 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8648 rc = qed_dbg_nvm_image(cdev,
8649 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8650 &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8652 *(u32 *)((u8 *)buffer + offset) =
8653 qed_calc_regdump_header(NVM_CFG1, cur_engine,
8654 feature_size, omit_engine);
8655 offset += (feature_size + REGDUMP_HEADER_SIZE);
8656 } else if (rc != -ENOENT) {
8658 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8659 QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8663 rc = qed_dbg_nvm_image(cdev,
8664 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8665 &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8667 *(u32 *)((u8 *)buffer + offset) =
8668 qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8669 feature_size, omit_engine);
8670 offset += (feature_size + REGDUMP_HEADER_SIZE);
8671 } else if (rc != -ENOENT) {
8673 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8674 QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8679 rc = qed_dbg_nvm_image(cdev,
8680 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8681 &feature_size, QED_NVM_IMAGE_NVM_META);
8683 *(u32 *)((u8 *)buffer + offset) =
8684 qed_calc_regdump_header(NVM_META, cur_engine,
8685 feature_size, omit_engine);
8686 offset += (feature_size + REGDUMP_HEADER_SIZE);
8687 } else if (rc != -ENOENT) {
8689 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8690 QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8694 rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
8695 REGDUMP_HEADER_SIZE, &feature_size,
8696 QED_NVM_IMAGE_MDUMP);
8698 *(u32 *)((u8 *)buffer + offset) =
8699 qed_calc_regdump_header(MDUMP, cur_engine,
8700 feature_size, omit_engine);
8701 offset += (feature_size + REGDUMP_HEADER_SIZE);
8702 } else if (rc != -ENOENT) {
8704 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8705 QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
/* Compute the total buffer size needed by qed_dbg_all_data(): sums, per
 * engine, the per-feature sizes plus one REGDUMP_HEADER_SIZE header each
 * (ILT only while under ILT_DUMP_MAX_SIZE), then adds the engine-independent
 * mcp_trace and NVM image sections.  If the grand total exceeds
 * REGDUMP_MAX_SIZE, ILT dumping is disabled and its contribution removed.
 *
 * NOTE(review): extraction-damaged block - some braces, the image_len
 * guards and the final return are missing.  Kept byte-identical.
 */
8711 int qed_dbg_all_data_size(struct qed_dev *cdev)
8713 struct qed_hwfn *p_hwfn =
8714 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8715 u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8716 u8 cur_engine, org_engine;
8718 org_engine = qed_get_debug_engine(cdev);
8719 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8720 /* Engine specific */
8721 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8722 "calculating idle_chk and grcdump register length for current engine\n");
8723 qed_set_debug_engine(cdev, cur_engine);
8724 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8725 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8726 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8727 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8728 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8729 REGDUMP_HEADER_SIZE +
8730 qed_dbg_protection_override_size(cdev) +
8731 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8733 ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8734 if (ilt_len < ILT_DUMP_MAX_SIZE) {
8735 total_ilt_len += ilt_len;
8736 regs_len += ilt_len;
8740 qed_set_debug_engine(cdev, org_engine);
8743 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8744 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8746 regs_len += REGDUMP_HEADER_SIZE + image_len;
8747 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8749 regs_len += REGDUMP_HEADER_SIZE + image_len;
8750 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8752 regs_len += REGDUMP_HEADER_SIZE + image_len;
8753 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8755 regs_len += REGDUMP_HEADER_SIZE + image_len;
8757 if (regs_len > REGDUMP_MAX_SIZE) {
8758 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8759 "Dump exceeds max size 0x%x, disable ILT dump\n",
8761 cdev->disable_ilt_dump = true;
8762 regs_len -= total_ilt_len;
8768 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8769 enum qed_dbg_features feature, u32 *num_dumped_bytes)
8771 struct qed_hwfn *p_hwfn =
8772 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8773 struct qed_dbg_feature *qed_feature =
8774 &cdev->dbg_params.features[feature];
8775 enum dbg_status dbg_rc;
8776 struct qed_ptt *p_ptt;
8780 p_ptt = qed_ptt_acquire(p_hwfn);
8785 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8786 if (dbg_rc != DBG_STATUS_OK) {
8787 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8788 qed_dbg_get_status_str(dbg_rc));
8789 *num_dumped_bytes = 0;
8794 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8795 "copying debugfs feature to external buffer\n");
8796 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8797 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
8801 qed_ptt_release(p_hwfn, p_ptt);
8805 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8807 struct qed_hwfn *p_hwfn =
8808 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8809 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8810 struct qed_dbg_feature *qed_feature =
8811 &cdev->dbg_params.features[feature];
8812 u32 buf_size_dwords;
8818 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8820 if (rc != DBG_STATUS_OK)
8821 buf_size_dwords = 0;
8823 /* Feature will not be dumped if it exceeds maximum size */
8824 if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8825 buf_size_dwords = 0;
8827 qed_ptt_release(p_hwfn, p_ptt);
8828 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8829 return qed_feature->buf_size;
8832 u8 qed_get_debug_engine(struct qed_dev *cdev)
8834 return cdev->dbg_params.engine_for_debug;
8837 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8839 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8841 cdev->dbg_params.engine_for_debug = engine_number;
8844 void qed_dbg_pf_init(struct qed_dev *cdev)
8846 const u8 *dbg_values;
8848 /* Debug values are after init values.
8849 * The offset is the first dword of the file.
8851 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8852 qed_dbg_set_bin_ptr((u8 *)dbg_values);
8853 qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8856 void qed_dbg_pf_exit(struct qed_dev *cdev)
8858 struct qed_dbg_feature *feature = NULL;
8859 enum qed_dbg_features feature_idx;
8861 /* Debug features' buffers may be allocated if debug feature was used
8862 * but dump wasn't called.
8864 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8865 feature = &cdev->dbg_params.features[feature_idx];
8866 if (feature->dump_buf) {
8867 vfree(feature->dump_buf);
8868 feature->dump_buf = NULL;