/*
 * Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
20 #include "cxgb4_cudbg.h"
22 static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
23 { CUDBG_EDC0, cudbg_collect_edc0_meminfo },
24 { CUDBG_EDC1, cudbg_collect_edc1_meminfo },
25 { CUDBG_MC0, cudbg_collect_mc0_meminfo },
26 { CUDBG_MC1, cudbg_collect_mc1_meminfo },
27 { CUDBG_HMA, cudbg_collect_hma_meminfo },
30 static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
31 { CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
32 { CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
33 { CUDBG_REG_DUMP, cudbg_collect_reg_dump },
34 { CUDBG_CIM_LA, cudbg_collect_cim_la },
35 { CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
36 { CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
37 { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
38 { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
39 { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
40 { CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
41 { CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
42 { CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
43 { CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
44 { CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
45 { CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
46 { CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
47 { CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
48 { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
49 { CUDBG_RSS, cudbg_collect_rss },
50 { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
51 { CUDBG_PATH_MTU, cudbg_collect_path_mtu },
52 { CUDBG_PM_STATS, cudbg_collect_pm_stats },
53 { CUDBG_HW_SCHED, cudbg_collect_hw_sched },
54 { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
55 { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
56 { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
57 { CUDBG_TP_LA, cudbg_collect_tp_la },
58 { CUDBG_MEMINFO, cudbg_collect_meminfo },
59 { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
60 { CUDBG_CLK, cudbg_collect_clk_info },
61 { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
62 { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
63 { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
64 { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
65 { CUDBG_TID_INFO, cudbg_collect_tid },
66 { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
67 { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
68 { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
69 { CUDBG_VPD_DATA, cudbg_collect_vpd_data },
70 { CUDBG_LE_TCAM, cudbg_collect_le_tcam },
71 { CUDBG_CCTRL, cudbg_collect_cctrl },
72 { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
73 { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
74 { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
75 { CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
76 { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
79 static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
81 struct cudbg_tcam tcam_region = { 0 };
82 u32 value, n = 0, len = 0;
86 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
99 len = adap->params.devlog.size;
102 if (is_t6(adap->params.chip)) {
103 len = adap->params.cim_la_size / 10 + 1;
104 len *= 11 * sizeof(u32);
106 len = adap->params.cim_la_size / 8;
107 len *= 8 * sizeof(u32);
109 len += sizeof(u32); /* for reading CIM LA configuration */
111 case CUDBG_CIM_MA_LA:
112 len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
115 len = sizeof(struct cudbg_cim_qcfg);
117 case CUDBG_CIM_IBQ_TP0:
118 case CUDBG_CIM_IBQ_TP1:
119 case CUDBG_CIM_IBQ_ULP:
120 case CUDBG_CIM_IBQ_SGE0:
121 case CUDBG_CIM_IBQ_SGE1:
122 case CUDBG_CIM_IBQ_NCSI:
123 len = CIM_IBQ_SIZE * 4 * sizeof(u32);
125 case CUDBG_CIM_OBQ_ULP0:
126 len = cudbg_cim_obq_size(adap, 0);
128 case CUDBG_CIM_OBQ_ULP1:
129 len = cudbg_cim_obq_size(adap, 1);
131 case CUDBG_CIM_OBQ_ULP2:
132 len = cudbg_cim_obq_size(adap, 2);
134 case CUDBG_CIM_OBQ_ULP3:
135 len = cudbg_cim_obq_size(adap, 3);
137 case CUDBG_CIM_OBQ_SGE:
138 len = cudbg_cim_obq_size(adap, 4);
140 case CUDBG_CIM_OBQ_NCSI:
141 len = cudbg_cim_obq_size(adap, 5);
143 case CUDBG_CIM_OBQ_RXQ0:
144 len = cudbg_cim_obq_size(adap, 6);
146 case CUDBG_CIM_OBQ_RXQ1:
147 len = cudbg_cim_obq_size(adap, 7);
150 value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
151 if (value & EDRAM0_ENABLE_F) {
152 value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
153 len = EDRAM0_SIZE_G(value);
155 len = cudbg_mbytes_to_bytes(len);
158 value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
159 if (value & EDRAM1_ENABLE_F) {
160 value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
161 len = EDRAM1_SIZE_G(value);
163 len = cudbg_mbytes_to_bytes(len);
166 value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
167 if (value & EXT_MEM0_ENABLE_F) {
168 value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
169 len = EXT_MEM0_SIZE_G(value);
171 len = cudbg_mbytes_to_bytes(len);
174 value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
175 if (value & EXT_MEM1_ENABLE_F) {
176 value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
177 len = EXT_MEM1_SIZE_G(value);
179 len = cudbg_mbytes_to_bytes(len);
182 len = t4_chip_rss_size(adap) * sizeof(u16);
184 case CUDBG_RSS_VF_CONF:
185 len = adap->params.arch.vfcount *
186 sizeof(struct cudbg_rss_vf_conf);
189 len = NMTUS * sizeof(u16);
192 len = sizeof(struct cudbg_pm_stats);
195 len = sizeof(struct cudbg_hw_sched);
197 case CUDBG_TP_INDIRECT:
198 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
200 n = sizeof(t5_tp_pio_array) +
201 sizeof(t5_tp_tm_pio_array) +
202 sizeof(t5_tp_mib_index_array);
205 n = sizeof(t6_tp_pio_array) +
206 sizeof(t6_tp_tm_pio_array) +
207 sizeof(t6_tp_mib_index_array);
212 n = n / (IREG_NUM_ELEM * sizeof(u32));
213 len = sizeof(struct ireg_buf) * n;
215 case CUDBG_SGE_INDIRECT:
216 len = sizeof(struct ireg_buf) * 2;
219 len = sizeof(struct cudbg_ulprx_la);
222 len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
225 len = sizeof(struct cudbg_meminfo);
227 case CUDBG_CIM_PIF_LA:
228 len = sizeof(struct cudbg_cim_pif_la);
229 len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
232 len = sizeof(struct cudbg_clk_info);
234 case CUDBG_PCIE_INDIRECT:
235 n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
236 len = sizeof(struct ireg_buf) * n * 2;
238 case CUDBG_PM_INDIRECT:
239 n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
240 len = sizeof(struct ireg_buf) * n * 2;
243 len = sizeof(struct cudbg_tid_info_region_rev1);
245 case CUDBG_PCIE_CONFIG:
246 len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
248 case CUDBG_DUMP_CONTEXT:
249 len = cudbg_dump_context_size(adap);
252 len = sizeof(struct cudbg_mps_tcam) *
253 adap->params.arch.mps_tcam_size;
256 len = sizeof(struct cudbg_vpd_data);
259 cudbg_fill_le_tcam_info(adap, &tcam_region);
260 len = sizeof(struct cudbg_tcam) +
261 sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
264 len = sizeof(u16) * NMTUS * NCCTRL_WIN;
266 case CUDBG_MA_INDIRECT:
267 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
268 n = sizeof(t6_ma_ireg_array) /
269 (IREG_NUM_ELEM * sizeof(u32));
270 len = sizeof(struct ireg_buf) * n * 2;
274 len = sizeof(struct cudbg_ulptx_la);
276 case CUDBG_UP_CIM_INDIRECT:
278 if (is_t5(adap->params.chip))
279 n = sizeof(t5_up_cim_reg_array) /
280 ((IREG_NUM_ELEM + 1) * sizeof(u32));
281 else if (is_t6(adap->params.chip))
282 n = sizeof(t6_up_cim_reg_array) /
283 ((IREG_NUM_ELEM + 1) * sizeof(u32));
284 len = sizeof(struct ireg_buf) * n;
286 case CUDBG_PBT_TABLE:
287 len = sizeof(struct cudbg_pbt_tables);
290 len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
292 case CUDBG_HMA_INDIRECT:
293 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
294 n = sizeof(t6_hma_ireg_array) /
295 (IREG_NUM_ELEM * sizeof(u32));
296 len = sizeof(struct ireg_buf) * n;
300 value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
301 if (value & HMA_MUX_F) {
302 /* In T6, there's no MC1. So, HMA shares MC1
305 value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
306 len = EXT_MEM1_SIZE_G(value);
308 len = cudbg_mbytes_to_bytes(len);
317 u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
322 if (flag & CXGB4_ETH_DUMP_HW) {
323 for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
324 entity = cxgb4_collect_hw_dump[i].entity;
325 len += cxgb4_get_entity_length(adap, entity);
329 if (flag & CXGB4_ETH_DUMP_MEM) {
330 for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
331 entity = cxgb4_collect_mem_dump[i].entity;
332 len += cxgb4_get_entity_length(adap, entity);
339 static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
340 struct cudbg_buffer *dbg_buff,
341 const struct cxgb4_collect_entity *e_arr,
342 u32 arr_size, void *buf, u32 *tot_size)
344 struct cudbg_error cudbg_err = { 0 };
345 struct cudbg_entity_hdr *entity_hdr;
346 u32 i, total_size = 0;
349 for (i = 0; i < arr_size; i++) {
350 const struct cxgb4_collect_entity *e = &e_arr[i];
352 entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
353 entity_hdr->entity_type = e->entity;
354 entity_hdr->start_offset = dbg_buff->offset;
355 memset(&cudbg_err, 0, sizeof(struct cudbg_error));
356 ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
358 entity_hdr->size = 0;
359 dbg_buff->offset = entity_hdr->start_offset;
361 cudbg_align_debug_buffer(dbg_buff, entity_hdr);
364 /* Log error and continue with next entity */
365 if (cudbg_err.sys_err)
366 ret = CUDBG_SYSTEM_ERROR;
368 entity_hdr->hdr_flags = ret;
369 entity_hdr->sys_err = cudbg_err.sys_err;
370 entity_hdr->sys_warn = cudbg_err.sys_warn;
371 total_size += entity_hdr->size;
374 *tot_size += total_size;
377 int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
380 struct cudbg_init cudbg_init = { 0 };
381 struct cudbg_buffer dbg_buff = { 0 };
382 u32 size, min_size, total_size = 0;
383 struct cudbg_hdr *cudbg_hdr;
387 cudbg_init.adap = adap;
388 cudbg_init.outbuf = buf;
389 cudbg_init.outbuf_size = size;
392 dbg_buff.size = size;
395 cudbg_hdr = (struct cudbg_hdr *)buf;
396 cudbg_hdr->signature = CUDBG_SIGNATURE;
397 cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
398 cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
399 cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
400 cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
401 cudbg_hdr->chip_ver = adap->params.chip;
402 cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
403 cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
404 cudbg_hdr->compress_type = cudbg_init.compress_type;
406 min_size = sizeof(struct cudbg_hdr) +
407 sizeof(struct cudbg_entity_hdr) *
408 cudbg_hdr->max_entities;
412 dbg_buff.offset += min_size;
413 total_size = dbg_buff.offset;
415 if (flag & CXGB4_ETH_DUMP_HW)
416 cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
417 cxgb4_collect_hw_dump,
418 ARRAY_SIZE(cxgb4_collect_hw_dump),
422 if (flag & CXGB4_ETH_DUMP_MEM)
423 cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
424 cxgb4_collect_mem_dump,
425 ARRAY_SIZE(cxgb4_collect_mem_dump),
429 cudbg_hdr->data_len = total_size;
430 *buf_size = total_size;
434 void cxgb4_init_ethtool_dump(struct adapter *adapter)
436 adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
437 adapter->eth_dump.version = adapter->params.fw_vers;
438 adapter->eth_dump.len = 0;