/*
 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
21 #include "cudbg_lib_common.h"
22 #include "cudbg_lib.h"
23 #include "cudbg_entity.h"
/*
 * Commit the pinned scratch buffer's contents into the destination debug
 * buffer, then release the scratch buffer back to it.
 *
 * NOTE(review): this whole file appears lossily extracted -- braces,
 * returns and error checks are missing and original line numbers are
 * fused into each line.  Restore from the pristine source before building.
 */
25 static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
26 struct cudbg_buffer *dbg_buff)
28 cudbg_update_buff(pin_buff, dbg_buff);
29 cudbg_put_buff(pin_buff, dbg_buff);
/*
 * Report whether the firmware is attached and usable for collection,
 * based on the adapter's FW_OK flag and the use_bd (backdoor) setting.
 *
 * NOTE(review): the return statements are missing from this extract --
 * presumably returns 0 when !FW_OK or use_bd, non-zero otherwise; confirm
 * against the original source.
 */
32 static int is_fw_attached(struct cudbg_init *pdbg_init)
34 struct adapter *padap = pdbg_init->adap;
36 if (!(padap->flags & FW_OK) || padap->use_bd)
/*
 * Pad the debug buffer so the current entity ends on a 4-byte boundary,
 * then record the pad count and the entity's final size in its header.
 */
42 /* This function will add additional padding bytes into debug_buffer to make it
45 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
46 struct cudbg_entity_hdr *entity_hdr)
/* Bytes past the last 4-byte boundary within this entity. */
51 remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
/* NOTE(review): the line deriving 'padding' from 'remain' is missing in
 * this extract -- presumably padding = 4 - remain when remain != 0. */
54 memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
56 dbg_buff->offset += padding;
57 entity_hdr->num_pad = padding;
59 entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
/*
 * Return a pointer to the i-th entity header (1-based index), which lives
 * immediately after the main cudbg header in the output buffer.
 */
62 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
64 struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
66 return (struct cudbg_entity_hdr *)
67 ((char *)outbuf + cudbg_hdr->hdr_len +
68 (sizeof(struct cudbg_entity_hdr) * (i - 1)));
/*
 * Collect the adapter register dump: size the scratch buffer by chip
 * generation (T4 vs T5/T6 regmap size), fill it via t4_get_regs(), and
 * commit it to the debug buffer.
 */
71 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
72 struct cudbg_buffer *dbg_buff,
73 struct cudbg_error *cudbg_err)
75 struct adapter *padap = pdbg_init->adap;
76 struct cudbg_buffer temp_buff = { 0 };
80 if (is_t4(padap->params.chip))
81 buf_size = T4_REGMAP_SIZE;
82 else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
83 buf_size = T5_REGMAP_SIZE;
/* NOTE(review): the 'if (rc)' check after allocation is missing here. */
85 rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
88 t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
89 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the firmware device log: (re)initialize devlog parameters,
 * then read the log region out of adapter memory under win0_lock and
 * commit it to the debug buffer.  On any sub-call failure the error is
 * recorded in cudbg_err->sys_err and the scratch buffer is released.
 */
93 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
94 struct cudbg_buffer *dbg_buff,
95 struct cudbg_error *cudbg_err)
97 struct adapter *padap = pdbg_init->adap;
98 struct cudbg_buffer temp_buff = { 0 };
99 struct devlog_params *dparams;
102 rc = t4_init_devlog_params(padap);
104 cudbg_err->sys_err = rc;
108 dparams = &padap->params.devlog;
109 rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
113 /* Collect FW devlog */
/* A start address of 0 means no devlog region is configured. */
114 if (dparams->start != 0) {
115 spin_lock(&padap->win0_lock);
116 rc = t4_memory_rw(padap, padap->params.drv_memwin,
117 dparams->memtype, dparams->start,
119 (__be32 *)(char *)temp_buff.data,
121 spin_unlock(&padap->win0_lock);
123 cudbg_err->sys_err = rc;
124 cudbg_put_buff(&temp_buff, dbg_buff);
128 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the CIM logic-analyzer trace.  T6 stores 11 u32s per 10 rows;
 * earlier chips 8 u32s per 8 rows.  The LA config register is saved at
 * the start of the buffer, followed by the LA data read by
 * t4_cim_read_la().
 */
132 int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
133 struct cudbg_buffer *dbg_buff,
134 struct cudbg_error *cudbg_err)
136 struct adapter *padap = pdbg_init->adap;
137 struct cudbg_buffer temp_buff = { 0 };
141 if (is_t6(padap->params.chip)) {
142 size = padap->params.cim_la_size / 10 + 1;
143 size *= 11 * sizeof(u32);
145 size = padap->params.cim_la_size / 8;
146 size *= 8 * sizeof(u32);
150 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
/* Read the LA config word so the decoder knows the capture mode. */
154 rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
156 cudbg_err->sys_err = rc;
157 cudbg_put_buff(&temp_buff, dbg_buff);
161 memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
162 rc = t4_cim_read_la(padap,
163 (u32 *)((char *)temp_buff.data + sizeof(cfg)),
166 cudbg_err->sys_err = rc;
167 cudbg_put_buff(&temp_buff, dbg_buff);
170 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the CIM MA logic-analyzer data: two CIM_MALA_SIZE regions of
 * 5 u32s each, read back-to-back into one scratch buffer.
 */
174 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
175 struct cudbg_buffer *dbg_buff,
176 struct cudbg_error *cudbg_err)
178 struct adapter *padap = pdbg_init->adap;
179 struct cudbg_buffer temp_buff = { 0 };
182 size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
183 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
187 t4_cim_read_ma_la(padap,
188 (u32 *)temp_buff.data,
189 (u32 *)((char *)temp_buff.data +
191 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the CIM queue configuration snapshot: IBQ read pointers, OBQ
 * write pointers, and the queue base/size/threshold tables, stored in a
 * struct cudbg_cim_qcfg tagged with the chip revision.
 */
195 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
196 struct cudbg_buffer *dbg_buff,
197 struct cudbg_error *cudbg_err)
199 struct adapter *padap = pdbg_init->adap;
200 struct cudbg_buffer temp_buff = { 0 };
201 struct cudbg_cim_qcfg *cim_qcfg_data;
204 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
209 cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
210 cim_qcfg_data->chip = padap->params.chip;
211 rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
212 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
214 cudbg_err->sys_err = rc;
215 cudbg_put_buff(&temp_buff, dbg_buff);
219 rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
220 ARRAY_SIZE(cim_qcfg_data->obq_wr),
221 cim_qcfg_data->obq_wr);
223 cudbg_err->sys_err = rc;
224 cudbg_put_buff(&temp_buff, dbg_buff);
228 t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
229 cim_qcfg_data->thres);
230 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Read one CIM inbound queue (IBQ) identified by qid into the debug
 * buffer.  t4_read_cim_ibq() returns the number of words read; a value
 * <= 0 is treated as an error (0 mapped to CUDBG_SYSTEM_ERROR, negative
 * values propagated as-is).
 */
234 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
235 struct cudbg_buffer *dbg_buff,
236 struct cudbg_error *cudbg_err, int qid)
238 struct adapter *padap = pdbg_init->adap;
239 struct cudbg_buffer temp_buff = { 0 };
240 int no_of_read_words, rc = 0;
243 /* collect CIM IBQ */
244 qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
245 rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
249 /* t4_read_cim_ibq will return no. of read words or error */
250 no_of_read_words = t4_read_cim_ibq(padap, qid,
251 (u32 *)temp_buff.data, qsize);
252 /* no_of_read_words is less than or equal to 0 means error */
253 if (no_of_read_words <= 0) {
254 if (!no_of_read_words)
255 rc = CUDBG_SYSTEM_ERROR;
257 rc = no_of_read_words;
258 cudbg_err->sys_err = rc;
259 cudbg_put_buff(&temp_buff, dbg_buff);
262 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Thin wrappers that collect a specific CIM inbound queue by fixed
 * queue id: TP0=0, TP1=1, ULP=2, SGE0=3, SGE1=4, NC-SI=5.
 */
266 int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
267 struct cudbg_buffer *dbg_buff,
268 struct cudbg_error *cudbg_err)
270 return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
273 int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
274 struct cudbg_buffer *dbg_buff,
275 struct cudbg_error *cudbg_err)
277 return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
280 int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
281 struct cudbg_buffer *dbg_buff,
282 struct cudbg_error *cudbg_err)
284 return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
287 int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
288 struct cudbg_buffer *dbg_buff,
289 struct cudbg_error *cudbg_err)
291 return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
294 int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
295 struct cudbg_buffer *dbg_buff,
296 struct cudbg_error *cudbg_err)
298 return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
301 int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
302 struct cudbg_buffer *dbg_buff,
303 struct cudbg_error *cudbg_err)
305 return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
/*
 * Query the hardware for the size in bytes of CIM outbound queue qid:
 * select the queue via CIM_QUEUE_CONFIG_REF, read back CIMQSIZE (in
 * units of 64 words) and convert words to bytes.
 */
308 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
312 t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
313 QUENUMSELECT_V(qid));
314 value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
315 value = CIMQSIZE_G(value) * 64; /* size in number of words */
316 return value * sizeof(u32);
/*
 * Read one CIM outbound queue (OBQ) identified by qid into the debug
 * buffer, sizing the scratch buffer via cudbg_cim_obq_size().  Mirrors
 * cudbg_read_cim_ibq()'s error convention: <= 0 words read is an error.
 */
319 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
320 struct cudbg_buffer *dbg_buff,
321 struct cudbg_error *cudbg_err, int qid)
323 struct adapter *padap = pdbg_init->adap;
324 struct cudbg_buffer temp_buff = { 0 };
325 int no_of_read_words, rc = 0;
328 /* collect CIM OBQ */
329 qsize = cudbg_cim_obq_size(padap, qid);
330 rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
334 /* t4_read_cim_obq will return no. of read words or error */
335 no_of_read_words = t4_read_cim_obq(padap, qid,
336 (u32 *)temp_buff.data, qsize);
337 /* no_of_read_words is less than or equal to 0 means error */
338 if (no_of_read_words <= 0) {
339 if (!no_of_read_words)
340 rc = CUDBG_SYSTEM_ERROR;
342 rc = no_of_read_words;
343 cudbg_err->sys_err = rc;
344 cudbg_put_buff(&temp_buff, dbg_buff);
347 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Thin wrappers that collect a specific CIM outbound queue by fixed
 * queue id: ULP0..ULP3 = 0..3, SGE = 4, NC-SI = 5, SGE RX Q0 = 6,
 * SGE RX Q1 = 7.
 */
351 int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
352 struct cudbg_buffer *dbg_buff,
353 struct cudbg_error *cudbg_err)
355 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
358 int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
359 struct cudbg_buffer *dbg_buff,
360 struct cudbg_error *cudbg_err)
362 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
365 int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
366 struct cudbg_buffer *dbg_buff,
367 struct cudbg_error *cudbg_err)
369 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
372 int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
373 struct cudbg_buffer *dbg_buff,
374 struct cudbg_error *cudbg_err)
376 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
379 int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
380 struct cudbg_buffer *dbg_buff,
381 struct cudbg_error *cudbg_err)
383 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
386 int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
387 struct cudbg_buffer *dbg_buff,
388 struct cudbg_error *cudbg_err)
390 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
393 int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
394 struct cudbg_buffer *dbg_buff,
395 struct cudbg_error *cudbg_err)
397 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
400 int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
401 struct cudbg_buffer *dbg_buff,
402 struct cudbg_error *cudbg_err)
404 return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
/*
 * Stream tot_len bytes of adapter memory of the given mem_type into the
 * debug buffer in CUDBG_CHUNK_SIZE pieces.  Each chunk is read with
 * t4_memory_rw() under win0_lock; on failure the error is recorded and
 * the current scratch buffer released.
 */
407 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
408 struct cudbg_buffer *dbg_buff, u8 mem_type,
409 unsigned long tot_len,
410 struct cudbg_error *cudbg_err)
412 unsigned long bytes, bytes_left, bytes_read = 0;
413 struct adapter *padap = pdbg_init->adap;
414 struct cudbg_buffer temp_buff = { 0 };
417 bytes_left = tot_len;
418 while (bytes_left > 0) {
419 bytes = min_t(unsigned long, bytes_left,
420 (unsigned long)CUDBG_CHUNK_SIZE);
421 rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
424 spin_lock(&padap->win0_lock);
425 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
427 (__be32 *)temp_buff.data,
429 spin_unlock(&padap->win0_lock);
431 cudbg_err->sys_err = rc;
432 cudbg_put_buff(&temp_buff, dbg_buff);
/* NOTE(review): the lines advancing bytes_left/bytes_read are missing
 * from this extract. */
437 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Populate mem_info with the EDC0/EDC1 sizes (from the EDRAM BAR
 * registers) and set presence flags based on MA_TARGET_MEM_ENABLE.
 */
442 static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
443 struct card_mem *mem_info)
445 struct adapter *padap = pdbg_init->adap;
448 value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
449 value = EDRAM0_SIZE_G(value);
450 mem_info->size_edc0 = (u16)value;
452 value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
453 value = EDRAM1_SIZE_G(value);
454 mem_info->size_edc1 = (u16)value;
456 value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
457 if (value & EDRAM0_ENABLE_F)
458 mem_info->mem_flag |= (1 << EDC0_FLAG);
459 if (value & EDRAM1_ENABLE_F)
460 mem_info->mem_flag |= (1 << EDC1_FLAG);
/*
 * If firmware is attached, ask it to flush the uP dcache so subsequent
 * EDC/MC reads observe coherent contents.  A failure is recorded only as
 * a warning (sys_warn), not a hard error.
 */
463 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
464 struct cudbg_error *cudbg_err)
466 struct adapter *padap = pdbg_init->adap;
469 if (is_fw_attached(pdbg_init)) {
470 /* Flush uP dcache before reading edcX/mcX */
471 rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
473 cudbg_err->sys_warn = rc;
/*
 * Collect one memory region (EDC0 or EDC1): flush the firmware cache,
 * query the sizes/flags, and if the region is present stream it into the
 * debug buffer via cudbg_read_fw_mem().  Absent regions yield
 * CUDBG_STATUS_ENTITY_NOT_FOUND.
 *
 * NOTE(review): the switch/case selecting flag+size per mem_type is
 * partially missing from this extract.
 */
477 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
478 struct cudbg_buffer *dbg_buff,
479 struct cudbg_error *cudbg_err,
482 struct card_mem mem_info = {0};
483 unsigned long flag, size;
486 cudbg_t4_fwcache(pdbg_init, cudbg_err);
487 cudbg_collect_mem_info(pdbg_init, &mem_info);
490 flag = (1 << EDC0_FLAG);
491 size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
494 flag = (1 << EDC1_FLAG);
495 size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
498 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
502 if (mem_info.mem_flag & flag) {
503 rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
508 rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
/*
 * Thin wrappers collecting the EDC0 and EDC1 memory regions via
 * cudbg_collect_mem_region() (the mem_type argument is on a line
 * missing from this extract).
 */
515 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
516 struct cudbg_buffer *dbg_buff,
517 struct cudbg_error *cudbg_err)
519 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
523 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
524 struct cudbg_buffer *dbg_buff,
525 struct cudbg_error *cudbg_err)
527 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
/*
 * Collect the RSS table: RSS_NENTRIES u16 entries read with
 * t4_read_rss() into the debug buffer.
 */
531 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
532 struct cudbg_buffer *dbg_buff,
533 struct cudbg_error *cudbg_err)
535 struct adapter *padap = pdbg_init->adap;
536 struct cudbg_buffer temp_buff = { 0 };
539 rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
543 rc = t4_read_rss(padap, (u16 *)temp_buff.data);
545 cudbg_err->sys_err = rc;
546 cudbg_put_buff(&temp_buff, dbg_buff);
549 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect per-VF RSS configuration: one cudbg_rss_vf_conf entry per VF
 * (vfcount from adapter params), each filled by t4_read_rss_vf_config().
 */
553 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
554 struct cudbg_buffer *dbg_buff,
555 struct cudbg_error *cudbg_err)
557 struct adapter *padap = pdbg_init->adap;
558 struct cudbg_buffer temp_buff = { 0 };
559 struct cudbg_rss_vf_conf *vfconf;
560 int vf, rc, vf_count;
562 vf_count = padap->params.arch.vfcount;
563 rc = cudbg_get_buff(dbg_buff,
564 vf_count * sizeof(struct cudbg_rss_vf_conf),
569 vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
570 for (vf = 0; vf < vf_count; vf++)
571 t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
572 &vfconf[vf].rss_vf_vfh, true);
573 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the path-MTU table: NMTUS u16 entries read with
 * t4_read_mtu_tbl() (the per-MTU log2 values are not collected: NULL).
 */
577 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
578 struct cudbg_buffer *dbg_buff,
579 struct cudbg_error *cudbg_err)
581 struct adapter *padap = pdbg_init->adap;
582 struct cudbg_buffer temp_buff = { 0 };
585 rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
589 t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
590 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect PM TX/RX statistics (counts and cycle counters) into a
 * struct cudbg_pm_stats.
 */
594 int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
595 struct cudbg_buffer *dbg_buff,
596 struct cudbg_error *cudbg_err)
598 struct adapter *padap = pdbg_init->adap;
599 struct cudbg_buffer temp_buff = { 0 };
600 struct cudbg_pm_stats *pm_stats_buff;
603 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
608 pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
609 t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
610 t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
611 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect hardware TX scheduler state: queue-request map, timer mode,
 * pace table, and per-scheduler kbps/ipg for all NTX_SCHED schedulers.
 * Requires a valid core clock (vpd.cclk), else
 * CUDBG_STATUS_CCLK_NOT_DEFINED.
 */
615 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
616 struct cudbg_buffer *dbg_buff,
617 struct cudbg_error *cudbg_err)
619 struct adapter *padap = pdbg_init->adap;
620 struct cudbg_buffer temp_buff = { 0 };
621 struct cudbg_hw_sched *hw_sched_buff;
624 if (!padap->params.vpd.cclk)
625 return CUDBG_STATUS_CCLK_NOT_DEFINED;
627 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
629 hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
630 hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
631 hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
632 t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
633 for (i = 0; i < NTX_SCHED; ++i)
634 t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
635 &hw_sched_buff->ipg[i], true);
636 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect TP indirect registers in three passes -- TP PIO, TP TM PIO,
 * and TP MIB -- using the chip-specific (T5 vs T6) register index
 * arrays.  Each pass fills one struct ireg_buf per index-array row
 * (addr/data/local_offset/offset_range plus the read-back values).
 *
 * NOTE(review): the 'ch_tp_pio++' advance between passes is on lines
 * missing from this extract.
 */
640 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
641 struct cudbg_buffer *dbg_buff,
642 struct cudbg_error *cudbg_err)
644 struct adapter *padap = pdbg_init->adap;
645 struct cudbg_buffer temp_buff = { 0 };
646 struct ireg_buf *ch_tp_pio;
/* Total buffer: one ireg_buf per row across all three arrays. */
650 if (is_t5(padap->params.chip))
651 n = sizeof(t5_tp_pio_array) +
652 sizeof(t5_tp_tm_pio_array) +
653 sizeof(t5_tp_mib_index_array);
655 n = sizeof(t6_tp_pio_array) +
656 sizeof(t6_tp_tm_pio_array) +
657 sizeof(t6_tp_mib_index_array);
659 n = n / (IREG_NUM_ELEM * sizeof(u32));
660 size = sizeof(struct ireg_buf) * n;
661 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
665 ch_tp_pio = (struct ireg_buf *)temp_buff.data;
/* Pass 1: TP PIO registers. */
668 if (is_t5(padap->params.chip))
669 n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
670 else if (is_t6(padap->params.chip))
671 n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
673 for (i = 0; i < n; i++) {
674 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
675 u32 *buff = ch_tp_pio->outbuf;
677 if (is_t5(padap->params.chip)) {
678 tp_pio->ireg_addr = t5_tp_pio_array[i][0];
679 tp_pio->ireg_data = t5_tp_pio_array[i][1];
680 tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
681 tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
682 } else if (is_t6(padap->params.chip)) {
683 tp_pio->ireg_addr = t6_tp_pio_array[i][0];
684 tp_pio->ireg_data = t6_tp_pio_array[i][1];
685 tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
686 tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
688 t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
689 tp_pio->ireg_local_offset, true);
/* Pass 2: TP TM PIO registers. */
694 if (is_t5(padap->params.chip))
695 n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
696 else if (is_t6(padap->params.chip))
697 n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
699 for (i = 0; i < n; i++) {
700 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
701 u32 *buff = ch_tp_pio->outbuf;
703 if (is_t5(padap->params.chip)) {
704 tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
705 tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
706 tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
707 tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
708 } else if (is_t6(padap->params.chip)) {
709 tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
710 tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
711 tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
712 tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
714 t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
715 tp_pio->ireg_local_offset, true);
/* Pass 3: TP MIB registers. */
720 if (is_t5(padap->params.chip))
721 n = sizeof(t5_tp_mib_index_array) /
722 (IREG_NUM_ELEM * sizeof(u32));
723 else if (is_t6(padap->params.chip))
724 n = sizeof(t6_tp_mib_index_array) /
725 (IREG_NUM_ELEM * sizeof(u32));
727 for (i = 0; i < n ; i++) {
728 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
729 u32 *buff = ch_tp_pio->outbuf;
731 if (is_t5(padap->params.chip)) {
732 tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
733 tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
734 tp_pio->ireg_local_offset =
735 t5_tp_mib_index_array[i][2];
736 tp_pio->ireg_offset_range =
737 t5_tp_mib_index_array[i][3];
738 } else if (is_t6(padap->params.chip)) {
739 tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
740 tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
741 tp_pio->ireg_local_offset =
742 t6_tp_mib_index_array[i][2];
743 tp_pio->ireg_offset_range =
744 t6_tp_mib_index_array[i][3];
746 t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
747 tp_pio->ireg_local_offset, true);
750 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect SGE indirect debug registers: two ireg_buf entries driven by
 * t5_sge_dbg_index_array, each read with t4_read_indirect().
 */
754 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
755 struct cudbg_buffer *dbg_buff,
756 struct cudbg_error *cudbg_err)
758 struct adapter *padap = pdbg_init->adap;
759 struct cudbg_buffer temp_buff = { 0 };
760 struct ireg_buf *ch_sge_dbg;
763 rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
767 ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
768 for (i = 0; i < 2; i++) {
769 struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
770 u32 *buff = ch_sge_dbg->outbuf;
772 sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
773 sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
774 sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
775 sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
776 t4_read_indirect(padap,
780 sge_pio->ireg_offset_range,
781 sge_pio->ireg_local_offset);
784 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the ULP-RX logic-analyzer capture into a struct
 * cudbg_ulprx_la, recording ULPRX_LA_SIZE as the entry count.
 */
788 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
789 struct cudbg_buffer *dbg_buff,
790 struct cudbg_error *cudbg_err)
792 struct adapter *padap = pdbg_init->adap;
793 struct cudbg_buffer temp_buff = { 0 };
794 struct cudbg_ulprx_la *ulprx_la_buff;
797 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
802 ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
803 t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
804 ulprx_la_buff->size = ULPRX_LA_SIZE;
805 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the TP logic-analyzer capture: the LA mode (from
 * TP_DBG_LA_CONFIG) followed by TPLA_SIZE u64 entries.
 */
809 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
810 struct cudbg_buffer *dbg_buff,
811 struct cudbg_error *cudbg_err)
813 struct adapter *padap = pdbg_init->adap;
814 struct cudbg_buffer temp_buff = { 0 };
815 struct cudbg_tp_la *tp_la_buff;
818 size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
819 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
823 tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
824 tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
825 t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
826 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect the CIM PIF logic-analyzer capture: two CIM_PIFLA_SIZE
 * regions of 6 u32s each, read back-to-back by t4_cim_read_pif_la().
 */
830 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
831 struct cudbg_buffer *dbg_buff,
832 struct cudbg_error *cudbg_err)
834 struct cudbg_cim_pif_la *cim_pif_la_buff;
835 struct adapter *padap = pdbg_init->adap;
836 struct cudbg_buffer temp_buff = { 0 };
839 size = sizeof(struct cudbg_cim_pif_la) +
840 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
841 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
845 cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
846 cim_pif_la_buff->size = CIM_PIFLA_SIZE;
847 t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
848 (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
850 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect clock/timer information: derive the core-clock period in
 * picoseconds from vpd.cclk, read the TP timer resolutions, and convert
 * the TP timer registers (dack, retransmit min/max, persist min/max,
 * keepalive, srtt, finwait2) into microsecond-scaled values.  Requires
 * a valid core clock, else CUDBG_STATUS_CCLK_NOT_DEFINED.
 */
854 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
855 struct cudbg_buffer *dbg_buff,
856 struct cudbg_error *cudbg_err)
858 struct adapter *padap = pdbg_init->adap;
859 struct cudbg_buffer temp_buff = { 0 };
860 struct cudbg_clk_info *clk_info_buff;
864 if (!padap->params.vpd.cclk)
865 return CUDBG_STATUS_CCLK_NOT_DEFINED;
867 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
872 clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
873 clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
874 clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
875 clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
876 clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
/* One TP timer tick in microseconds, at the general resolution. */
877 tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
879 clk_info_buff->dack_timer =
880 (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
881 t4_read_reg(padap, TP_DACK_TIMER_A);
882 clk_info_buff->retransmit_min =
883 tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
884 clk_info_buff->retransmit_max =
885 tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
886 clk_info_buff->persist_timer_min =
887 tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
888 clk_info_buff->persist_timer_max =
889 tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
890 clk_info_buff->keepalive_idle_timer =
891 tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
892 clk_info_buff->keepalive_interval =
893 tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
894 clk_info_buff->initial_srtt =
895 tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
896 clk_info_buff->finwait2_timer =
897 tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
899 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect PCIe indirect registers in two passes -- PDBG then CDBG --
 * driven by the t5_pcie_pdbg_array/t5_pcie_cdbg_array index tables,
 * one ireg_buf per row, each read via t4_read_indirect().
 *
 * NOTE(review): the 'ch_pcie++' advance between passes is on lines
 * missing from this extract.
 */
903 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
904 struct cudbg_buffer *dbg_buff,
905 struct cudbg_error *cudbg_err)
907 struct adapter *padap = pdbg_init->adap;
908 struct cudbg_buffer temp_buff = { 0 };
909 struct ireg_buf *ch_pcie;
913 n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
914 size = sizeof(struct ireg_buf) * n * 2;
915 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
919 ch_pcie = (struct ireg_buf *)temp_buff.data;
/* Pass 1: PCIe PDBG registers. */
921 for (i = 0; i < n; i++) {
922 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
923 u32 *buff = ch_pcie->outbuf;
925 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
926 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
927 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
928 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
929 t4_read_indirect(padap,
933 pcie_pio->ireg_offset_range,
934 pcie_pio->ireg_local_offset);
/* Pass 2: PCIe CDBG registers. */
939 n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
940 for (i = 0; i < n; i++) {
941 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
942 u32 *buff = ch_pcie->outbuf;
944 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
945 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
946 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
947 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
948 t4_read_indirect(padap,
952 pcie_pio->ireg_offset_range,
953 pcie_pio->ireg_local_offset);
956 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect PM indirect registers in two passes -- PM RX then PM TX --
 * driven by the t5_pm_rx_array/t5_pm_tx_array index tables, one
 * ireg_buf per row, each read via t4_read_indirect().
 *
 * NOTE(review): the 'ch_pm++' advance between passes is on lines
 * missing from this extract.
 */
960 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
961 struct cudbg_buffer *dbg_buff,
962 struct cudbg_error *cudbg_err)
964 struct adapter *padap = pdbg_init->adap;
965 struct cudbg_buffer temp_buff = { 0 };
966 struct ireg_buf *ch_pm;
970 n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
971 size = sizeof(struct ireg_buf) * n * 2;
972 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
976 ch_pm = (struct ireg_buf *)temp_buff.data;
/* Pass 1: PM RX registers. */
978 for (i = 0; i < n; i++) {
979 struct ireg_field *pm_pio = &ch_pm->tp_pio;
980 u32 *buff = ch_pm->outbuf;
982 pm_pio->ireg_addr = t5_pm_rx_array[i][0];
983 pm_pio->ireg_data = t5_pm_rx_array[i][1];
984 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
985 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
986 t4_read_indirect(padap,
990 pm_pio->ireg_offset_range,
991 pm_pio->ireg_local_offset);
/* Pass 2: PM TX registers. */
996 n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
997 for (i = 0; i < n; i++) {
998 struct ireg_field *pm_pio = &ch_pm->tp_pio;
999 u32 *buff = ch_pm->outbuf;
1001 pm_pio->ireg_addr = t5_pm_tx_array[i][0];
1002 pm_pio->ireg_data = t5_pm_tx_array[i][1];
1003 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
1004 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
1005 t4_read_indirect(padap,
1009 pm_pio->ireg_offset_range,
1010 pm_pio->ireg_local_offset);
1013 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Collect TID (connection-id) region information into a versioned
 * struct cudbg_tid_info_region_rev1: uOTID/HP-filter ranges queried
 * from firmware, chip-specific server-TID base registers, and the
 * driver's cached tids counts plus LE-DB usage registers.
 */
1017 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
1018 struct cudbg_buffer *dbg_buff,
1019 struct cudbg_error *cudbg_err)
1021 struct adapter *padap = pdbg_init->adap;
1022 struct cudbg_tid_info_region_rev1 *tid1;
1023 struct cudbg_buffer temp_buff = { 0 };
1024 struct cudbg_tid_info_region *tid;
1025 u32 para[2], val[2];
1028 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
1033 tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
/* Version header so decoders can handle future revisions. */
1035 tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1036 tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
1037 tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
1038 sizeof(struct cudbg_ver_hdr);
/* Build a PFVF firmware-parameter mnemonic for the given name. */
1040 #define FW_PARAM_PFVF_A(param) \
1041 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1042 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1043 FW_PARAMS_PARAM_Y_V(0) | \
1044 FW_PARAMS_PARAM_Z_V(0))
/* Query the ETHOFLD TID range from firmware. */
1046 para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
1047 para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
1048 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
1050 cudbg_err->sys_err = rc;
1051 cudbg_put_buff(&temp_buff, dbg_buff);
1054 tid->uotid_base = val[0];
1055 tid->nuotids = val[1] - val[0] + 1;
1057 if (is_t5(padap->params.chip)) {
1058 tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
1059 } else if (is_t6(padap->params.chip)) {
1061 t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
1062 tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
/* T6 also exposes a high-priority filter TID range via firmware. */
1064 para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
1065 para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
1066 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
1069 cudbg_err->sys_err = rc;
1070 cudbg_put_buff(&temp_buff, dbg_buff);
1073 tid->hpftid_base = val[0];
1074 tid->nhpftids = val[1] - val[0] + 1;
/* Remaining fields come from the driver's cached tids state. */
1077 tid->ntids = padap->tids.ntids;
1078 tid->nstids = padap->tids.nstids;
1079 tid->stid_base = padap->tids.stid_base;
1080 tid->hash_base = padap->tids.hash_base;
1082 tid->natids = padap->tids.natids;
1083 tid->nftids = padap->tids.nftids;
1084 tid->ftid_base = padap->tids.ftid_base;
1085 tid->aftid_base = padap->tids.aftid_base;
1086 tid->aftid_end = padap->tids.aftid_end;
1088 tid->sftid_base = padap->tids.sftid_base;
1089 tid->nsftids = padap->tids.nsftids;
1091 tid->flags = padap->flags;
1092 tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
1093 tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
1094 tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
1096 #undef FW_PARAM_PFVF_A
1098 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
/*
 * Convert a TCAM X/Y register pair into a MAC address (and mask).
 * The y word is byte-swapped to big-endian and its low 6 bytes copied
 * into addr.
 *
 * NOTE(review): the lines handling 'x' and '*mask' are missing from
 * this extract.
 */
1102 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
1105 y = (__force u64)cpu_to_be64(y);
1106 memcpy(addr, (char *)&y + 2, ETH_ALEN);
/*
 * Read the MPS replication map directly from hardware registers
 * (a "backdoor" used when the firmware mailbox path is unavailable).
 * T5 exposes the upper 128 bits in MAP0-MAP3; later chips in MAP4-MAP7.
 * The lower 128 bits come from MAP0-MAP3 on all chips.
 */
1109 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
1110 struct fw_ldst_mps_rplc *mps_rplc)
1112 if (is_t5(padap->params.chip)) {
1113 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1114 MPS_VF_RPLCT_MAP3_A));
1115 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1116 MPS_VF_RPLCT_MAP2_A));
1117 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1118 MPS_VF_RPLCT_MAP1_A));
1119 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1120 MPS_VF_RPLCT_MAP0_A));
1122 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
1123 MPS_VF_RPLCT_MAP7_A));
1124 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
1125 MPS_VF_RPLCT_MAP6_A));
1126 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
1127 MPS_VF_RPLCT_MAP5_A));
1128 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
1129 MPS_VF_RPLCT_MAP4_A));
1131 mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
1132 mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
1133 mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
1134 mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
1137 static int cudbg_collect_tcam_index(struct adapter *padap,
1138 struct cudbg_mps_tcam *tcam, u32 idx)
1140 u64 tcamy, tcamx, val;
1144 if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
1145 /* CtlReqID - 1: use Host Driver Requester ID
1146 * CtlCmdType - 0: Read, 1: Write
1147 * CtlTcamSel - 0: TCAM0, 1: TCAM1
1148 * CtlXYBitSel- 0: Y bit, 1: X bit
1152 ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
1154 ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
1156 ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
1158 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1159 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
1160 tcamy = DMACH_G(val) << 32;
1161 tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
1162 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
1163 tcam->lookup_type = DATALKPTYPE_G(data2);
1165 /* 0 - Outer header, 1 - Inner header
1166 * [71:48] bit locations are overloaded for
1167 * outer vs. inner lookup types.
1169 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
1170 /* Inner header VNI */
1171 tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
1172 tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
1173 tcam->dip_hit = data2 & DATADIPHIT_F;
1175 tcam->vlan_vld = data2 & DATAVIDH2_F;
1176 tcam->ivlan = VIDL_G(val);
1179 tcam->port_num = DATAPORTNUM_G(data2);
1181 /* Read tcamx. Change the control param */
1182 ctl |= CTLXYBITSEL_V(1);
1183 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1184 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
1185 tcamx = DMACH_G(val) << 32;
1186 tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
1187 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
1188 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
1189 /* Inner header VNI mask */
1190 tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
1191 tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
1194 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
1195 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
1198 /* If no entry, return */
1202 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
1203 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
1205 if (is_t5(padap->params.chip))
1206 tcam->repli = (tcam->cls_lo & REPLICATE_F);
1207 else if (is_t6(padap->params.chip))
1208 tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
1211 struct fw_ldst_cmd ldst_cmd;
1212 struct fw_ldst_mps_rplc mps_rplc;
1214 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1215 ldst_cmd.op_to_addrspace =
1216 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
1217 FW_CMD_REQUEST_F | FW_CMD_READ_F |
1218 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
1219 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
1220 ldst_cmd.u.mps.rplc.fid_idx =
1221 htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
1222 FW_LDST_CMD_IDX_V(idx));
1224 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1227 cudbg_mps_rpl_backdoor(padap, &mps_rplc);
1229 mps_rplc = ldst_cmd.u.mps.rplc;
1231 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
1232 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
1233 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
1234 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
1235 if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
1236 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
1237 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
1238 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
1239 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
1242 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
1244 tcam->rplc_size = padap->params.arch.mps_rplc_size;
1248 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1249 struct cudbg_buffer *dbg_buff,
1250 struct cudbg_error *cudbg_err)
1252 struct adapter *padap = pdbg_init->adap;
1253 struct cudbg_buffer temp_buff = { 0 };
1254 u32 size = 0, i, n, total_size = 0;
1255 struct cudbg_mps_tcam *tcam;
1258 n = padap->params.arch.mps_tcam_size;
1259 size = sizeof(struct cudbg_mps_tcam) * n;
1260 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1264 tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1265 for (i = 0; i < n; i++) {
1266 rc = cudbg_collect_tcam_index(padap, tcam, i);
1268 cudbg_err->sys_err = rc;
1269 cudbg_put_buff(&temp_buff, dbg_buff);
1272 total_size += sizeof(struct cudbg_mps_tcam);
1277 rc = CUDBG_SYSTEM_ERROR;
1278 cudbg_err->sys_err = rc;
1279 cudbg_put_buff(&temp_buff, dbg_buff);
1282 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1286 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
1287 struct cudbg_buffer *dbg_buff,
1288 struct cudbg_error *cudbg_err)
1290 struct adapter *padap = pdbg_init->adap;
1291 struct cudbg_buffer temp_buff = { 0 };
1292 struct cudbg_vpd_data *vpd_data;
1295 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
1300 vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
1301 memcpy(vpd_data->sn, padap->params.vpd.sn, SERNUM_LEN + 1);
1302 memcpy(vpd_data->bn, padap->params.vpd.pn, PN_LEN + 1);
1303 memcpy(vpd_data->na, padap->params.vpd.na, MACADDR_LEN + 1);
1304 memcpy(vpd_data->mn, padap->params.vpd.id, ID_LEN + 1);
1305 vpd_data->scfg_vers = padap->params.scfg_vers;
1306 vpd_data->vpd_vers = padap->params.vpd_vers;
1307 vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(padap->params.fw_vers);
1308 vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(padap->params.fw_vers);
1309 vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(padap->params.fw_vers);
1310 vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(padap->params.fw_vers);
1311 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1315 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
1316 struct cudbg_buffer *dbg_buff,
1317 struct cudbg_error *cudbg_err)
1319 struct adapter *padap = pdbg_init->adap;
1320 struct cudbg_buffer temp_buff = { 0 };
1324 size = sizeof(u16) * NMTUS * NCCTRL_WIN;
1325 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1329 t4_read_cong_tbl(padap, (void *)temp_buff.data);
1330 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1334 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1335 struct cudbg_buffer *dbg_buff,
1336 struct cudbg_error *cudbg_err)
1338 struct adapter *padap = pdbg_init->adap;
1339 struct cudbg_buffer temp_buff = { 0 };
1340 struct ireg_buf *ma_indr;
1344 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1345 return CUDBG_STATUS_ENTITY_NOT_FOUND;
1347 n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1348 size = sizeof(struct ireg_buf) * n * 2;
1349 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1353 ma_indr = (struct ireg_buf *)temp_buff.data;
1354 for (i = 0; i < n; i++) {
1355 struct ireg_field *ma_fli = &ma_indr->tp_pio;
1356 u32 *buff = ma_indr->outbuf;
1358 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1359 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1360 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1361 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1362 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1363 buff, ma_fli->ireg_offset_range,
1364 ma_fli->ireg_local_offset);
1368 n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1369 for (i = 0; i < n; i++) {
1370 struct ireg_field *ma_fli = &ma_indr->tp_pio;
1371 u32 *buff = ma_indr->outbuf;
1373 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1374 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1375 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1376 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1377 t4_read_indirect(padap, ma_fli->ireg_addr,
1378 ma_fli->ireg_data, buff, 1,
1379 ma_fli->ireg_local_offset);
1381 ma_fli->ireg_local_offset += 0x20;
1385 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1389 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
1390 struct cudbg_buffer *dbg_buff,
1391 struct cudbg_error *cudbg_err)
1393 struct adapter *padap = pdbg_init->adap;
1394 struct cudbg_buffer temp_buff = { 0 };
1395 struct cudbg_ulptx_la *ulptx_la_buff;
1399 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
1404 ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
1405 for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
1406 ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
1407 ULP_TX_LA_RDPTR_0_A +
1409 ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
1410 ULP_TX_LA_WRPTR_0_A +
1412 ulptx_la_buff->rddata[i] = t4_read_reg(padap,
1413 ULP_TX_LA_RDDATA_0_A +
1415 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
1416 ulptx_la_buff->rd_data[i][j] =
1418 ULP_TX_LA_RDDATA_0_A + 0x10 * i);
1420 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1424 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
1425 struct cudbg_buffer *dbg_buff,
1426 struct cudbg_error *cudbg_err)
1428 struct adapter *padap = pdbg_init->adap;
1429 struct cudbg_buffer temp_buff = { 0 };
1430 struct ireg_buf *up_cim;
1434 n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
1435 size = sizeof(struct ireg_buf) * n;
1436 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1440 up_cim = (struct ireg_buf *)temp_buff.data;
1441 for (i = 0; i < n; i++) {
1442 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
1443 u32 *buff = up_cim->outbuf;
1445 if (is_t5(padap->params.chip)) {
1446 up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
1447 up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
1448 up_cim_reg->ireg_local_offset =
1449 t5_up_cim_reg_array[i][2];
1450 up_cim_reg->ireg_offset_range =
1451 t5_up_cim_reg_array[i][3];
1452 } else if (is_t6(padap->params.chip)) {
1453 up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
1454 up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
1455 up_cim_reg->ireg_local_offset =
1456 t6_up_cim_reg_array[i][2];
1457 up_cim_reg->ireg_offset_range =
1458 t6_up_cim_reg_array[i][3];
1461 rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
1462 up_cim_reg->ireg_offset_range, buff);
1464 cudbg_put_buff(&temp_buff, dbg_buff);
1469 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1473 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
1474 struct cudbg_buffer *dbg_buff,
1475 struct cudbg_error *cudbg_err)
1477 struct adapter *padap = pdbg_init->adap;
1478 struct cudbg_buffer temp_buff = { 0 };
1479 struct cudbg_pbt_tables *pbt;
1483 rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
1488 pbt = (struct cudbg_pbt_tables *)temp_buff.data;
1489 /* PBT dynamic entries */
1490 addr = CUDBG_CHAC_PBT_ADDR;
1491 for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
1492 rc = t4_cim_read(padap, addr + (i * 4), 1,
1493 &pbt->pbt_dynamic[i]);
1495 cudbg_err->sys_err = rc;
1496 cudbg_put_buff(&temp_buff, dbg_buff);
1501 /* PBT static entries */
1502 /* static entries start when bit 6 is set */
1503 addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
1504 for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
1505 rc = t4_cim_read(padap, addr + (i * 4), 1,
1506 &pbt->pbt_static[i]);
1508 cudbg_err->sys_err = rc;
1509 cudbg_put_buff(&temp_buff, dbg_buff);
1515 addr = CUDBG_CHAC_PBT_LRF;
1516 for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
1517 rc = t4_cim_read(padap, addr + (i * 4), 1,
1518 &pbt->lrf_table[i]);
1520 cudbg_err->sys_err = rc;
1521 cudbg_put_buff(&temp_buff, dbg_buff);
1526 /* PBT data entries */
1527 addr = CUDBG_CHAC_PBT_DATA;
1528 for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
1529 rc = t4_cim_read(padap, addr + (i * 4), 1,
1532 cudbg_err->sys_err = rc;
1533 cudbg_put_buff(&temp_buff, dbg_buff);
1537 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1541 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
1542 struct cudbg_buffer *dbg_buff,
1543 struct cudbg_error *cudbg_err)
1545 struct adapter *padap = pdbg_init->adap;
1546 struct cudbg_mbox_log *mboxlog = NULL;
1547 struct cudbg_buffer temp_buff = { 0 };
1548 struct mbox_cmd_log *log = NULL;
1549 struct mbox_cmd *entry;
1550 unsigned int entry_idx;
1556 log = padap->mbox_log;
1557 mbox_cmds = padap->mbox_log->size;
1558 size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
1559 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1563 mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
1564 for (k = 0; k < mbox_cmds; k++) {
1565 entry_idx = log->cursor + k;
1566 if (entry_idx >= log->size)
1567 entry_idx -= log->size;
1569 entry = mbox_cmd_log_entry(log, entry_idx);
1570 /* skip over unused entries */
1571 if (entry->timestamp == 0)
1574 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
1575 for (i = 0; i < MBOX_LEN / 8; i++) {
1576 flit = entry->cmd[i];
1577 mboxlog->hi[i] = (u32)(flit >> 32);
1578 mboxlog->lo[i] = (u32)flit;
1582 cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1586 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1587 struct cudbg_buffer *dbg_buff,
1588 struct cudbg_error *cudbg_err)
1590 struct adapter *padap = pdbg_init->adap;
1591 struct cudbg_buffer temp_buff = { 0 };
1592 struct ireg_buf *hma_indr;
1596 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1597 return CUDBG_STATUS_ENTITY_NOT_FOUND;
1599 n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1600 size = sizeof(struct ireg_buf) * n;
1601 rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1605 hma_indr = (struct ireg_buf *)temp_buff.data;
1606 for (i = 0; i < n; i++) {
1607 struct ireg_field *hma_fli = &hma_indr->tp_pio;
1608 u32 *buff = hma_indr->outbuf;
1610 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1611 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1612 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1613 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1614 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1615 buff, hma_fli->ireg_offset_range,
1616 hma_fli->ireg_local_offset);
1619 cudbg_write_and_release_buff(&temp_buff, dbg_buff);