cxgb4: collect hardware misc dumps
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c

/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 *
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
#include "cudbg_lib.h"
#include "cudbg_entity.h"

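/* Commit the data collected in @pin_buff to the destination debug buffer
 * and release the temporary buffer.
 */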
static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
                                         struct cudbg_buffer *dbg_buff)
{
        cudbg_update_buff(pin_buff, dbg_buff);
        cudbg_put_buff(pin_buff, dbg_buff);
}

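/* Return 1 if the firmware is up (FW_OK) and backdoor register access has
 * not been requested, 0 otherwise.
 */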
static int is_fw_attached(struct cudbg_init *pdbg_init)
{
        struct adapter *padap = pdbg_init->adap;

        if (!(padap->flags & FW_OK) || padap->use_bd)
                return 0;

        return 1;
}

/* Add padding bytes to the debug buffer to make it 4 byte aligned. */
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
                              struct cudbg_entity_hdr *entity_hdr)
{
        u8 zero_buf[4] = {0};
        u8 padding, remain;

        remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
        padding = 4 - remain;
        if (remain) {
                memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
                       padding);
                dbg_buff->offset += padding;
                entity_hdr->num_pad = padding;
        }
        entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
}

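/* Return a pointer to entity header @i (1-based) that follows the main
 * cudbg header in @outbuf.
 */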
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
{
        struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;

        return (struct cudbg_entity_hdr *)
               ((char *)outbuf + cudbg_hdr->hdr_len +
                (sizeof(struct cudbg_entity_hdr) * (i - 1)));
}

int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        u32 buf_size = 0;
        int rc = 0;

        if (is_t4(padap->params.chip))
                buf_size = T4_REGMAP_SIZE;
        else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
                buf_size = T5_REGMAP_SIZE;

        rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
        if (rc)
                return rc;
        t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct devlog_params *dparams;
        int rc = 0;

        rc = t4_init_devlog_params(padap);
        if (rc < 0) {
                cudbg_err->sys_err = rc;
                return rc;
        }

        dparams = &padap->params.devlog;
        rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
        if (rc)
                return rc;

        /* Collect FW devlog */
        if (dparams->start != 0) {
                spin_lock(&padap->win0_lock);
                rc = t4_memory_rw(padap, padap->params.drv_memwin,
                                  dparams->memtype, dparams->start,
                                  dparams->size,
                                  (__be32 *)(char *)temp_buff.data,
                                  1);
                spin_unlock(&padap->win0_lock);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(&temp_buff, dbg_buff);
                        return rc;
                }
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
                         struct cudbg_buffer *dbg_buff,
                         struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int size, rc;
        u32 cfg = 0;

        if (is_t6(padap->params.chip)) {
                size = padap->params.cim_la_size / 10 + 1;
                size *= 11 * sizeof(u32);
        } else {
                size = padap->params.cim_la_size / 8;
                size *= 8 * sizeof(u32);
        }

        size += sizeof(cfg);
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }

        memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
        rc = t4_cim_read_la(padap,
                            (u32 *)((char *)temp_buff.data + sizeof(cfg)),
                            NULL);
        if (rc < 0) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
                            struct cudbg_buffer *dbg_buff,
                            struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int size, rc;

        size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        t4_cim_read_ma_la(padap,
                          (u32 *)temp_buff.data,
                          (u32 *)((char *)temp_buff.data +
                                  5 * CIM_MALA_SIZE));
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_cim_qcfg *cim_qcfg_data;
        int rc;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
                            &temp_buff);
        if (rc)
                return rc;

        cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
        cim_qcfg_data->chip = padap->params.chip;
        rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
                         ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }

        rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
                         ARRAY_SIZE(cim_qcfg_data->obq_wr),
                         cim_qcfg_data->obq_wr);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }

        t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
                         cim_qcfg_data->thres);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

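/* Dump CIM inbound queue @qid into the debug buffer. */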
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err, int qid)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int no_of_read_words, rc = 0;
        u32 qsize;

        /* collect CIM IBQ */
        qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
        rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
        if (rc)
                return rc;

        /* t4_read_cim_ibq will return no. of read words or error */
        no_of_read_words = t4_read_cim_ibq(padap, qid,
                                           (u32 *)temp_buff.data, qsize);
        /* no_of_read_words is less than or equal to 0 means error */
        if (no_of_read_words <= 0) {
                if (!no_of_read_words)
                        rc = CUDBG_SYSTEM_ERROR;
                else
                        rc = no_of_read_words;
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}

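/* Return the size, in bytes, of CIM outbound queue @qid. */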
u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
{
        u32 value;

        t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
                     QUENUMSELECT_V(qid));
        value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
        value = CIMQSIZE_G(value) * 64; /* size in number of words */
        return value * sizeof(u32);
}

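/* Dump CIM outbound queue @qid into the debug buffer. */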
static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err, int qid)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int no_of_read_words, rc = 0;
        u32 qsize;

        /* collect CIM OBQ */
        qsize = cudbg_cim_obq_size(padap, qid);
        rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
        if (rc)
                return rc;

        /* t4_read_cim_obq will return no. of read words or error */
        no_of_read_words = t4_read_cim_obq(padap, qid,
                                           (u32 *)temp_buff.data, qsize);
        /* no_of_read_words is less than or equal to 0 means error */
        if (no_of_read_words <= 0) {
                if (!no_of_read_words)
                        rc = CUDBG_SYSTEM_ERROR;
                else
                        rc = no_of_read_words;
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}

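/* Read @tot_len bytes of adapter memory of type @mem_type into the debug
 * buffer, in chunks of at most CUDBG_CHUNK_SIZE bytes.
 */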
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff, u8 mem_type,
                             unsigned long tot_len,
                             struct cudbg_error *cudbg_err)
{
        unsigned long bytes, bytes_left, bytes_read = 0;
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int rc = 0;

        bytes_left = tot_len;
        while (bytes_left > 0) {
                bytes = min_t(unsigned long, bytes_left,
                              (unsigned long)CUDBG_CHUNK_SIZE);
                rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
                if (rc)
                        return rc;
                spin_lock(&padap->win0_lock);
                rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
                                  bytes_read, bytes,
                                  (__be32 *)temp_buff.data,
                                  1);
                spin_unlock(&padap->win0_lock);
                if (rc) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(&temp_buff, dbg_buff);
                        return rc;
                }
                bytes_left -= bytes;
                bytes_read += bytes;
                cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        }
        return rc;
}

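/* Record the EDC0/EDC1 sizes (in MB) and flag which of them are enabled. */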
static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
                                   struct card_mem *mem_info)
{
        struct adapter *padap = pdbg_init->adap;
        u32 value;

        value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
        value = EDRAM0_SIZE_G(value);
        mem_info->size_edc0 = (u16)value;

        value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
        value = EDRAM1_SIZE_G(value);
        mem_info->size_edc1 = (u16)value;

        value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
        if (value & EDRAM0_ENABLE_F)
                mem_info->mem_flag |= (1 << EDC0_FLAG);
        if (value & EDRAM1_ENABLE_F)
                mem_info->mem_flag |= (1 << EDC1_FLAG);
}

static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
                             struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        int rc;

        if (is_fw_attached(pdbg_init)) {
                /* Flush uP dcache before reading edcX/mcX */
                rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
                if (rc)
                        cudbg_err->sys_warn = rc;
        }
}

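/* Dump the requested memory region (EDC0 or EDC1) if it is present and
 * enabled.
 */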
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
                                    struct cudbg_buffer *dbg_buff,
                                    struct cudbg_error *cudbg_err,
                                    u8 mem_type)
{
        struct card_mem mem_info = {0};
        unsigned long flag, size;
        int rc;

        cudbg_t4_fwcache(pdbg_init, cudbg_err);
        cudbg_collect_mem_info(pdbg_init, &mem_info);
        switch (mem_type) {
        case MEM_EDC0:
                flag = (1 << EDC0_FLAG);
                size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
                break;
        case MEM_EDC1:
                flag = (1 << EDC1_FLAG);
                size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
                break;
        default:
                rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
                goto err;
        }

        if (mem_info.mem_flag & flag) {
                rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
                                       size, cudbg_err);
                if (rc)
                        goto err;
        } else {
                rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
                goto err;
        }
err:
        return rc;
}

int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_EDC0);
}

int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
                                        MEM_EDC1);
}

int cudbg_collect_rss(struct cudbg_init *pdbg_init,
                      struct cudbg_buffer *dbg_buff,
                      struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int rc;

        rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
        if (rc)
                return rc;

        rc = t4_read_rss(padap, (u16 *)temp_buff.data);
        if (rc) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_rss_vf_conf *vfconf;
        int vf, rc, vf_count;

        vf_count = padap->params.arch.vfcount;
        rc = cudbg_get_buff(dbg_buff,
                            vf_count * sizeof(struct cudbg_rss_vf_conf),
                            &temp_buff);
        if (rc)
                return rc;

        vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
        for (vf = 0; vf < vf_count; vf++)
                t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
                                      &vfconf[vf].rss_vf_vfh, true);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int rc;

        rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
        if (rc)
                return rc;

        t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_pm_stats *pm_stats_buff;
        int rc;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
                            &temp_buff);
        if (rc)
                return rc;

        pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
        t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
        t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_hw_sched *hw_sched_buff;
        int i, rc = 0;

        if (!padap->params.vpd.cclk)
                return CUDBG_STATUS_CCLK_NOT_DEFINED;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
                            &temp_buff);
        if (rc)
                return rc;

        hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
        hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
        hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
        t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
        for (i = 0; i < NTX_SCHED; ++i)
                t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
                                &hw_sched_buff->ipg[i], true);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct ireg_buf *ch_tp_pio;
        int i, rc, n = 0;
        u32 size;

        if (is_t5(padap->params.chip))
                n = sizeof(t5_tp_pio_array) +
                    sizeof(t5_tp_tm_pio_array) +
                    sizeof(t5_tp_mib_index_array);
        else
                n = sizeof(t6_tp_pio_array) +
                    sizeof(t6_tp_tm_pio_array) +
                    sizeof(t6_tp_mib_index_array);

        n = n / (IREG_NUM_ELEM * sizeof(u32));
        size = sizeof(struct ireg_buf) * n;
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        ch_tp_pio = (struct ireg_buf *)temp_buff.data;

        /* TP_PIO */
        if (is_t5(padap->params.chip))
                n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
        else if (is_t6(padap->params.chip))
                n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

        for (i = 0; i < n; i++) {
                struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
                u32 *buff = ch_tp_pio->outbuf;

                if (is_t5(padap->params.chip)) {
                        tp_pio->ireg_addr = t5_tp_pio_array[i][0];
                        tp_pio->ireg_data = t5_tp_pio_array[i][1];
                        tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
                        tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
                } else if (is_t6(padap->params.chip)) {
                        tp_pio->ireg_addr = t6_tp_pio_array[i][0];
                        tp_pio->ireg_data = t6_tp_pio_array[i][1];
                        tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
                        tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
                }
                t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
                               tp_pio->ireg_local_offset, true);
                ch_tp_pio++;
        }

        /* TP_TM_PIO */
        if (is_t5(padap->params.chip))
                n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
        else if (is_t6(padap->params.chip))
                n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));

        for (i = 0; i < n; i++) {
                struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
                u32 *buff = ch_tp_pio->outbuf;

                if (is_t5(padap->params.chip)) {
                        tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
                        tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
                        tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
                        tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
                } else if (is_t6(padap->params.chip)) {
                        tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
                        tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
                        tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
                        tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
                }
                t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
                                  tp_pio->ireg_local_offset, true);
                ch_tp_pio++;
        }

        /* TP_MIB_INDEX */
        if (is_t5(padap->params.chip))
                n = sizeof(t5_tp_mib_index_array) /
                    (IREG_NUM_ELEM * sizeof(u32));
        else if (is_t6(padap->params.chip))
                n = sizeof(t6_tp_mib_index_array) /
                    (IREG_NUM_ELEM * sizeof(u32));

        for (i = 0; i < n; i++) {
                struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
                u32 *buff = ch_tp_pio->outbuf;

                if (is_t5(padap->params.chip)) {
                        tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
                        tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
                        tp_pio->ireg_local_offset =
                                t5_tp_mib_index_array[i][2];
                        tp_pio->ireg_offset_range =
                                t5_tp_mib_index_array[i][3];
                } else if (is_t6(padap->params.chip)) {
                        tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
                        tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
                        tp_pio->ireg_local_offset =
                                t6_tp_mib_index_array[i][2];
                        tp_pio->ireg_offset_range =
                                t6_tp_mib_index_array[i][3];
                }
                t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
                               tp_pio->ireg_local_offset, true);
                ch_tp_pio++;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
                               struct cudbg_buffer *dbg_buff,
                               struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct ireg_buf *ch_sge_dbg;
        int i, rc;

        rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
        if (rc)
                return rc;

        ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
        for (i = 0; i < 2; i++) {
                struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
                u32 *buff = ch_sge_dbg->outbuf;

                sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
                sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
                sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
                sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
                t4_read_indirect(padap,
                                 sge_pio->ireg_addr,
                                 sge_pio->ireg_data,
                                 buff,
                                 sge_pio->ireg_offset_range,
                                 sge_pio->ireg_local_offset);
                ch_sge_dbg++;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_ulprx_la *ulprx_la_buff;
        int rc;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
                            &temp_buff);
        if (rc)
                return rc;

        ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
        t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
        ulprx_la_buff->size = ULPRX_LA_SIZE;
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
                        struct cudbg_buffer *dbg_buff,
                        struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_tp_la *tp_la_buff;
        int size, rc;

        size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
        tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
        t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
                             struct cudbg_buffer *dbg_buff,
                             struct cudbg_error *cudbg_err)
{
        struct cudbg_cim_pif_la *cim_pif_la_buff;
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        int size, rc;

        size = sizeof(struct cudbg_cim_pif_la) +
               2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
        cim_pif_la_buff->size = CIM_PIFLA_SIZE;
        t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
                           (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
                           NULL, NULL);
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
                           struct cudbg_buffer *dbg_buff,
                           struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_clk_info *clk_info_buff;
        u64 tp_tick_us;
        int rc;

        if (!padap->params.vpd.cclk)
                return CUDBG_STATUS_CCLK_NOT_DEFINED;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
                            &temp_buff);
        if (rc)
                return rc;

        clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
        clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
        clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
        clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
        clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
        tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;

        clk_info_buff->dack_timer =
                (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
                t4_read_reg(padap, TP_DACK_TIMER_A);
        clk_info_buff->retransmit_min =
                tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
        clk_info_buff->retransmit_max =
                tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
        clk_info_buff->persist_timer_min =
                tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
        clk_info_buff->persist_timer_max =
                tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
        clk_info_buff->keepalive_idle_timer =
                tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
        clk_info_buff->keepalive_interval =
                tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
        clk_info_buff->initial_srtt =
                tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
        clk_info_buff->finwait2_timer =
                tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *dbg_buff,
                                struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct ireg_buf *ch_pcie;
        int i, rc, n;
        u32 size;

        n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
        size = sizeof(struct ireg_buf) * n * 2;
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        ch_pcie = (struct ireg_buf *)temp_buff.data;
        /* PCIE_PDBG */
        for (i = 0; i < n; i++) {
                struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
                u32 *buff = ch_pcie->outbuf;

                pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
                pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
                pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
                pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
                t4_read_indirect(padap,
                                 pcie_pio->ireg_addr,
                                 pcie_pio->ireg_data,
                                 buff,
                                 pcie_pio->ireg_offset_range,
                                 pcie_pio->ireg_local_offset);
                ch_pcie++;
        }

        /* PCIE_CDBG */
        n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
        for (i = 0; i < n; i++) {
                struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
                u32 *buff = ch_pcie->outbuf;

                pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
                pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
                pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
                pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
                t4_read_indirect(padap,
                                 pcie_pio->ireg_addr,
                                 pcie_pio->ireg_data,
                                 buff,
                                 pcie_pio->ireg_offset_range,
                                 pcie_pio->ireg_local_offset);
                ch_pcie++;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_buffer temp_buff = { 0 };
        struct ireg_buf *ch_pm;
        int i, rc, n;
        u32 size;

        n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
        size = sizeof(struct ireg_buf) * n * 2;
        rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
        if (rc)
                return rc;

        ch_pm = (struct ireg_buf *)temp_buff.data;
        /* PM_RX */
        for (i = 0; i < n; i++) {
                struct ireg_field *pm_pio = &ch_pm->tp_pio;
                u32 *buff = ch_pm->outbuf;

                pm_pio->ireg_addr = t5_pm_rx_array[i][0];
                pm_pio->ireg_data = t5_pm_rx_array[i][1];
                pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
                pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
                t4_read_indirect(padap,
                                 pm_pio->ireg_addr,
                                 pm_pio->ireg_data,
                                 buff,
                                 pm_pio->ireg_offset_range,
                                 pm_pio->ireg_local_offset);
                ch_pm++;
        }

        /* PM_TX */
        n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
        for (i = 0; i < n; i++) {
                struct ireg_field *pm_pio = &ch_pm->tp_pio;
                u32 *buff = ch_pm->outbuf;

                pm_pio->ireg_addr = t5_pm_tx_array[i][0];
                pm_pio->ireg_data = t5_pm_tx_array[i][1];
                pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
                pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
                t4_read_indirect(padap,
                                 pm_pio->ireg_addr,
                                 pm_pio->ireg_data,
                                 buff,
                                 pm_pio->ireg_offset_range,
                                 pm_pio->ireg_local_offset);
                ch_pm++;
        }
        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

int cudbg_collect_tid(struct cudbg_init *pdbg_init,
                      struct cudbg_buffer *dbg_buff,
                      struct cudbg_error *cudbg_err)
{
        struct adapter *padap = pdbg_init->adap;
        struct cudbg_tid_info_region_rev1 *tid1;
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_tid_info_region *tid;
        u32 para[2], val[2];
        int rc;

        rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
                            &temp_buff);
        if (rc)
                return rc;

        tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
        tid = &tid1->tid;
        tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
        tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
        tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
                             sizeof(struct cudbg_ver_hdr);

#define FW_PARAM_PFVF_A(param) \
        (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
         FW_PARAMS_PARAM_Y_V(0) | \
         FW_PARAMS_PARAM_Z_V(0))

        para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
        para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
        rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
        if (rc < 0) {
                cudbg_err->sys_err = rc;
                cudbg_put_buff(&temp_buff, dbg_buff);
                return rc;
        }
        tid->uotid_base = val[0];
        tid->nuotids = val[1] - val[0] + 1;

        if (is_t5(padap->params.chip)) {
                tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
        } else if (is_t6(padap->params.chip)) {
                tid1->tid_start =
                        t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
                tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);

                para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
                para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
                rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
                                     para, val);
                if (rc < 0) {
                        cudbg_err->sys_err = rc;
                        cudbg_put_buff(&temp_buff, dbg_buff);
                        return rc;
                }
                tid->hpftid_base = val[0];
                tid->nhpftids = val[1] - val[0] + 1;
        }

        tid->ntids = padap->tids.ntids;
        tid->nstids = padap->tids.nstids;
        tid->stid_base = padap->tids.stid_base;
        tid->hash_base = padap->tids.hash_base;

        tid->natids = padap->tids.natids;
        tid->nftids = padap->tids.nftids;
        tid->ftid_base = padap->tids.ftid_base;
        tid->aftid_base = padap->tids.aftid_base;
        tid->aftid_end = padap->tids.aftid_end;

        tid->sftid_base = padap->tids.sftid_base;
        tid->nsftids = padap->tids.nsftids;

        tid->flags = padap->flags;
        tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
        tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
        tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

#undef FW_PARAM_PFVF_A

        cudbg_write_and_release_buff(&temp_buff, dbg_buff);
        return rc;
}

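/* Derive the Ethernet address and mask from a TCAM X/Y pair. */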
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
{
        *mask = x | y;
        y = (__force u64)cpu_to_be64(y);
        memcpy(addr, (char *)&y + 2, ETH_ALEN);
}

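/* Read the MPS replication map directly from hardware registers; used as a
 * fallback when the firmware LDST command fails.
 */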
static void cudbg_mps_rpl_backdoor(struct adapter *padap,
                                   struct fw_ldst_mps_rplc *mps_rplc)
{
        if (is_t5(padap->params.chip)) {
                mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP3_A));
                mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP2_A));
                mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP1_A));
                mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP0_A));
        } else {
                mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP7_A));
                mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP6_A));
                mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP5_A));
                mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
                                                          MPS_VF_RPLCT_MAP4_A));
        }
        mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
        mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
        mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
        mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}

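/* Read MPS TCAM entry @idx and fill in @tcam. */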
1137 static int cudbg_collect_tcam_index(struct adapter *padap,
1138                                     struct cudbg_mps_tcam *tcam, u32 idx)
1139 {
1140         u64 tcamy, tcamx, val;
1141         u32 ctl, data2;
1142         int rc = 0;
1143
1144         if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
1145                 /* CtlReqID   - 1: use Host Driver Requester ID
1146                  * CtlCmdType - 0: Read, 1: Write
1147                  * CtlTcamSel - 0: TCAM0, 1: TCAM1
1148                  * CtlXYBitSel- 0: Y bit, 1: X bit
1149                  */
1150
1151                 /* Read tcamy */
1152                 ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
1153                 if (idx < 256)
1154                         ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
1155                 else
1156                         ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
1157
1158                 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1159                 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
1160                 tcamy = DMACH_G(val) << 32;
1161                 tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
1162                 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
1163                 tcam->lookup_type = DATALKPTYPE_G(data2);
1164
1165                 /* 0 - Outer header, 1 - Inner header
1166                  * [71:48] bit locations are overloaded for
1167                  * outer vs. inner lookup types.
1168                  */
1169                 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
1170                         /* Inner header VNI */
1171                         tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
1172                         tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
1173                         tcam->dip_hit = data2 & DATADIPHIT_F;
1174                 } else {
1175                         tcam->vlan_vld = data2 & DATAVIDH2_F;
1176                         tcam->ivlan = VIDL_G(val);
1177                 }
1178
1179                 tcam->port_num = DATAPORTNUM_G(data2);
1180
1181                 /* Read tcamx. Change the control param */
1182                 ctl |= CTLXYBITSEL_V(1);
1183                 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
1184                 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
1185                 tcamx = DMACH_G(val) << 32;
1186                 tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
1187                 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
1188                 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
1189                         /* Inner header VNI mask */
1190                         tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
1191                         tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
1192                 }
1193         } else {
1194                 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
1195                 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
1196         }
1197
1198         /* If no entry, return */
1199         if (tcamx & tcamy)
1200                 return rc;
1201
1202         tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
1203         tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
1204
1205         if (is_t5(padap->params.chip))
1206                 tcam->repli = (tcam->cls_lo & REPLICATE_F);
1207         else if (is_t6(padap->params.chip))
1208                 tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
1209
1210         if (tcam->repli) {
1211                 struct fw_ldst_cmd ldst_cmd;
1212                 struct fw_ldst_mps_rplc mps_rplc;
1213
1214                 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
1215                 ldst_cmd.op_to_addrspace =
1216                         htonl(FW_CMD_OP_V(FW_LDST_CMD) |
1217                               FW_CMD_REQUEST_F | FW_CMD_READ_F |
1218                               FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
1219                 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
1220                 ldst_cmd.u.mps.rplc.fid_idx =
1221                         htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
1222                               FW_LDST_CMD_IDX_V(idx));
1223
1224                 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
1225                                 &ldst_cmd);
1226                 if (rc)
1227                         cudbg_mps_rpl_backdoor(padap, &mps_rplc);
1228                 else
1229                         mps_rplc = ldst_cmd.u.mps.rplc;
1230
1231                 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
1232                 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
1233                 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
1234                 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
1235                 if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
1236                         tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
1237                         tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
1238                         tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
1239                         tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
1240                 }
1241         }
1242         cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
1243         tcam->idx = idx;
1244         tcam->rplc_size = padap->params.arch.mps_rplc_size;
1245         return rc;
1246 }
1247
1248 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
1249                            struct cudbg_buffer *dbg_buff,
1250                            struct cudbg_error *cudbg_err)
1251 {
1252         struct adapter *padap = pdbg_init->adap;
1253         struct cudbg_buffer temp_buff = { 0 };
1254         u32 size = 0, i, n, total_size = 0;
1255         struct cudbg_mps_tcam *tcam;
1256         int rc;
1257
1258         n = padap->params.arch.mps_tcam_size;
1259         size = sizeof(struct cudbg_mps_tcam) * n;
1260         rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1261         if (rc)
1262                 return rc;
1263
1264         tcam = (struct cudbg_mps_tcam *)temp_buff.data;
1265         for (i = 0; i < n; i++) {
1266                 rc = cudbg_collect_tcam_index(padap, tcam, i);
1267                 if (rc) {
1268                         cudbg_err->sys_err = rc;
1269                         cudbg_put_buff(&temp_buff, dbg_buff);
1270                         return rc;
1271                 }
1272                 total_size += sizeof(struct cudbg_mps_tcam);
1273                 tcam++;
1274         }
1275
1276         if (!total_size) {
1277                 rc = CUDBG_SYSTEM_ERROR;
1278                 cudbg_err->sys_err = rc;
1279                 cudbg_put_buff(&temp_buff, dbg_buff);
1280                 return rc;
1281         }
1282         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1283         return rc;
1284 }
1285
1286 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
1287                            struct cudbg_buffer *dbg_buff,
1288                            struct cudbg_error *cudbg_err)
1289 {
1290         struct adapter *padap = pdbg_init->adap;
1291         struct cudbg_buffer temp_buff = { 0 };
1292         struct cudbg_vpd_data *vpd_data;
1293         int rc;
1294
1295         rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
1296                             &temp_buff);
1297         if (rc)
1298                 return rc;
1299
1300         vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
1301         memcpy(vpd_data->sn, padap->params.vpd.sn, SERNUM_LEN + 1);
1302         memcpy(vpd_data->bn, padap->params.vpd.pn, PN_LEN + 1);
1303         memcpy(vpd_data->na, padap->params.vpd.na, MACADDR_LEN + 1);
1304         memcpy(vpd_data->mn, padap->params.vpd.id, ID_LEN + 1);
1305         vpd_data->scfg_vers = padap->params.scfg_vers;
1306         vpd_data->vpd_vers = padap->params.vpd_vers;
1307         vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(padap->params.fw_vers);
1308         vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(padap->params.fw_vers);
1309         vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(padap->params.fw_vers);
1310         vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(padap->params.fw_vers);
1311         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1312         return rc;
1313 }
1314
1315 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
1316                         struct cudbg_buffer *dbg_buff,
1317                         struct cudbg_error *cudbg_err)
1318 {
1319         struct adapter *padap = pdbg_init->adap;
1320         struct cudbg_buffer temp_buff = { 0 };
1321         u32 size;
1322         int rc;
1323
1324         size = sizeof(u16) * NMTUS * NCCTRL_WIN;
1325         rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1326         if (rc)
1327                 return rc;
1328
1329         t4_read_cong_tbl(padap, (void *)temp_buff.data);
1330         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1331         return rc;
1332 }
1333
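/* Collect MA indirect registers; only supported on T6 and later chips.
 * The first register group is read as contiguous ranges, while the
 * second group is read one word at a time, stepping the local offset
 * by 0x20 between reads.
 */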
1334 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
1335                               struct cudbg_buffer *dbg_buff,
1336                               struct cudbg_error *cudbg_err)
1337 {
1338         struct adapter *padap = pdbg_init->adap;
1339         struct cudbg_buffer temp_buff = { 0 };
1340         struct ireg_buf *ma_indr;
1341         int i, rc, n;
1342         u32 size, j;
1343
1344         if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1345                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
1346
1347         n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1348         size = sizeof(struct ireg_buf) * n * 2;
1349         rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1350         if (rc)
1351                 return rc;
1352
1353         ma_indr = (struct ireg_buf *)temp_buff.data;
1354         for (i = 0; i < n; i++) {
1355                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
1356                 u32 *buff = ma_indr->outbuf;
1357
1358                 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
1359                 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
1360                 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
1361                 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
1362                 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
1363                                  buff, ma_fli->ireg_offset_range,
1364                                  ma_fli->ireg_local_offset);
1365                 ma_indr++;
1366         }
1367
1368         n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
1369         for (i = 0; i < n; i++) {
1370                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
1371                 u32 *buff = ma_indr->outbuf;
1372
1373                 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
1374                 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
1375                 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
1376                 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
1377                         t4_read_indirect(padap, ma_fli->ireg_addr,
1378                                          ma_fli->ireg_data, buff, 1,
1379                                          ma_fli->ireg_local_offset);
1380                         buff++;
1381                         ma_fli->ireg_local_offset += 0x20;
1382                 }
1383                 ma_indr++;
1384         }
1385         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1386         return rc;
1387 }
1388
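/* Capture the ULP TX logic analyzer state.  For each of the
 * CUDBG_NUM_ULPTX LAs the read/write pointers and current read data are
 * saved, then ULP_TX_LA_RDDATA is read CUDBG_NUM_ULPTX_READ more times;
 * successive reads are expected to return successive LA entries.
 */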
1389 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
1390                            struct cudbg_buffer *dbg_buff,
1391                            struct cudbg_error *cudbg_err)
1392 {
1393         struct adapter *padap = pdbg_init->adap;
1394         struct cudbg_buffer temp_buff = { 0 };
1395         struct cudbg_ulptx_la *ulptx_la_buff;
1396         u32 i, j;
1397         int rc;
1398
1399         rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
1400                             &temp_buff);
1401         if (rc)
1402                 return rc;
1403
1404         ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data;
1405         for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
1406                 ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
1407                                                       ULP_TX_LA_RDPTR_0_A +
1408                                                       0x10 * i);
1409                 ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
1410                                                       ULP_TX_LA_WRPTR_0_A +
1411                                                       0x10 * i);
1412                 ulptx_la_buff->rddata[i] = t4_read_reg(padap,
1413                                                        ULP_TX_LA_RDDATA_0_A +
1414                                                        0x10 * i);
1415                 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
1416                         ulptx_la_buff->rd_data[i][j] =
1417                                 t4_read_reg(padap,
1418                                             ULP_TX_LA_RDDATA_0_A + 0x10 * i);
1419         }
1420         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1421         return rc;
1422 }
1423
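/* Collect UP CIM indirect registers.  The register list is selected per
 * chip (t5_up_cim_reg_array for T5, t6_up_cim_reg_array for T6) and
 * each range is fetched through t4_cim_read().
 */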
1424 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
1425                                   struct cudbg_buffer *dbg_buff,
1426                                   struct cudbg_error *cudbg_err)
1427 {
1428         struct adapter *padap = pdbg_init->adap;
1429         struct cudbg_buffer temp_buff = { 0 };
1430         struct ireg_buf *up_cim;
1431         int i, rc, n;
1432         u32 size;
1433
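	/* Note: the entry count (and therefore the buffer size) is derived
	 * from the T5 array; the T6 array is assumed to contain a matching
	 * number of entries.
	 */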
1434         n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
1435         size = sizeof(struct ireg_buf) * n;
1436         rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1437         if (rc)
1438                 return rc;
1439
1440         up_cim = (struct ireg_buf *)temp_buff.data;
1441         for (i = 0; i < n; i++) {
1442                 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
1443                 u32 *buff = up_cim->outbuf;
1444
1445                 if (is_t5(padap->params.chip)) {
1446                         up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
1447                         up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
1448                         up_cim_reg->ireg_local_offset =
1449                                                 t5_up_cim_reg_array[i][2];
1450                         up_cim_reg->ireg_offset_range =
1451                                                 t5_up_cim_reg_array[i][3];
1452                 } else if (is_t6(padap->params.chip)) {
1453                         up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
1454                         up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
1455                         up_cim_reg->ireg_local_offset =
1456                                                 t6_up_cim_reg_array[i][2];
1457                         up_cim_reg->ireg_offset_range =
1458                                                 t6_up_cim_reg_array[i][3];
1459                 }
1460
1461                 rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
1462                                  up_cim_reg->ireg_offset_range, buff);
1463                 if (rc) {
1464                         cudbg_put_buff(&temp_buff, dbg_buff);
1465                         return rc;
1466                 }
1467                 up_cim++;
1468         }
1469         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1470         return rc;
1471 }
1472
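/* Read the CIM PBT tables: dynamic entries, static entries, LRF entries
 * and the raw PBT data words, each fetched one word at a time via
 * t4_cim_read().
 */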
1473 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
1474                              struct cudbg_buffer *dbg_buff,
1475                              struct cudbg_error *cudbg_err)
1476 {
1477         struct adapter *padap = pdbg_init->adap;
1478         struct cudbg_buffer temp_buff = { 0 };
1479         struct cudbg_pbt_tables *pbt;
1480         int i, rc;
1481         u32 addr;
1482
1483         rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
1484                             &temp_buff);
1485         if (rc)
1486                 return rc;
1487
1488         pbt = (struct cudbg_pbt_tables *)temp_buff.data;
1489         /* PBT dynamic entries */
1490         addr = CUDBG_CHAC_PBT_ADDR;
1491         for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
1492                 rc = t4_cim_read(padap, addr + (i * 4), 1,
1493                                  &pbt->pbt_dynamic[i]);
1494                 if (rc) {
1495                         cudbg_err->sys_err = rc;
1496                         cudbg_put_buff(&temp_buff, dbg_buff);
1497                         return rc;
1498                 }
1499         }
1500
1501         /* PBT static entries */
1502         /* Static entries start at the address with bit 6 set (+0x40) */
1503         addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
1504         for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
1505                 rc = t4_cim_read(padap, addr + (i * 4), 1,
1506                                  &pbt->pbt_static[i]);
1507                 if (rc) {
1508                         cudbg_err->sys_err = rc;
1509                         cudbg_put_buff(&temp_buff, dbg_buff);
1510                         return rc;
1511                 }
1512         }
1513
1514         /* LRF entries */
1515         addr = CUDBG_CHAC_PBT_LRF;
1516         for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
1517                 rc = t4_cim_read(padap, addr + (i * 4), 1,
1518                                  &pbt->lrf_table[i]);
1519                 if (rc) {
1520                         cudbg_err->sys_err = rc;
1521                         cudbg_put_buff(&temp_buff, dbg_buff);
1522                         return rc;
1523                 }
1524         }
1525
1526         /* PBT data entries */
1527         addr = CUDBG_CHAC_PBT_DATA;
1528         for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
1529                 rc = t4_cim_read(padap, addr + (i * 4), 1,
1530                                  &pbt->pbt_data[i]);
1531                 if (rc) {
1532                         cudbg_err->sys_err = rc;
1533                         cudbg_put_buff(&temp_buff, dbg_buff);
1534                         return rc;
1535                 }
1536         }
1537         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1538         return rc;
1539 }
1540
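/* Copy out the driver's mailbox command log.  Entries are walked in
 * order starting from the log cursor, unused slots (zero timestamp) are
 * skipped, and each command's flits are split into hi/lo 32-bit halves
 * for the dump.
 */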
1541 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
1542                            struct cudbg_buffer *dbg_buff,
1543                            struct cudbg_error *cudbg_err)
1544 {
1545         struct adapter *padap = pdbg_init->adap;
1546         struct cudbg_mbox_log *mboxlog = NULL;
1547         struct cudbg_buffer temp_buff = { 0 };
1548         struct mbox_cmd_log *log = NULL;
1549         struct mbox_cmd *entry;
1550         unsigned int entry_idx;
1551         u16 mbox_cmds;
1552         int i, k, rc;
1553         u64 flit;
1554         u32 size;
1555
1556         log = padap->mbox_log;
1557         mbox_cmds = padap->mbox_log->size;
1558         size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
1559         rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1560         if (rc)
1561                 return rc;
1562
1563         mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
1564         for (k = 0; k < mbox_cmds; k++) {
1565                 entry_idx = log->cursor + k;
1566                 if (entry_idx >= log->size)
1567                         entry_idx -= log->size;
1568
1569                 entry = mbox_cmd_log_entry(log, entry_idx);
1570                 /* skip over unused entries */
1571                 if (entry->timestamp == 0)
1572                         continue;
1573
1574                 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
1575                 for (i = 0; i < MBOX_LEN / 8; i++) {
1576                         flit = entry->cmd[i];
1577                         mboxlog->hi[i] = (u32)(flit >> 32);
1578                         mboxlog->lo[i] = (u32)flit;
1579                 }
1580                 mboxlog++;
1581         }
1582         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1583         return rc;
1584 }
1585
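/* Collect HMA indirect registers; only supported on T6 and later chips.
 * Each entry of t6_hma_ireg_array describes one indirect address/data
 * register pair and the offset range to read via t4_read_indirect().
 */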
1586 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
1587                                struct cudbg_buffer *dbg_buff,
1588                                struct cudbg_error *cudbg_err)
1589 {
1590         struct adapter *padap = pdbg_init->adap;
1591         struct cudbg_buffer temp_buff = { 0 };
1592         struct ireg_buf *hma_indr;
1593         int i, rc, n;
1594         u32 size;
1595
1596         if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
1597                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
1598
1599         n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
1600         size = sizeof(struct ireg_buf) * n;
1601         rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
1602         if (rc)
1603                 return rc;
1604
1605         hma_indr = (struct ireg_buf *)temp_buff.data;
1606         for (i = 0; i < n; i++) {
1607                 struct ireg_field *hma_fli = &hma_indr->tp_pio;
1608                 u32 *buff = hma_indr->outbuf;
1609
1610                 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
1611                 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
1612                 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
1613                 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
1614                 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
1615                                  buff, hma_fli->ireg_offset_range,
1616                                  hma_fli->ireg_local_offset);
1617                 hma_indr++;
1618         }
1619         cudbg_write_and_release_buff(&temp_buff, dbg_buff);
1620         return rc;
1621 }