drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10
11 #ifdef CONFIG_DEBUG_FS
12
13 #include <linux/fs.h>
14 #include <linux/debugfs.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17
18 #include "rvu_struct.h"
19 #include "rvu_reg.h"
20 #include "rvu.h"
21 #include "cgx.h"
22 #include "lmac_common.h"
23 #include "npc.h"
24
25 #define DEBUGFS_DIR_NAME "octeontx2"
26
27 enum {
28         CGX_STAT0,
29         CGX_STAT1,
30         CGX_STAT2,
31         CGX_STAT3,
32         CGX_STAT4,
33         CGX_STAT5,
34         CGX_STAT6,
35         CGX_STAT7,
36         CGX_STAT8,
37         CGX_STAT9,
38         CGX_STAT10,
39         CGX_STAT11,
40         CGX_STAT12,
41         CGX_STAT13,
42         CGX_STAT14,
43         CGX_STAT15,
44         CGX_STAT16,
45         CGX_STAT17,
46         CGX_STAT18,
47 };
48
49 /* NIX TX stats */
50 enum nix_stat_lf_tx {
51         TX_UCAST        = 0x0,
52         TX_BCAST        = 0x1,
53         TX_MCAST        = 0x2,
54         TX_DROP         = 0x3,
55         TX_OCTS         = 0x4,
56         TX_STATS_ENUM_LAST,
57 };
58
59 /* NIX RX stats */
60 enum nix_stat_lf_rx {
61         RX_OCTS         = 0x0,
62         RX_UCAST        = 0x1,
63         RX_BCAST        = 0x2,
64         RX_MCAST        = 0x3,
65         RX_DROP         = 0x4,
66         RX_DROP_OCTS    = 0x5,
67         RX_FCS          = 0x6,
68         RX_ERR          = 0x7,
69         RX_DRP_BCAST    = 0x8,
70         RX_DRP_MCAST    = 0x9,
71         RX_DRP_L3BCAST  = 0xa,
72         RX_DRP_L3MCAST  = 0xb,
73         RX_STATS_ENUM_LAST,
74 };
75
76 static char *cgx_rx_stats_fields[] = {
77         [CGX_STAT0]     = "Received packets",
78         [CGX_STAT1]     = "Octets of received packets",
79         [CGX_STAT2]     = "Received PAUSE packets",
80         [CGX_STAT3]     = "Received PAUSE and control packets",
81         [CGX_STAT4]     = "Filtered DMAC0 (NIX-bound) packets",
82         [CGX_STAT5]     = "Filtered DMAC0 (NIX-bound) octets",
83         [CGX_STAT6]     = "Packets dropped due to RX FIFO full",
84         [CGX_STAT7]     = "Octets dropped due to RX FIFO full",
85         [CGX_STAT8]     = "Error packets",
86         [CGX_STAT9]     = "Filtered DMAC1 (NCSI-bound) packets",
87         [CGX_STAT10]    = "Filtered DMAC1 (NCSI-bound) octets",
88         [CGX_STAT11]    = "NCSI-bound packets dropped",
89         [CGX_STAT12]    = "NCSI-bound octets dropped",
90 };
91
92 static char *cgx_tx_stats_fields[] = {
93         [CGX_STAT0]     = "Packets dropped due to excessive collisions",
94         [CGX_STAT1]     = "Packets dropped due to excessive deferral",
95         [CGX_STAT2]     = "Multiple collisions before successful transmission",
96         [CGX_STAT3]     = "Single collisions before successful transmission",
97         [CGX_STAT4]     = "Total octets sent on the interface",
98         [CGX_STAT5]     = "Total frames sent on the interface",
99         [CGX_STAT6]     = "Packets sent with an octet count < 64",
100         [CGX_STAT7]     = "Packets sent with an octet count == 64",
101         [CGX_STAT8]     = "Packets sent with an octet count of 65-127",
102         [CGX_STAT9]     = "Packets sent with an octet count of 128-255",
103         [CGX_STAT10]    = "Packets sent with an octet count of 256-511",
104         [CGX_STAT11]    = "Packets sent with an octet count of 512-1023",
105         [CGX_STAT12]    = "Packets sent with an octet count of 1024-1518",
106         [CGX_STAT13]    = "Packets sent with an octet count of > 1518",
107         [CGX_STAT14]    = "Packets sent to a broadcast DMAC",
108         [CGX_STAT15]    = "Packets sent to the multicast DMAC",
109         [CGX_STAT16]    = "Packets truncated due to transmit underflow",
110         [CGX_STAT17]    = "Control/PAUSE packets sent",
111 };
112
113 static char *rpm_rx_stats_fields[] = {
114         "Octets of received packets",
115         "Octets of received packets without error",
116         "Received packets with alignment errors",
117         "Control/PAUSE packets received",
118         "Packets received with frame too long errors",
119         "Packets received with in-range length errors",
120         "Received packets",
121         "Packets received with frame check sequence errors",
122         "Packets received with VLAN header",
123         "Error packets",
124         "Packets received with unicast DMAC",
125         "Packets received with multicast DMAC",
126         "Packets received with broadcast DMAC",
127         "Dropped packets",
128         "Total frames received on interface",
129         "Packets received with an octet count < 64",
130         "Packets received with an octet count == 64",
131         "Packets received with an octet count of 65-127",
132         "Packets received with an octet count of 128-255",
133         "Packets received with an octet count of 256-511",
134         "Packets received with an octet count of 512-1023",
135         "Packets received with an octet count of 1024-1518",
136         "Packets received with an octet count of > 1518",
137         "Oversized Packets",
138         "Jabber Packets",
139         "Fragmented Packets",
140         "CBFC(class based flow control) pause frames received for class 0",
141         "CBFC pause frames received for class 1",
142         "CBFC pause frames received for class 2",
143         "CBFC pause frames received for class 3",
144         "CBFC pause frames received for class 4",
145         "CBFC pause frames received for class 5",
146         "CBFC pause frames received for class 6",
147         "CBFC pause frames received for class 7",
148         "CBFC pause frames received for class 8",
149         "CBFC pause frames received for class 9",
150         "CBFC pause frames received for class 10",
151         "CBFC pause frames received for class 11",
152         "CBFC pause frames received for class 12",
153         "CBFC pause frames received for class 13",
154         "CBFC pause frames received for class 14",
155         "CBFC pause frames received for class 15",
156         "MAC control packets received",
157 };
158
159 static char *rpm_tx_stats_fields[] = {
160         "Total octets sent on the interface",
161         "Total octets transmitted OK",
162         "Control/Pause frames sent",
163         "Total frames transmitted OK",
164         "Total frames sent with VLAN header",
165         "Error Packets",
166         "Packets sent to unicast DMAC",
167         "Packets sent to the multicast DMAC",
168         "Packets sent to a broadcast DMAC",
169         "Packets sent with an octet count == 64",
170         "Packets sent with an octet count of 65-127",
171         "Packets sent with an octet count of 128-255",
172         "Packets sent with an octet count of 256-511",
173         "Packets sent with an octet count of 512-1023",
174         "Packets sent with an octet count of 1024-1518",
175         "Packets sent with an octet count of > 1518",
176         "CBFC(class based flow control) pause frames transmitted for class 0",
177         "CBFC pause frames transmitted for class 1",
178         "CBFC pause frames transmitted for class 2",
179         "CBFC pause frames transmitted for class 3",
180         "CBFC pause frames transmitted for class 4",
181         "CBFC pause frames transmitted for class 5",
182         "CBFC pause frames transmitted for class 6",
183         "CBFC pause frames transmitted for class 7",
184         "CBFC pause frames transmitted for class 8",
185         "CBFC pause frames transmitted for class 9",
186         "CBFC pause frames transmitted for class 10",
187         "CBFC pause frames transmitted for class 11",
188         "CBFC pause frames transmitted for class 12",
189         "CBFC pause frames transmitted for class 13",
190         "CBFC pause frames transmitted for class 14",
191         "CBFC pause frames transmitted for class 15",
192         "MAC control packets sent",
193         "Total frames sent on the interface"
194 };
195
196 enum cpt_eng_type {
197         CPT_AE_TYPE = 1,
198         CPT_SE_TYPE = 2,
199         CPT_IE_TYPE = 3,
200 };
201
202 #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
203                                                 blk_addr, NDC_AF_CONST) & 0xFF)
204
205 #define rvu_dbg_NULL NULL
206 #define rvu_dbg_open_NULL NULL
207
208 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)     \
209 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
210 { \
211         return single_open(file, rvu_dbg_##read_op, inode->i_private); \
212 } \
213 static const struct file_operations rvu_dbg_##name##_fops = { \
214         .owner          = THIS_MODULE, \
215         .open           = rvu_dbg_open_##name, \
216         .read           = seq_read, \
217         .write          = rvu_dbg_##write_op, \
218         .llseek         = seq_lseek, \
219         .release        = single_release, \
220 }
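
/* A note on usage: RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display,
 * npa_qsize_write), for example, expands to rvu_dbg_open_npa_qsize() plus
 * rvu_dbg_npa_qsize_fops, wiring reads through the seq_file handler
 * rvu_dbg_npa_qsize_display() and writes through rvu_dbg_npa_qsize_write().
 * Passing NULL as write_op resolves to the rvu_dbg_NULL define above, so
 * read-only entries simply end up with .write = NULL.
 */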
221
222 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
223 static const struct file_operations rvu_dbg_##name##_fops = { \
224         .owner = THIS_MODULE, \
225         .open = simple_open, \
226         .read = rvu_dbg_##read_op, \
227         .write = rvu_dbg_##write_op \
228 }
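
/* RVU_DEBUG_FOPS(), by contrast, builds a plain file_operations around
 * simple_open() for entries that implement their own read/write callbacks,
 * e.g. RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL) below binds
 * .read to rvu_dbg_rsrc_attach_status() with no write handler.
 */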
229
230 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
231
232 /* Dumps current provisioning status of all RVU block LFs */
233 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
234                                           char __user *buffer,
235                                           size_t count, loff_t *ppos)
236 {
237         int index, off = 0, flag = 0, go_back = 0, len = 0;
238         struct rvu *rvu = filp->private_data;
239         int lf, pf, vf, pcifunc;
240         struct rvu_block block;
241         int bytes_not_copied;
242         int lf_str_size = 12;
243         int buf_size = 2048;
244         char *lfs;
245         char *buf;
246
247         /* don't allow partial reads */
248         if (*ppos != 0)
249                 return 0;
250
251         buf = kzalloc(buf_size, GFP_KERNEL);
252         if (!buf)
253                 return -ENOMEM;
254
255         lfs = kzalloc(lf_str_size, GFP_KERNEL);
256         if (!lfs) {
257                 kfree(buf);
258                 return -ENOMEM;
259         }
260         off +=  scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
261                           "pcifunc");
262         for (index = 0; index < BLK_COUNT; index++)
263                 if (strlen(rvu->hw->block[index].name)) {
264                         off += scnprintf(&buf[off], buf_size - 1 - off,
265                                          "%-*s", lf_str_size,
266                                          rvu->hw->block[index].name);
267                 }
268         off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
269         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
270                 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
271                         pcifunc = pf << 10 | vf;
272                         if (!pcifunc)
273                                 continue;
274
275                         if (vf) {
276                                 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
277                                 go_back = scnprintf(&buf[off],
278                                                     buf_size - 1 - off,
279                                                     "%-*s", lf_str_size, lfs);
280                         } else {
281                                 sprintf(lfs, "PF%d", pf);
282                                 go_back = scnprintf(&buf[off],
283                                                     buf_size - 1 - off,
284                                                     "%-*s", lf_str_size, lfs);
285                         }
286
287                         off += go_back;
288                         for (index = 0; index < BLKTYPE_MAX; index++) {
289                                 block = rvu->hw->block[index];
290                                 if (!strlen(block.name))
291                                         continue;
292                                 len = 0;
293                                 lfs[len] = '\0';
294                                 for (lf = 0; lf < block.lf.max; lf++) {
295                                         if (block.fn_map[lf] != pcifunc)
296                                                 continue;
297                                         flag = 1;
298                                         len += sprintf(&lfs[len], "%d,", lf);
299                                 }
300
301                                 if (flag)
302                                         len--;
303                                 lfs[len] = '\0';
304                                 off += scnprintf(&buf[off], buf_size - 1 - off,
305                                                  "%-*s", lf_str_size, lfs);
306                                 if (!strlen(lfs))
307                                         go_back += lf_str_size;
308                         }
309                         if (!flag)
310                                 off -= go_back;
311                         else
312                                 flag = 0;
313                         off--;
314                         off +=  scnprintf(&buf[off], buf_size - 1 - off, "\n");
315                 }
316         }
317
318         bytes_not_copied = copy_to_user(buffer, buf, off);
319         kfree(lfs);
320         kfree(buf);
321
322         if (bytes_not_copied)
323                 return -EFAULT;
324
325         *ppos = off;
326         return off;
327 }
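
/* Purely illustrative example of what reading this file can look like (the
 * block columns and LF numbers are hypothetical and depend on the silicon
 * and on what has actually been attached):
 *
 *   pcifunc     NPA         NIX0        NIX1
 *   PF1         0           0
 *   PF1:VF0     1           1,2
 */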
328
329 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
330
331 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
332 {
333         struct rvu *rvu = filp->private;
334         struct pci_dev *pdev = NULL;
335         struct mac_ops *mac_ops;
336         char cgx[10], lmac[10];
337         struct rvu_pfvf *pfvf;
338         int pf, domain, blkid;
339         u8 cgx_id, lmac_id;
340         u16 pcifunc;
341
342         domain = 2;
343         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
344         /* There can be no CGX devices at all */
345         if (!mac_ops)
346                 return 0;
347         seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
348                    mac_ops->name);
349         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
350                 if (!is_pf_cgxmapped(rvu, pf))
351                         continue;
352
353                 pdev =  pci_get_domain_bus_and_slot(domain, pf + 1, 0);
354                 if (!pdev)
355                         continue;
356
357                 cgx[0] = 0;
358                 lmac[0] = 0;
359                 pcifunc = pf << 10;
360                 pfvf = rvu_get_pfvf(rvu, pcifunc);
361
362                 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
363                         blkid = 0;
364                 else
365                         blkid = 1;
366
367                 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
368                                     &lmac_id);
369                 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
370                 sprintf(lmac, "LMAC%d", lmac_id);
371                 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
372                            dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
373         }
374         return 0;
375 }
376
377 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
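
/* Illustrative output of the rvu_pf_cgx_map entry (device name, pcifunc and
 * CGX/LMAC ids below are made up for the example; the MAC column is titled
 * with mac_ops->name, assumed here to be "cgx"):
 *
 *   PCI dev          RVU PF Func     NIX block       cgx     LMAC
 *   0002:02:00.0     0x400           NIX0            cgx0    LMAC0
 */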
378
379 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
380                                 u16 *pcifunc)
381 {
382         struct rvu_block *block;
383         struct rvu_hwinfo *hw;
384
385         hw = rvu->hw;
386         block = &hw->block[blkaddr];
387
388         if (lf < 0 || lf >= block->lf.max) {
389                 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
390                          block->lf.max - 1);
391                 return false;
392         }
393
394         *pcifunc = block->fn_map[lf];
395         if (!*pcifunc) {
396                 dev_warn(rvu->dev,
397                          "This LF is not attached to any RVU PFFUNC\n");
398                 return false;
399         }
400         return true;
401 }
402
403 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
404 {
405         char *buf;
406
407         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
408         if (!buf)
409                 return;
410
411         if (!pfvf->aura_ctx) {
412                 seq_puts(m, "Aura context is not initialized\n");
413         } else {
414                 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
415                                         pfvf->aura_ctx->qsize);
416                 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
417                 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
418         }
419
420         if (!pfvf->pool_ctx) {
421                 seq_puts(m, "Pool context is not initialized\n");
422         } else {
423                 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
424                                         pfvf->pool_ctx->qsize);
425                 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
426                 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
427         }
428         kfree(buf);
429 }
430
431 /* The 'qsize' entry dumps current Aura/Pool context Qsize
432  * and each context's current enable/disable status in a bitmap.
433  */
434 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
435                                  int blktype)
436 {
437         void (*print_qsize)(struct seq_file *filp,
438                             struct rvu_pfvf *pfvf) = NULL;
439         struct dentry *current_dir;
440         struct rvu_pfvf *pfvf;
441         struct rvu *rvu;
442         int qsize_id;
443         u16 pcifunc;
444         int blkaddr;
445
446         rvu = filp->private;
447         switch (blktype) {
448         case BLKTYPE_NPA:
449                 qsize_id = rvu->rvu_dbg.npa_qsize_id;
450                 print_qsize = print_npa_qsize;
451                 break;
452
453         case BLKTYPE_NIX:
454                 qsize_id = rvu->rvu_dbg.nix_qsize_id;
455                 print_qsize = print_nix_qsize;
456                 break;
457
458         default:
459                 return -EINVAL;
460         }
461
462         if (blktype == BLKTYPE_NPA) {
463                 blkaddr = BLKADDR_NPA;
464         } else {
465                 current_dir = filp->file->f_path.dentry->d_parent;
466                 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
467                                    BLKADDR_NIX1 : BLKADDR_NIX0);
468         }
469
470         if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
471                 return -EINVAL;
472
473         pfvf = rvu_get_pfvf(rvu, pcifunc);
474         print_qsize(filp, pfvf);
475
476         return 0;
477 }
478
479 static ssize_t rvu_dbg_qsize_write(struct file *filp,
480                                    const char __user *buffer, size_t count,
481                                    loff_t *ppos, int blktype)
482 {
483         char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
484         struct seq_file *seqfile = filp->private_data;
485         char *cmd_buf, *cmd_buf_tmp, *subtoken;
486         struct rvu *rvu = seqfile->private;
487         struct dentry *current_dir;
488         int blkaddr;
489         u16 pcifunc;
490         int ret, lf;
491
492         cmd_buf = memdup_user(buffer, count + 1);
493         if (IS_ERR(cmd_buf))
494                 return PTR_ERR(cmd_buf);
495
496         cmd_buf[count] = '\0';
497
498         cmd_buf_tmp = strchr(cmd_buf, '\n');
499         if (cmd_buf_tmp) {
500                 *cmd_buf_tmp = '\0';
501                 count = cmd_buf_tmp - cmd_buf + 1;
502         }
503
504         cmd_buf_tmp = cmd_buf;
505         subtoken = strsep(&cmd_buf, " ");
506         ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
507         if (cmd_buf)
508                 ret = -EINVAL;
509
510         if (!strncmp(subtoken, "help", 4) || ret < 0) {
511                 dev_info(rvu->dev, "Use echo <%s-lf> > qsize\n", blk_string);
512                 goto qsize_write_done;
513         }
514
515         if (blktype == BLKTYPE_NPA) {
516                 blkaddr = BLKADDR_NPA;
517         } else {
518                 current_dir = filp->f_path.dentry->d_parent;
519                 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
520                                    BLKADDR_NIX1 : BLKADDR_NIX0);
521         }
522
523         if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
524                 ret = -EINVAL;
525                 goto qsize_write_done;
526         }
527         if (blktype  == BLKTYPE_NPA)
528                 rvu->rvu_dbg.npa_qsize_id = lf;
529         else
530                 rvu->rvu_dbg.nix_qsize_id = lf;
531
532 qsize_write_done:
533         kfree(cmd_buf_tmp);
534         return ret ? ret : count;
535 }
536
537 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
538                                        const char __user *buffer,
539                                        size_t count, loff_t *ppos)
540 {
541         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
542                                             BLKTYPE_NPA);
543 }
544
545 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
546 {
547         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
548 }
549
550 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
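
/* Sketch of typical usage from user space, assuming debugfs is mounted at
 * /sys/kernel/debug and the NPA entries live under an "octeontx2/npa"
 * directory created in this file's init path (not shown in this excerpt):
 *
 *   echo 0 > /sys/kernel/debug/octeontx2/npa/qsize    # select NPA LF 0
 *   cat /sys/kernel/debug/octeontx2/npa/qsize         # dump aura/pool qsizes
 */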
551
552 /* Dumps given NPA Aura's context */
553 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
554 {
555         struct npa_aura_s *aura = &rsp->aura;
556         struct rvu *rvu = m->private;
557
558         seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
559
560         seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
561                    aura->ena, aura->pool_caching);
562         seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
563                    aura->pool_way_mask, aura->avg_con);
564         seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
565                    aura->pool_drop_ena, aura->aura_drop_ena);
566         seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
567                    aura->bp_ena, aura->aura_drop);
568         seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
569                    aura->shift, aura->avg_level);
570
571         seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
572                    (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
573
574         seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
575                    (u64)aura->limit, aura->bp, aura->fc_ena);
576
577         if (!is_rvu_otx2(rvu))
578                 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
579         seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
580                    aura->fc_up_crossing, aura->fc_stype);
581         seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
582
583         seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
584
585         seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
586                    aura->pool_drop, aura->update_time);
587         seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
588                    aura->err_int, aura->err_int_ena);
589         seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
590                    aura->thresh_int, aura->thresh_int_ena);
591         seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
592                    aura->thresh_up, aura->thresh_qint_idx);
593         seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
594
595         seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
596         if (!is_rvu_otx2(rvu))
597                 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
598 }
599
600 /* Dumps given NPA Pool's context */
601 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
602 {
603         struct npa_pool_s *pool = &rsp->pool;
604         struct rvu *rvu = m->private;
605
606         seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
607
608         seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
609                    pool->ena, pool->nat_align);
610         seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
611                    pool->stack_caching, pool->stack_way_mask);
612         seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
613                    pool->buf_offset, pool->buf_size);
614
615         seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
616                    pool->stack_max_pages, pool->stack_pages);
617
618         seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
619
620         seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
621                    pool->stack_offset, pool->shift, pool->avg_level);
622         seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
623                    pool->avg_con, pool->fc_ena, pool->fc_stype);
624         seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
625                    pool->fc_hyst_bits, pool->fc_up_crossing);
626         if (!is_rvu_otx2(rvu))
627                 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
628         seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
629
630         seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
631
632         seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
633
634         seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
635
636         seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
637                    pool->err_int, pool->err_int_ena);
638         seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
639         seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
640                    pool->thresh_int_ena, pool->thresh_up);
641         seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
642                    pool->thresh_qint_idx, pool->err_qint_idx);
643         if (!is_rvu_otx2(rvu))
644                 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
645 }
646
647 /* Reads aura/pool's ctx from admin queue */
648 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
649 {
650         void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
651         struct npa_aq_enq_req aq_req;
652         struct npa_aq_enq_rsp rsp;
653         struct rvu_pfvf *pfvf;
654         int aura, rc, max_id;
655         int npalf, id, all;
656         struct rvu *rvu;
657         u16 pcifunc;
658
659         rvu = m->private;
660
661         switch (ctype) {
662         case NPA_AQ_CTYPE_AURA:
663                 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
664                 id = rvu->rvu_dbg.npa_aura_ctx.id;
665                 all = rvu->rvu_dbg.npa_aura_ctx.all;
666                 break;
667
668         case NPA_AQ_CTYPE_POOL:
669                 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
670                 id = rvu->rvu_dbg.npa_pool_ctx.id;
671                 all = rvu->rvu_dbg.npa_pool_ctx.all;
672                 break;
673         default:
674                 return -EINVAL;
675         }
676
677         if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
678                 return -EINVAL;
679
680         pfvf = rvu_get_pfvf(rvu, pcifunc);
681         if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
682                 seq_puts(m, "Aura context is not initialized\n");
683                 return -EINVAL;
684         } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
685                 seq_puts(m, "Pool context is not initialized\n");
686                 return -EINVAL;
687         }
688
689         memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
690         aq_req.hdr.pcifunc = pcifunc;
691         aq_req.ctype = ctype;
692         aq_req.op = NPA_AQ_INSTOP_READ;
693         if (ctype == NPA_AQ_CTYPE_AURA) {
694                 max_id = pfvf->aura_ctx->qsize;
695                 print_npa_ctx = print_npa_aura_ctx;
696         } else {
697                 max_id = pfvf->pool_ctx->qsize;
698                 print_npa_ctx = print_npa_pool_ctx;
699         }
700
701         if (id < 0 || id >= max_id) {
702                 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
703                            (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
704                         max_id - 1);
705                 return -EINVAL;
706         }
707
708         if (all)
709                 id = 0;
710         else
711                 max_id = id + 1;
712
713         for (aura = id; aura < max_id; aura++) {
714                 aq_req.aura_id = aura;
715                 seq_printf(m, "======%s : %d=======\n",
716                            (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
717                         aq_req.aura_id);
718                 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
719                 if (rc) {
720                         seq_puts(m, "Failed to read context\n");
721                         return -EINVAL;
722                 }
723                 print_npa_ctx(m, &rsp);
724         }
725         return 0;
726 }
727
728 static int write_npa_ctx(struct rvu *rvu, bool all,
729                          int npalf, int id, int ctype)
730 {
731         struct rvu_pfvf *pfvf;
732         int max_id = 0;
733         u16 pcifunc;
734
735         if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
736                 return -EINVAL;
737
738         pfvf = rvu_get_pfvf(rvu, pcifunc);
739
740         if (ctype == NPA_AQ_CTYPE_AURA) {
741                 if (!pfvf->aura_ctx) {
742                         dev_warn(rvu->dev, "Aura context is not initialized\n");
743                         return -EINVAL;
744                 }
745                 max_id = pfvf->aura_ctx->qsize;
746         } else if (ctype == NPA_AQ_CTYPE_POOL) {
747                 if (!pfvf->pool_ctx) {
748                         dev_warn(rvu->dev, "Pool context is not initialized\n");
749                         return -EINVAL;
750                 }
751                 max_id = pfvf->pool_ctx->qsize;
752         }
753
754         if (id < 0 || id >= max_id) {
755                 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
756                          (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
757                         max_id - 1);
758                 return -EINVAL;
759         }
760
761         switch (ctype) {
762         case NPA_AQ_CTYPE_AURA:
763                 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
764                 rvu->rvu_dbg.npa_aura_ctx.id = id;
765                 rvu->rvu_dbg.npa_aura_ctx.all = all;
766                 break;
767
768         case NPA_AQ_CTYPE_POOL:
769                 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
770                 rvu->rvu_dbg.npa_pool_ctx.id = id;
771                 rvu->rvu_dbg.npa_pool_ctx.all = all;
772                 break;
773         default:
774                 return -EINVAL;
775         }
776         return 0;
777 }
778
779 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
780                                 const char __user *buffer, int *npalf,
781                                 int *id, bool *all)
782 {
783         int bytes_not_copied;
784         char *cmd_buf_tmp;
785         char *subtoken;
786         int ret;
787
788         bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
789         if (bytes_not_copied)
790                 return -EFAULT;
791
792         cmd_buf[*count] = '\0';
793         cmd_buf_tmp = strchr(cmd_buf, '\n');
794
795         if (cmd_buf_tmp) {
796                 *cmd_buf_tmp = '\0';
797                 *count = cmd_buf_tmp - cmd_buf + 1;
798         }
799
800         subtoken = strsep(&cmd_buf, " ");
801         ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
802         if (ret < 0)
803                 return ret;
804         subtoken = strsep(&cmd_buf, " ");
805         if (subtoken && strcmp(subtoken, "all") == 0) {
806                 *all = true;
807         } else {
808                 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
809                 if (ret < 0)
810                         return ret;
811         }
812         if (cmd_buf)
813                 return -EINVAL;
814         return ret;
815 }
816
817 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
818                                      const char __user *buffer,
819                                      size_t count, loff_t *ppos, int ctype)
820 {
821         char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
822                                         "aura" : "pool";
823         struct seq_file *seqfp = filp->private_data;
824         struct rvu *rvu = seqfp->private;
825         int npalf, id = 0, ret;
826         bool all = false;
827
828         if ((*ppos != 0) || !count)
829                 return -EINVAL;
830
831         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
832         if (!cmd_buf)
833                 return -ENOMEM;
834         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
835                                    &npalf, &id, &all);
836         if (ret < 0) {
837                 dev_info(rvu->dev,
838                          "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
839                          ctype_string, ctype_string);
840                 goto done;
841         } else {
842                 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
843         }
844 done:
845         kfree(cmd_buf);
846         return ret ? ret : count;
847 }
848
849 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
850                                           const char __user *buffer,
851                                           size_t count, loff_t *ppos)
852 {
853         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
854                                      NPA_AQ_CTYPE_AURA);
855 }
856
857 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
858 {
859         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
860 }
861
862 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
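
/* Example interaction with the aura_ctx entry, following the usage string
 * printed above (the debugfs path is illustrative):
 *
 *   echo "0 all" > .../octeontx2/npa/aura_ctx   # select every aura of NPA LF 0
 *   echo "0 5"   > .../octeontx2/npa/aura_ctx   # or select just aura 5
 *   cat .../octeontx2/npa/aura_ctx               # dump the selected context(s)
 */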
863
864 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
865                                           const char __user *buffer,
866                                           size_t count, loff_t *ppos)
867 {
868         return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
869                                      NPA_AQ_CTYPE_POOL);
870 }
871
872 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
873 {
874         return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
875 }
876
877 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
878
879 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
880                             int ctype, int transaction)
881 {
882         u64 req, out_req, lat, cant_alloc;
883         struct nix_hw *nix_hw;
884         struct rvu *rvu;
885         int port;
886
887         if (blk_addr == BLKADDR_NDC_NPA0) {
888                 rvu = s->private;
889         } else {
890                 nix_hw = s->private;
891                 rvu = nix_hw->rvu;
892         }
893
894         for (port = 0; port < NDC_MAX_PORT; port++) {
895                 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
896                                                 (port, ctype, transaction));
897                 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
898                                                 (port, ctype, transaction));
899                 out_req = rvu_read64(rvu, blk_addr,
900                                      NDC_AF_PORTX_RTX_RWX_OSTDN_PC
901                                      (port, ctype, transaction));
902                 cant_alloc = rvu_read64(rvu, blk_addr,
903                                         NDC_AF_PORTX_RTX_CANT_ALLOC_PC
904                                         (port, transaction));
905                 seq_printf(s, "\nPort:%d\n", port);
906                 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
907                 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
908                 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", req ? lat / req : 0);
909                 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
910                 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
911         }
912 }
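
/* Note: the NPA NDC entry is evidently registered with the rvu pointer as its
 * seq_file private data, while the NIX NDC entries use their nix_hw, which is
 * why ndc_cache_stats() above and ndc_blk_hits_miss_stats() below branch on
 * blk_addr to recover the rvu handle.
 */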
913
914 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
915 {
916         seq_puts(s, "\n***** CACHE mode read stats *****\n");
917         ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
918         seq_puts(s, "\n***** CACHE mode write stats *****\n");
919         ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
920         seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
921         ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
922         seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
923         ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
924         return 0;
925 }
926
927 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
928 {
929         return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
930 }
931
932 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
933
934 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
935 {
936         struct nix_hw *nix_hw;
937         struct rvu *rvu;
938         int bank, max_bank;
939
940         if (blk_addr == BLKADDR_NDC_NPA0) {
941                 rvu = s->private;
942         } else {
943                 nix_hw = s->private;
944                 rvu = nix_hw->rvu;
945         }
946
947         max_bank = NDC_MAX_BANK(rvu, blk_addr);
948         for (bank = 0; bank < max_bank; bank++) {
949                 seq_printf(s, "BANK:%d\n", bank);
950                 seq_printf(s, "\tHits:\t%lld\n",
951                            (u64)rvu_read64(rvu, blk_addr,
952                            NDC_AF_BANKX_HIT_PC(bank)));
953                 seq_printf(s, "\tMiss:\t%lld\n",
954                            (u64)rvu_read64(rvu, blk_addr,
955                             NDC_AF_BANKX_MISS_PC(bank)));
956         }
957         return 0;
958 }
959
960 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
961 {
962         struct nix_hw *nix_hw = filp->private;
963         int blkaddr = 0;
964         int ndc_idx = 0;
965
966         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
967                    BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
968         ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
969
970         return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
971 }
972
973 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
974
975 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
976 {
977         struct nix_hw *nix_hw = filp->private;
978         int blkaddr = 0;
979         int ndc_idx = 0;
980
981         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
982                    BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
983         ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
984
985         return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
986 }
987
988 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
989
990 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
991                                              void *unused)
992 {
993         return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
994 }
995
996 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
997
998 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
999                                                 void *unused)
1000 {
1001         struct nix_hw *nix_hw = filp->private;
1002         int ndc_idx = NPA0_U;
1003         int blkaddr = 0;
1004
1005         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1006                    BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1007
1008         return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1009 }
1010
1011 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1012
1013 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1014                                                 void *unused)
1015 {
1016         struct nix_hw *nix_hw = filp->private;
1017         int ndc_idx = NPA0_U;
1018         int blkaddr = 0;
1019
1020         blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1021                    BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1022
1023         return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1024 }
1025
1026 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1027
1028 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1029                                    struct nix_cn10k_sq_ctx_s *sq_ctx)
1030 {
1031         seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1032                    sq_ctx->ena, sq_ctx->qint_idx);
1033         seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1034                    sq_ctx->substream, sq_ctx->sdp_mcast);
1035         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1036                    sq_ctx->cq, sq_ctx->sqe_way_mask);
1037
1038         seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1039                    sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1040         seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1041                    sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1042         seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1043                    sq_ctx->default_chan, sq_ctx->sqb_count);
1044
1045         seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1046         seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1047         seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1048                    sq_ctx->sqb_aura, sq_ctx->sq_int);
1049         seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1050                    sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1051
1052         seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1053                    sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1054         seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1055                    sq_ctx->lmt_dis, sq_ctx->mnq_dis);
1056         seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1057                    sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1058         seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1059                    sq_ctx->tail_offset, sq_ctx->smenq_offset);
1060         seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1061                    sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1062
1063         seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1064         seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1065         seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1066         seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1067                    sq_ctx->smenq_next_sqb);
1068
1069         seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1070
1071         seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1072         seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1073                    sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1074         seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1075                    sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1076         seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1077                    sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
1078
1079         seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1080                    (u64)sq_ctx->scm_lso_rem);
1081         seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1082         seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1083         seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1084                    (u64)sq_ctx->dropped_octs);
1085         seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1086                    (u64)sq_ctx->dropped_pkts);
1087 }
1088
1089 /* Dumps given nix_sq's context */
1090 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1091 {
1092         struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1093         struct nix_hw *nix_hw = m->private;
1094         struct rvu *rvu = nix_hw->rvu;
1095
1096         if (!is_rvu_otx2(rvu)) {
1097                 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1098                 return;
1099         }
1100         seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1101                    sq_ctx->sqe_way_mask, sq_ctx->cq);
1102         seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1103                    sq_ctx->sdp_mcast, sq_ctx->substream);
1104         seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1105                    sq_ctx->qint_idx, sq_ctx->ena);
1106
1107         seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1108                    sq_ctx->sqb_count, sq_ctx->default_chan);
1109         seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1110                    sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1111         seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1112                    sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1113
1114         seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1115                    sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1116         seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1117                    sq_ctx->sq_int, sq_ctx->sqb_aura);
1118         seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1119
1120         seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1121                    sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1122         seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1123                    sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1124         seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1125                    sq_ctx->smenq_offset, sq_ctx->tail_offset);
1126         seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1127                    sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1128         seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1129                    sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1130         seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1131                    sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1132
1133         seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1134         seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1135         seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1136         seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1137                    sq_ctx->smenq_next_sqb);
1138
1139         seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1140
1141         seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1142                    sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1143         seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1144                    sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1145         seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1146                    sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1147         seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1148
1149         seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1150                    (u64)sq_ctx->scm_lso_rem);
1151         seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1152         seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1153         seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1154                    (u64)sq_ctx->dropped_octs);
1155         seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1156                    (u64)sq_ctx->dropped_pkts);
1157 }
1158
1159 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1160                                    struct nix_cn10k_rq_ctx_s *rq_ctx)
1161 {
1162         seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1163                    rq_ctx->ena, rq_ctx->sso_ena);
1164         seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1165                    rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1166         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1167                    rq_ctx->cq, rq_ctx->lenerr_dis);
1168         seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1169                    rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1170         seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1171                    rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1172         seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1173                    rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1174         seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1175
1176         seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1177                    rq_ctx->spb_aura, rq_ctx->lpb_aura);
1179         seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1180                    rq_ctx->sso_grp, rq_ctx->sso_tt);
1181         seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1182                    rq_ctx->pb_caching, rq_ctx->wqe_caching);
1183         seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1184                    rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1185         seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1186                    rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1187         seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1188                    rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1189
1190         seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1191         seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1192         seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1193         seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
1194                    rq_ctx->wqe_skip, rq_ctx->spb_ena);
1195         seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1196                    rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1197         seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1198                    rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1199         seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1200                    rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1201
1202         seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1203                    rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1204         seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1205                    rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1206         seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1207                    rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1208         seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1209                    rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
1210
1211         seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1212                    rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
1213         seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1214                    rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1215         seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1216                    rq_ctx->rq_int, rq_ctx->rq_int_ena);
1217         seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1218
1219         seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1220                    rq_ctx->ltag, rq_ctx->good_utag);
1221         seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1222                    rq_ctx->bad_utag, rq_ctx->flow_tagw);
1223         seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1224                    rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1225         seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1226                    rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1227         seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1228
1229         seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1230         seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1231         seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1232         seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1233         seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1234 }
1235
1236 /* Dumps given nix_rq's context */
1237 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1238 {
1239         struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1240         struct nix_hw *nix_hw = m->private;
1241         struct rvu *rvu = nix_hw->rvu;
1242
1243         if (!is_rvu_otx2(rvu)) {
1244                 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1245                 return;
1246         }
1247
1248         seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1249                    rq_ctx->wqe_aura, rq_ctx->substream);
1250         seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1251                    rq_ctx->cq, rq_ctx->ena_wqwd);
1252         seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1253                    rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1254         seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1255
1256         seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1257                    rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1258         seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1259                    rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1260         seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1261                    rq_ctx->pb_caching, rq_ctx->sso_tt);
1262         seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1263                    rq_ctx->sso_grp, rq_ctx->lpb_aura);
1264         seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1265
1266         seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1267                    rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1268         seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1269                    rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1270         seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1271                    rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1272         seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1273                    rq_ctx->spb_ena, rq_ctx->wqe_skip);
1274         seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1275
1276         seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1277                    rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1278         seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1279                    rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1280         seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1281                    rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1282         seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1283                    rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1284
1285         seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1286                    rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1287         seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1288                    rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1289         seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1290                    rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1291         seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1292
1293         seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1294                    rq_ctx->flow_tagw, rq_ctx->bad_utag);
1295         seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1296                    rq_ctx->good_utag, rq_ctx->ltag);
1297
1298         seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1299         seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1300         seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1301         seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1302         seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1303 }
1304
1305 /* Dumps given nix_cq's context */
1306 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1307 {
1308         struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1309
1310         seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1311
1312         seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1313         seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1314                    cq_ctx->avg_con, cq_ctx->cint_idx);
1315         seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1316                    cq_ctx->cq_err, cq_ctx->qint_idx);
1317         seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1318                    cq_ctx->bpid, cq_ctx->bp_ena);
1319
1320         seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
1321                    cq_ctx->update_time, cq_ctx->avg_level);
1322         seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
1323                    cq_ctx->head, cq_ctx->tail);
1324
1325         seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
1326                    cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1327         seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
1328                    cq_ctx->qsize, cq_ctx->caching);
1329         seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1330                    cq_ctx->substream, cq_ctx->ena);
1331         seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1332                    cq_ctx->drop_ena, cq_ctx->drop);
1333         seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1334 }
1335
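/* Reads the selected NIX SQ/RQ/CQ context(s) via the NIX admin queue and
 * dumps them. The LF and queue index (or "all") to dump are taken from the
 * values last written to the corresponding debugfs file.
 */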
1336 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1337                                          void *unused, int ctype)
1338 {
1339         void (*print_nix_ctx)(struct seq_file *filp,
1340                               struct nix_aq_enq_rsp *rsp) = NULL;
1341         struct nix_hw *nix_hw = filp->private;
1342         struct rvu *rvu = nix_hw->rvu;
1343         struct nix_aq_enq_req aq_req;
1344         struct nix_aq_enq_rsp rsp;
1345         char *ctype_string = NULL;
1346         int qidx, rc, max_id = 0;
1347         struct rvu_pfvf *pfvf;
1348         int nixlf, id, all;
1349         u16 pcifunc;
1350
1351         switch (ctype) {
1352         case NIX_AQ_CTYPE_CQ:
1353                 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1354                 id = rvu->rvu_dbg.nix_cq_ctx.id;
1355                 all = rvu->rvu_dbg.nix_cq_ctx.all;
1356                 break;
1357
1358         case NIX_AQ_CTYPE_SQ:
1359                 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1360                 id = rvu->rvu_dbg.nix_sq_ctx.id;
1361                 all = rvu->rvu_dbg.nix_sq_ctx.all;
1362                 break;
1363
1364         case NIX_AQ_CTYPE_RQ:
1365                 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1366                 id = rvu->rvu_dbg.nix_rq_ctx.id;
1367                 all = rvu->rvu_dbg.nix_rq_ctx.all;
1368                 break;
1369
1370         default:
1371                 return -EINVAL;
1372         }
1373
1374         if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1375                 return -EINVAL;
1376
1377         pfvf = rvu_get_pfvf(rvu, pcifunc);
1378         if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1379                 seq_puts(filp, "SQ context is not initialized\n");
1380                 return -EINVAL;
1381         } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1382                 seq_puts(filp, "RQ context is not initialized\n");
1383                 return -EINVAL;
1384         } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1385                 seq_puts(filp, "CQ context is not initialized\n");
1386                 return -EINVAL;
1387         }
1388
1389         if (ctype == NIX_AQ_CTYPE_SQ) {
1390                 max_id = pfvf->sq_ctx->qsize;
1391                 ctype_string = "sq";
1392                 print_nix_ctx = print_nix_sq_ctx;
1393         } else if (ctype == NIX_AQ_CTYPE_RQ) {
1394                 max_id = pfvf->rq_ctx->qsize;
1395                 ctype_string = "rq";
1396                 print_nix_ctx = print_nix_rq_ctx;
1397         } else if (ctype == NIX_AQ_CTYPE_CQ) {
1398                 max_id = pfvf->cq_ctx->qsize;
1399                 ctype_string = "cq";
1400                 print_nix_ctx = print_nix_cq_ctx;
1401         }
1402
1403         memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1404         aq_req.hdr.pcifunc = pcifunc;
1405         aq_req.ctype = ctype;
1406         aq_req.op = NIX_AQ_INSTOP_READ;
1407         if (all)
1408                 id = 0;
1409         else
1410                 max_id = id + 1;
1411         for (qidx = id; qidx < max_id; qidx++) {
1412                 aq_req.qidx = qidx;
1413                 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1414                            ctype_string, nixlf, aq_req.qidx);
1415                 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1416                 if (rc) {
1417                         seq_puts(filp, "Failed to read the context\n");
1418                         return -EINVAL;
1419                 }
1420                 print_nix_ctx(filp, &rsp);
1421         }
1422         return 0;
1423 }
1424
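/* Validates the NIX LF and queue index supplied by the user and caches
 * them so that the next read of the debugfs file dumps that context.
 */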
1425 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1426                                int id, int ctype, char *ctype_string,
1427                                struct seq_file *m)
1428 {
1429         struct nix_hw *nix_hw = m->private;
1430         struct rvu_pfvf *pfvf;
1431         int max_id = 0;
1432         u16 pcifunc;
1433
1434         if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1435                 return -EINVAL;
1436
1437         pfvf = rvu_get_pfvf(rvu, pcifunc);
1438
1439         if (ctype == NIX_AQ_CTYPE_SQ) {
1440                 if (!pfvf->sq_ctx) {
1441                         dev_warn(rvu->dev, "SQ context is not initialized\n");
1442                         return -EINVAL;
1443                 }
1444                 max_id = pfvf->sq_ctx->qsize;
1445         } else if (ctype == NIX_AQ_CTYPE_RQ) {
1446                 if (!pfvf->rq_ctx) {
1447                         dev_warn(rvu->dev, "RQ context is not initialized\n");
1448                         return -EINVAL;
1449                 }
1450                 max_id = pfvf->rq_ctx->qsize;
1451         } else if (ctype == NIX_AQ_CTYPE_CQ) {
1452                 if (!pfvf->cq_ctx) {
1453                         dev_warn(rvu->dev, "CQ context is not initialized\n");
1454                         return -EINVAL;
1455                 }
1456                 max_id = pfvf->cq_ctx->qsize;
1457         }
1458
1459         if (id < 0 || id >= max_id) {
1460                 dev_warn(rvu->dev, "Invalid %s_ctx index, valid range is 0-%d\n",
1461                          ctype_string, max_id - 1);
1462                 return -EINVAL;
1463         }
1464         switch (ctype) {
1465         case NIX_AQ_CTYPE_CQ:
1466                 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1467                 rvu->rvu_dbg.nix_cq_ctx.id = id;
1468                 rvu->rvu_dbg.nix_cq_ctx.all = all;
1469                 break;
1470
1471         case NIX_AQ_CTYPE_SQ:
1472                 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1473                 rvu->rvu_dbg.nix_sq_ctx.id = id;
1474                 rvu->rvu_dbg.nix_sq_ctx.all = all;
1475                 break;
1476
1477         case NIX_AQ_CTYPE_RQ:
1478                 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1479                 rvu->rvu_dbg.nix_rq_ctx.id = id;
1480                 rvu->rvu_dbg.nix_rq_ctx.all = all;
1481                 break;
1482         default:
1483                 return -EINVAL;
1484         }
1485         return 0;
1486 }
1487
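/* Handles writes to the sq_ctx/rq_ctx/cq_ctx debugfs files. The expected
 * command is "<nixlf> [<queue number>|all]", e.g. (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *   echo 0 1 > /sys/kernel/debug/octeontx2/nix/sq_ctx
 *   cat /sys/kernel/debug/octeontx2/nix/sq_ctx
 */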
1488 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1489                                            const char __user *buffer,
1490                                            size_t count, loff_t *ppos,
1491                                            int ctype)
1492 {
1493         struct seq_file *m = filp->private_data;
1494         struct nix_hw *nix_hw = m->private;
1495         struct rvu *rvu = nix_hw->rvu;
1496         char *cmd_buf, *ctype_string;
1497         int nixlf, id = 0, ret;
1498         bool all = false;
1499
1500         if ((*ppos != 0) || !count)
1501                 return -EINVAL;
1502
1503         switch (ctype) {
1504         case NIX_AQ_CTYPE_SQ:
1505                 ctype_string = "sq";
1506                 break;
1507         case NIX_AQ_CTYPE_RQ:
1508                 ctype_string = "rq";
1509                 break;
1510         case NIX_AQ_CTYPE_CQ:
1511                 ctype_string = "cq";
1512                 break;
1513         default:
1514                 return -EINVAL;
1515         }
1516
1517         cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1518
1519         if (!cmd_buf)
1520                 return -ENOMEM;
1521
1522         ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1523                                    &nixlf, &id, &all);
1524         if (ret < 0) {
1525                 dev_info(rvu->dev,
1526                          "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1527                          ctype_string, ctype_string);
1528                 goto done;
1529         } else {
1530                 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1531                                           ctype_string, m);
1532         }
1533 done:
1534         kfree(cmd_buf);
1535         return ret ? ret : count;
1536 }
1537
1538 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1539                                         const char __user *buffer,
1540                                         size_t count, loff_t *ppos)
1541 {
1542         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1543                                             NIX_AQ_CTYPE_SQ);
1544 }
1545
1546 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1547 {
1548         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1549 }
1550
1551 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1552
1553 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1554                                         const char __user *buffer,
1555                                         size_t count, loff_t *ppos)
1556 {
1557         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1558                                             NIX_AQ_CTYPE_RQ);
1559 }
1560
1561 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
1562 {
1563         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
1564 }
1565
1566 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1567
1568 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1569                                         const char __user *buffer,
1570                                         size_t count, loff_t *ppos)
1571 {
1572         return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1573                                             NIX_AQ_CTYPE_CQ);
1574 }
1575
1576 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1577 {
1578         return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1579 }
1580
1581 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1582
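/* Prints a queue context count along with its enable/disable bitmap */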
1583 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1584                                  unsigned long *bmap, char *qtype)
1585 {
1586         char *buf;
1587
1588         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1589         if (!buf)
1590                 return;
1591
1592         bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1593         seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1594         seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1595                    qtype, buf);
1596         kfree(buf);
1597 }
1598
1599 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1600 {
1601         if (!pfvf->cq_ctx)
1602                 seq_puts(filp, "cq context is not initialized\n");
1603         else
1604                 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1605                                      "cq");
1606
1607         if (!pfvf->rq_ctx)
1608                 seq_puts(filp, "rq context is not initialized\n");
1609         else
1610                 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1611                                      "rq");
1612
1613         if (!pfvf->sq_ctx)
1614                 seq_puts(filp, "sq context is not initialized\n");
1615         else
1616                 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1617                                      "sq");
1618 }
1619
1620 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1621                                        const char __user *buffer,
1622                                        size_t count, loff_t *ppos)
1623 {
1624         return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1625                                    BLKTYPE_NIX);
1626 }
1627
1628 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1629 {
1630         return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1631 }
1632
1633 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1634
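/* Dumps given NIX bandwidth profile (ingress policer) context */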
1635 static void print_band_prof_ctx(struct seq_file *m,
1636                                 struct nix_bandprof_s *prof)
1637 {
1638         char *str = "Unknown";
1639
1640         switch (prof->pc_mode) {
1641         case NIX_RX_PC_MODE_VLAN:
1642                 str = "VLAN";
1643                 break;
1644         case NIX_RX_PC_MODE_DSCP:
1645                 str = "DSCP";
1646                 break;
1647         case NIX_RX_PC_MODE_GEN:
1648                 str = "Generic";
1649                 break;
1650         case NIX_RX_PC_MODE_RSVD:
1651                 str = "Reserved";
1652                 break;
1653         }
1654         seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1655         str = (prof->icolor == 3) ? "Color blind" :
1656                 (prof->icolor == 0) ? "Green" :
1657                 (prof->icolor == 1) ? "Yellow" : "Red";
1658         seq_printf(m, "W0: icolor\t\t%s\n", str);
1659         seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1660         seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1661         seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1662         seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1663         seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1664         seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1665         seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1666         seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1667
1668         seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1669         str = (prof->lmode == 0) ? "byte" : "packet";
1670         seq_printf(m, "W1: lmode\t\t%s\n", str);
1671         seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1672         seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1673         seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1674         seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1675         str = (prof->gc_action == 0) ? "PASS" :
1676                 (prof->gc_action == 1) ? "DROP" : "RED";
1677         seq_printf(m, "W1: gc_action\t\t%s\n", str);
1678         str = (prof->yc_action == 0) ? "PASS" :
1679                 (prof->yc_action == 1) ? "DROP" : "RED";
1680         seq_printf(m, "W1: yc_action\t\t%s\n", str);
1681         str = (prof->rc_action == 0) ? "PASS" :
1682                 (prof->rc_action == 1) ? "DROP" : "RED";
1683         seq_printf(m, "W1: rc_action\t\t%s\n", str);
1684         seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1685         seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1686         seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1687
1688         seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1689         seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1690         seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1691         seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1692                    (u64)prof->green_pkt_pass);
1693         seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1694                    (u64)prof->yellow_pkt_pass);
1695         seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1696         seq_printf(m, "W7: green_octs_pass\t%lld\n",
1697                    (u64)prof->green_octs_pass);
1698         seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1699                    (u64)prof->yellow_octs_pass);
1700         seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1701         seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1702                    (u64)prof->green_pkt_drop);
1703         seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1704                    (u64)prof->yellow_pkt_drop);
1705         seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1706         seq_printf(m, "W13: green_octs_drop\t%lld\n",
1707                    (u64)prof->green_octs_drop);
1708         seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1709                    (u64)prof->yellow_octs_drop);
1710         seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1711         seq_puts(m, "==============================\n");
1712 }
1713
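/* Walks all allocated bandwidth profiles of each policer layer and dumps
 * their contexts along with the PF/VF they are allocated to.
 */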
1714 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
1715 {
1716         struct nix_hw *nix_hw = m->private;
1717         struct nix_cn10k_aq_enq_req aq_req;
1718         struct nix_cn10k_aq_enq_rsp aq_rsp;
1719         struct rvu *rvu = nix_hw->rvu;
1720         struct nix_ipolicer *ipolicer;
1721         int layer, prof_idx, idx, rc;
1722         u16 pcifunc;
1723         char *str;
1724
1725         for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1726                 if (layer == BAND_PROF_INVAL_LAYER)
1727                         continue;
1728                 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1729                         (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
1730
1731                 seq_printf(m, "\n%s bandwidth profiles\n", str);
1732                 seq_puts(m, "=======================\n");
1733
1734                 ipolicer = &nix_hw->ipolicer[layer];
1735
1736                 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
1737                         if (is_rsrc_free(&ipolicer->band_prof, idx))
1738                                 continue;
1739
1740                         prof_idx = (idx & 0x3FFF) | (layer << 14);
1741                         rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1742                                                  0x00, NIX_AQ_CTYPE_BANDPROF,
1743                                                  prof_idx);
1744                         if (rc) {
1745                                 dev_err(rvu->dev,
1746                                         "%s: Failed to fetch context of %s profile %d, err %d\n",
1747                                         __func__, str, idx, rc);
1748                                 return 0;
1749                         }
1750                         seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
1751                         pcifunc = ipolicer->pfvf_map[idx];
1752                         if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1753                                 seq_printf(m, "Allocated to :: PF %d\n",
1754                                            rvu_get_pf(pcifunc));
1755                         else
1756                                 seq_printf(m, "Allocated to :: PF %d VF %d\n",
1757                                            rvu_get_pf(pcifunc),
1758                                            (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1759                         print_band_prof_ctx(m, &aq_rsp.prof);
1760                 }
1761         }
1762         return 0;
1763 }
1764
1765 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1766
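/* Shows max vs free bandwidth profile counts for each policer layer */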
1767 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1768 {
1769         struct nix_hw *nix_hw = m->private;
1770         struct nix_ipolicer *ipolicer;
1771         int layer;
1772         char *str;
1773
1774         seq_puts(m, "\nBandwidth profile resource free count\n");
1775         seq_puts(m, "=====================================\n");
1776         for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1777                 if (layer == BAND_PROF_INVAL_LAYER)
1778                         continue;
1779                 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1780                         (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1781
1782                 ipolicer = &nix_hw->ipolicer[layer];
1783                 seq_printf(m, "%s :: Max: %4d  Free: %4d\n", str,
1784                            ipolicer->band_prof.max,
1785                            rvu_rsrc_free_count(&ipolicer->band_prof));
1786         }
1787         seq_puts(m, "=====================================\n");
1788
1789         return 0;
1790 }
1791
1792 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1793
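/* Creates the per-NIX-block debugfs directory ("nix" or "nix1") and its
 * queue context, NDC cache, qsize and ingress policer files.
 */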
1794 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1795 {
1796         struct nix_hw *nix_hw;
1797
1798         if (!is_block_implemented(rvu->hw, blkaddr))
1799                 return;
1800
1801         if (blkaddr == BLKADDR_NIX0) {
1802                 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1803                 nix_hw = &rvu->hw->nix[0];
1804         } else {
1805                 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1806                                                       rvu->rvu_dbg.root);
1807                 nix_hw = &rvu->hw->nix[1];
1808         }
1809
1810         debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1811                             &rvu_dbg_nix_sq_ctx_fops);
1812         debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1813                             &rvu_dbg_nix_rq_ctx_fops);
1814         debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1815                             &rvu_dbg_nix_cq_ctx_fops);
1816         debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1817                             &rvu_dbg_nix_ndc_tx_cache_fops);
1818         debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1819                             &rvu_dbg_nix_ndc_rx_cache_fops);
1820         debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1821                             &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1822         debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1823                             &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1824         debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1825                             &rvu_dbg_nix_qsize_fops);
1826         debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1827                             &rvu_dbg_nix_band_prof_ctx_fops);
1828         debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
1829                             &rvu_dbg_nix_band_prof_rsrc_fops);
1830 }
1831
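/* Creates the NPA debugfs directory and its qsize, context and NDC files */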
1832 static void rvu_dbg_npa_init(struct rvu *rvu)
1833 {
1834         rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1835
1836         debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1837                             &rvu_dbg_npa_qsize_fops);
1838         debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1839                             &rvu_dbg_npa_aura_ctx_fops);
1840         debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1841                             &rvu_dbg_npa_pool_ctx_fops);
1842         debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1843                             &rvu_dbg_npa_ndc_cache_fops);
1844         debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1845                             &rvu_dbg_npa_ndc_hits_miss_fops);
1846 }
1847
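/* Helpers to fetch a cumulative NIX RX/TX stat for a CGX/RPM LMAC and print
 * it. Each expands to a statement expression that evaluates to the counter
 * value; 'err' in the caller is set on failure.
 */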
1848 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)                          \
1849         ({                                                              \
1850                 u64 cnt;                                                \
1851                 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
1852                                              NIX_STATS_RX, &(cnt));     \
1853                 if (!err)                                               \
1854                         seq_printf(s, "%s: %llu\n", name, cnt);         \
1855                 cnt;                                                    \
1856         })
1857
1858 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)                          \
1859         ({                                                              \
1860                 u64 cnt;                                                \
1861                 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
1862                                              NIX_STATS_TX, &(cnt));     \
1863                 if (!err)                                               \
1864                         seq_printf(s, "%s: %llu\n", name, cnt);         \
1865                 cnt;                                                    \
1866         })
1867
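/* Dumps link status, cumulative NIX RX/TX stats and CGX/RPM MAC level
 * RX/TX stats for the given LMAC.
 */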
1868 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1869 {
1870         struct cgx_link_user_info linfo;
1871         struct mac_ops *mac_ops;
1872         void *cgxd = s->private;
1873         u64 ucast, mcast, bcast;
1874         int stat = 0, err = 0;
1875         u64 tx_stat, rx_stat;
1876         struct rvu *rvu;
1877
1878         rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1879                                              PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1880         if (!rvu)
1881                 return -ENODEV;
1882
1883         mac_ops = get_mac_ops(cgxd);
1884
1885         if (!mac_ops)
1886                 return 0;
1887
1888         /* Link status */
1889         seq_puts(s, "\n=======Link Status======\n\n");
1890         err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1891         if (err)
1892                 seq_puts(s, "Failed to read link status\n");
             else
1893                 seq_printf(s, "\nLink is %s %d Mbps\n\n",
1894                            linfo.link_up ? "UP" : "DOWN", linfo.speed);
1895
1896         /* Rx stats */
1897         seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1898                    mac_ops->name);
1899         ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1900         if (err)
1901                 return err;
1902         mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1903         if (err)
1904                 return err;
1905         bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1906         if (err)
1907                 return err;
1908         seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1909         PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1910         if (err)
1911                 return err;
1912         PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1913         if (err)
1914                 return err;
1915         PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1916         if (err)
1917                 return err;
1918
1919         /* Tx stats */
1920         seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
1921                    mac_ops->name);
1922         ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
1923         if (err)
1924                 return err;
1925         mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
1926         if (err)
1927                 return err;
1928         bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
1929         if (err)
1930                 return err;
1931         seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
1932         PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
1933         if (err)
1934                 return err;
1935         PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
1936         if (err)
1937                 return err;
1938
1939         /* Rx stats */
1940         seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
1941         while (stat < mac_ops->rx_stats_cnt) {
1942                 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
1943                 if (err)
1944                         return err;
1945                 if (is_rvu_otx2(rvu))
1946                         seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
1947                                    rx_stat);
1948                 else
1949                         seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
1950                                    rx_stat);
1951                 stat++;
1952         }
1953
1954         /* Tx stats */
1955         stat = 0;
1956         seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
1957         while (stat < mac_ops->tx_stats_cnt) {
1958                 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
1959                 if (err)
1960                         return err;
1961
1962                 if (is_rvu_otx2(rvu))
1963                         seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
1964                                    tx_stat);
1965                 else
1966                         seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
1967                                    tx_stat);
1968                 stat++;
1969         }
1970
1971         return err;
1972 }
1973
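/* Derives the LMAC id from the name of the parent debugfs directory ("lmacX") */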
1974 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
1975 {
1976         struct dentry *current_dir;
1977         char *buf;
1978
1979         current_dir = filp->file->f_path.dentry->d_parent;
1980         buf = strrchr(current_dir->d_name.name, 'c');
1981         if (!buf)
1982                 return -EINVAL;
1983
1984         return kstrtoint(buf + 1, 10, lmac_id);
1985 }
1986
1987 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
1988 {
1989         int lmac_id, err;
1990
1991         err = rvu_dbg_derive_lmacid(filp, &lmac_id);
1992         if (!err)
1993                 return cgx_print_stats(filp, lmac_id);
1994
1995         return err;
1996 }
1997
1998 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
1999
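/* Dumps the DMAC filter mode and the enabled DMAC CAM entries of an LMAC */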
2000 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2001 {
2002         struct pci_dev *pdev = NULL;
2003         void *cgxd = s->private;
2004         char *bcast, *mcast;
2005         u16 index, domain;
2006         u8 dmac[ETH_ALEN];
2007         struct rvu *rvu;
2008         u64 cfg, mac;
2009         int pf;
2010
2011         rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2012                                              PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2013         if (!rvu)
2014                 return -ENODEV;
2015
2016         pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2017         domain = 2;
2018
2019         pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2020         if (!pdev)
2021                 return 0;
2022
2023         cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2024         bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2025         mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2026
2027         seq_puts(s,
2028                  "PCI dev       RVUPF   BROADCAST  MULTICAST  FILTER-MODE\n");
2029         seq_printf(s, "%s  PF%d  %9s  %9s",
2030                    dev_name(&pdev->dev), pf, bcast, mcast);
2031         if (cfg & CGX_DMAC_CAM_ACCEPT)
2032                 seq_printf(s, "%12s\n\n", "UNICAST");
2033         else
2034                 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2035
2036         seq_puts(s, "\nDMAC-INDEX  ADDRESS\n");
2037
2038         for (index = 0; index < 32; index++) {
2039                 cfg = cgx_read_dmac_entry(cgxd, index);
2040                 /* Display enabled dmac entries associated with current lmac */
2041                 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2042                     FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2043                         mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2044                         u64_to_ether_addr(mac, dmac);
2045                         seq_printf(s, "%7d     %pM\n", index, dmac);
2046                 }
2047         }
2048
2049         return 0;
2050 }
2051
2052 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2053 {
2054         int err, lmac_id;
2055
2056         err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2057         if (!err)
2058                 return cgx_print_dmac_flt(filp, lmac_id);
2059
2060         return err;
2061 }
2062
2063 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2064
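/* Creates per-CGX/RPM and per-LMAC debugfs directories along with their
 * "stats" and "mac_filter" files.
 */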
2065 static void rvu_dbg_cgx_init(struct rvu *rvu)
2066 {
2067         struct mac_ops *mac_ops;
2068         unsigned long lmac_bmap;
2069         int i, lmac_id;
2070         char dname[20];
2071         void *cgx;
2072
2073         if (!cgx_get_cgxcnt_max())
2074                 return;
2075
2076         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2077         if (!mac_ops)
2078                 return;
2079
2080         rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2081                                                    rvu->rvu_dbg.root);
2082
2083         for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2084                 cgx = rvu_cgx_pdata(i, rvu);
2085                 if (!cgx)
2086                         continue;
2087                 lmac_bmap = cgx_get_lmac_bmap(cgx);
2088                 /* cgx debugfs dir */
2089                 sprintf(dname, "%s%d", mac_ops->name, i);
2090                 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2091                                                       rvu->rvu_dbg.cgx_root);
2092
2093                 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2094                         /* lmac debugfs dir */
2095                         sprintf(dname, "lmac%d", lmac_id);
2096                         rvu->rvu_dbg.lmac =
2097                                 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2098
2099                         debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2100                                             cgx, &rvu_dbg_cgx_stat_fops);
2101                         debugfs_create_file("mac_filter", 0600,
2102                                             rvu->rvu_dbg.lmac, cgx,
2103                                             &rvu_dbg_cgx_dmac_flt_fops);
2104                 }
2105         }
2106 }
2107
2108 /* NPC debugfs APIs */
2109 static void rvu_print_npc_mcam_info(struct seq_file *s,
2110                                     u16 pcifunc, int blkaddr)
2111 {
2112         struct rvu *rvu = s->private;
2113         int entry_acnt, entry_ecnt;
2114         int cntr_acnt, cntr_ecnt;
2115
2116         rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2117                                           &entry_acnt, &entry_ecnt);
2118         rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2119                                             &cntr_acnt, &cntr_ecnt);
2120         if (!entry_acnt && !cntr_acnt)
2121                 return;
2122
2123         if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2124                 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2125                            rvu_get_pf(pcifunc));
2126         else
2127                 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2128                            rvu_get_pf(pcifunc),
2129                            (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2130
2131         if (entry_acnt) {
2132                 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2133                 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2134         }
2135         if (cntr_acnt) {
2136                 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2137                 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2138         }
2139 }
2140
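/* Dumps MCAM key widths, entry/counter usage and the current per PF/VF
 * allocation.
 */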
2141 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
2142 {
2143         struct rvu *rvu = filp->private;
2144         int pf, vf, numvfs, blkaddr;
2145         struct npc_mcam *mcam;
2146         u16 pcifunc, counters;
2147         u64 cfg;
2148
2149         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2150         if (blkaddr < 0)
2151                 return -ENODEV;
2152
2153         mcam = &rvu->hw->mcam;
2154         counters = rvu->hw->npc_counters;
2155
2156         seq_puts(filp, "\nNPC MCAM info:\n");
2157         /* MCAM keywidth on receive and transmit sides */
2158         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2159         cfg = (cfg >> 32) & 0x07;
2160         seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2161                    "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2162                    "224bits" : "448bits"));
2163         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2164         cfg = (cfg >> 32) & 0x07;
2165         seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2166                    "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2167                    "224bits" : "448bits"));
2168
2169         mutex_lock(&mcam->lock);
2170         /* MCAM entries */
2171         seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2172         seq_printf(filp, "\t\t Reserved \t: %d\n",
2173                    mcam->total_entries - mcam->bmap_entries);
2174         seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2175
2176         /* MCAM counters */
2177         seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2178         seq_printf(filp, "\t\t Reserved \t: %d\n",
2179                    counters - mcam->counters.max);
2180         seq_printf(filp, "\t\t Available \t: %d\n",
2181                    rvu_rsrc_free_count(&mcam->counters));
2182
2183         if (mcam->bmap_entries == mcam->bmap_fcnt) {
2184                 mutex_unlock(&mcam->lock);
2185                 return 0;
2186         }
2187
2188         seq_puts(filp, "\n\t\t Current allocation\n");
2189         seq_puts(filp, "\t\t====================\n");
2190         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2191                 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2192                 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2193
2194                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2195                 numvfs = (cfg >> 12) & 0xFF;
2196                 for (vf = 0; vf < numvfs; vf++) {
2197                         pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2198                         rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2199                 }
2200         }
2201
2202         mutex_unlock(&mcam->lock);
2203         return 0;
2204 }
2205
2206 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2207
2208 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2209                                              void *unused)
2210 {
2211         struct rvu *rvu = filp->private;
2212         struct npc_mcam *mcam;
2213         int blkaddr;
2214
2215         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2216         if (blkaddr < 0)
2217                 return -ENODEV;
2218
2219         mcam = &rvu->hw->mcam;
2220
2221         seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2222         seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2223                    rvu_read64(rvu, blkaddr,
2224                               NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2225
2226         return 0;
2227 }
2228
2229 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2230
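/* Prints the match fields and masks programmed in an MCAM rule */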
2231 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2232                                         struct rvu_npc_mcam_rule *rule)
2233 {
2234         u8 bit;
2235
2236         for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2237                 seq_printf(s, "\t%s  ", npc_get_field_name(bit));
2238                 switch (bit) {
2239                 case NPC_DMAC:
2240                         seq_printf(s, "%pM ", rule->packet.dmac);
2241                         seq_printf(s, "mask %pM\n", rule->mask.dmac);
2242                         break;
2243                 case NPC_SMAC:
2244                         seq_printf(s, "%pM ", rule->packet.smac);
2245                         seq_printf(s, "mask %pM\n", rule->mask.smac);
2246                         break;
2247                 case NPC_ETYPE:
2248                         seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2249                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2250                         break;
2251                 case NPC_OUTER_VID:
2252                         seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2253                         seq_printf(s, "mask 0x%x\n",
2254                                    ntohs(rule->mask.vlan_tci));
2255                         break;
2256                 case NPC_TOS:
2257                         seq_printf(s, "%d ", rule->packet.tos);
2258                         seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2259                         break;
2260                 case NPC_SIP_IPV4:
2261                         seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2262                         seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2263                         break;
2264                 case NPC_DIP_IPV4:
2265                         seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2266                         seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2267                         break;
2268                 case NPC_SIP_IPV6:
2269                         seq_printf(s, "%pI6 ", rule->packet.ip6src);
2270                         seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2271                         break;
2272                 case NPC_DIP_IPV6:
2273                         seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2274                         seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2275                         break;
2276                 case NPC_SPORT_TCP:
2277                 case NPC_SPORT_UDP:
2278                 case NPC_SPORT_SCTP:
2279                         seq_printf(s, "%d ", ntohs(rule->packet.sport));
2280                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2281                         break;
2282                 case NPC_DPORT_TCP:
2283                 case NPC_DPORT_UDP:
2284                 case NPC_DPORT_SCTP:
2285                         seq_printf(s, "%d ", ntohs(rule->packet.dport));
2286                         seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2287                         break;
2288                 default:
2289                         seq_puts(s, "\n");
2290                         break;
2291                 }
2292         }
2293 }
2294
2295 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2296                                          struct rvu_npc_mcam_rule *rule)
2297 {
2298         if (is_npc_intf_tx(rule->intf)) {
2299                 switch (rule->tx_action.op) {
2300                 case NIX_TX_ACTIONOP_DROP:
2301                         seq_puts(s, "\taction: Drop\n");
2302                         break;
2303                 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2304                         seq_puts(s, "\taction: Unicast to default channel\n");
2305                         break;
2306                 case NIX_TX_ACTIONOP_UCAST_CHAN:
2307                         seq_printf(s, "\taction: Unicast to channel %d\n",
2308                                    rule->tx_action.index);
2309                         break;
2310                 case NIX_TX_ACTIONOP_MCAST:
2311                         seq_puts(s, "\taction: Multicast\n");
2312                         break;
2313                 case NIX_TX_ACTIONOP_DROP_VIOL:
2314                         seq_puts(s, "\taction: Lockdown Violation Drop\n");
2315                         break;
2316                 default:
2317                         break;
2318                 }
2319         } else {
2320                 switch (rule->rx_action.op) {
2321                 case NIX_RX_ACTIONOP_DROP:
2322                         seq_puts(s, "\taction: Drop\n");
2323                         break;
2324                 case NIX_RX_ACTIONOP_UCAST:
2325                         seq_printf(s, "\taction: Direct to queue %d\n",
2326                                    rule->rx_action.index);
2327                         break;
2328                 case NIX_RX_ACTIONOP_RSS:
2329                         seq_puts(s, "\taction: RSS\n");
2330                         break;
2331                 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2332                         seq_puts(s, "\taction: Unicast ipsec\n");
2333                         break;
2334                 case NIX_RX_ACTIONOP_MCAST:
2335                         seq_puts(s, "\taction: Multicast\n");
2336                         break;
2337                 default:
2338                         break;
2339                 }
2340         }
2341 }
2342
2343 static const char *rvu_dbg_get_intf_name(int intf)
2344 {
2345         switch (intf) {
2346         case NIX_INTFX_RX(0):
2347                 return "NIX0_RX";
2348         case NIX_INTFX_RX(1):
2349                 return "NIX1_RX";
2350         case NIX_INTFX_TX(0):
2351                 return "NIX0_TX";
2352         case NIX_INTFX_TX(1):
2353                 return "NIX1_TX";
2354         default:
2355                 break;
2356         }
2357
2358         return "unknown";
2359 }
2360
2361 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2362 {
2363         struct rvu_npc_mcam_rule *iter;
2364         struct rvu *rvu = s->private;
2365         struct npc_mcam *mcam;
2366         int pf, vf = -1;
2367         bool enabled;
2368         int blkaddr;
2369         u16 target;
2370         u64 hits;
2371
2372         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2373         if (blkaddr < 0)
2374                 return 0;
2375
2376         mcam = &rvu->hw->mcam;
2377
2378         mutex_lock(&mcam->lock);
2379         list_for_each_entry(iter, &mcam->mcam_rules, list) {
2380                 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2381                 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
2382
2383                 if (iter->owner & RVU_PFVF_FUNC_MASK) {
2384                         vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
2385                         seq_printf(s, "VF%d", vf);
2386                 }
2387                 seq_puts(s, "\n");
2388
2389                 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
2390                                                     "RX" : "TX");
2391                 seq_printf(s, "\tinterface: %s\n",
2392                            rvu_dbg_get_intf_name(iter->intf));
2393                 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
2394
2395                 rvu_dbg_npc_mcam_show_flows(s, iter);
2396                 if (is_npc_intf_rx(iter->intf)) {
2397                         target = iter->rx_action.pf_func;
2398                         pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2399                         seq_printf(s, "\tForward to: PF%d ", pf);
2400
2401                         if (target & RVU_PFVF_FUNC_MASK) {
2402                                 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
2403                                 seq_printf(s, "VF%d", vf);
2404                         }
2405                         seq_puts(s, "\n");
2406                 }
2407
2408                 rvu_dbg_npc_mcam_show_action(s, iter);
2409
2410                 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
2411                 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
2412
2413                 if (!iter->has_cntr)
2414                         continue;
2415                 seq_printf(s, "\tcounter: %d\n", iter->cntr);
2416
2417                 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
2418                 seq_printf(s, "\thits: %lld\n", hits);
2419         }
2420         mutex_unlock(&mcam->lock);
2421
2422         return 0;
2423 }
2424
2425 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2426
2427 static void rvu_dbg_npc_init(struct rvu *rvu)
2428 {
2429         rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2430
2431         debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2432                             &rvu_dbg_npc_mcam_info_fops);
2433         debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2434                             &rvu_dbg_npc_mcam_rules_fops);
2435         debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2436                             &rvu_dbg_npc_rx_miss_act_fops);
2437 }
2438
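/* Computes the engine index range for the given engine type (SE/IE/AE)
 * and prints FREE/BUSY status bitmaps for those engines.
 */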
2439 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2440 {
2441         struct cpt_ctx *ctx = filp->private;
2442         u64 busy_sts = 0, free_sts = 0;
2443         u32 e_min = 0, e_max = 0, e, i;
2444         u16 max_ses, max_ies, max_aes;
2445         struct rvu *rvu = ctx->rvu;
2446         int blkaddr = ctx->blkaddr;
2447         u64 reg;
2448
2449         reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2450         max_ses = reg & 0xffff;
2451         max_ies = (reg >> 16) & 0xffff;
2452         max_aes = (reg >> 32) & 0xffff;
2453
2454         switch (eng_type) {
2455         case CPT_AE_TYPE:
2456                 e_min = max_ses + max_ies;
2457                 e_max = max_ses + max_ies + max_aes;
2458                 break;
2459         case CPT_SE_TYPE:
2460                 e_min = 0;
2461                 e_max = max_ses;
2462                 break;
2463         case CPT_IE_TYPE:
2464                 e_min = max_ses;
2465                 e_max = max_ses + max_ies;
2466                 break;
2467         default:
2468                 return -EINVAL;
2469         }
2470
2471         for (e = e_min, i = 0; e < e_max; e++, i++) {
2472                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2473                 if (reg & 0x1)
2474                         busy_sts |= 1ULL << i;
2475
2476                 if (reg & 0x2)
2477                         free_sts |= 1ULL << i;
2478         }
2479         seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2480         seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
2481
2482         return 0;
2483 }
2484
2485 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
2486 {
2487         return cpt_eng_sts_display(filp, CPT_AE_TYPE);
2488 }
2489
2490 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2491
2492 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
2493 {
2494         return cpt_eng_sts_display(filp, CPT_SE_TYPE);
2495 }
2496
2497 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2498
2499 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
2500 {
2501         return cpt_eng_sts_display(filp, CPT_IE_TYPE);
2502 }
2503
2504 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2505
2506 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2507 {
2508         struct cpt_ctx *ctx = filp->private;
2509         u16 max_ses, max_ies, max_aes;
2510         struct rvu *rvu = ctx->rvu;
2511         int blkaddr = ctx->blkaddr;
2512         u32 e_max, e;
2513         u64 reg;
2514
2515         reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2516         max_ses = reg & 0xffff;
2517         max_ies = (reg >> 16) & 0xffff;
2518         max_aes = (reg >> 32) & 0xffff;
2519
2520         e_max = max_ses + max_ies + max_aes;
2521
2522         seq_puts(filp, "===========================================\n");
2523         for (e = 0; e < e_max; e++) {
2524                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2525                 seq_printf(filp, "CPT Engine[%u] Group Enable   0x%02llx\n", e,
2526                            reg & 0xff);
2527                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2528                 seq_printf(filp, "CPT Engine[%u] Active Info    0x%llx\n", e,
2529                            reg);
2530                 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2531                 seq_printf(filp, "CPT Engine[%u] Control        0x%llx\n", e,
2532                            reg);
2533                 seq_puts(filp, "===========================================\n");
2534         }
2535         return 0;
2536 }
2537
2538 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2539
2540 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2541 {
2542         struct cpt_ctx *ctx = filp->private;
2543         int blkaddr = ctx->blkaddr;
2544         struct rvu *rvu = ctx->rvu;
2545         struct rvu_block *block;
2546         struct rvu_hwinfo *hw;
2547         u64 reg;
2548         u32 lf;
2549
2550         hw = rvu->hw;
2551         block = &hw->block[blkaddr];
2552         if (!block->lf.bmap)
2553                 return -ENODEV;
2554
2555         seq_puts(filp, "===========================================\n");
2556         for (lf = 0; lf < block->lf.max; lf++) {
2557                 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2558                 seq_printf(filp, "CPT Lf[%u] CTL          0x%llx\n", lf, reg);
2559                 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2560                 seq_printf(filp, "CPT Lf[%u] CTL2         0x%llx\n", lf, reg);
2561                 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2562                 seq_printf(filp, "CPT Lf[%u] PTR_CTL      0x%llx\n", lf, reg);
2563                 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2564                                 (lf << block->lfshift));
2565                 seq_printf(filp, "CPT Lf[%u] CFG          0x%llx\n", lf, reg);
2566                 seq_puts(filp, "===========================================\n");
2567         }
2568         return 0;
2569 }
2570
2571 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2572
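/* 'cpt_err_info' - snapshot of the CPT AF interrupt and error status
 * registers (FLT, PSN, RVU, RAS) and the engine execution error info
 * register.
 */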
2573 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2574 {
2575         struct cpt_ctx *ctx = filp->private;
2576         struct rvu *rvu = ctx->rvu;
2577         int blkaddr = ctx->blkaddr;
2578         u64 reg0, reg1;
2579
2580         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2581         reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2582         seq_printf(filp, "CPT_AF_FLTX_INT:       0x%llx 0x%llx\n", reg0, reg1);
2583         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2584         reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2585         seq_printf(filp, "CPT_AF_PSNX_EXE:       0x%llx 0x%llx\n", reg0, reg1);
2586         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2587         seq_printf(filp, "CPT_AF_PSNX_LF:        0x%llx\n", reg0);
2588         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2589         seq_printf(filp, "CPT_AF_RVU_INT:        0x%llx\n", reg0);
2590         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2591         seq_printf(filp, "CPT_AF_RAS_INT:        0x%llx\n", reg0);
2592         reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2593         seq_printf(filp, "CPT_AF_EXE_ERR_INFO:   0x%llx\n", reg0);
2594
2595         return 0;
2596 }
2597
2598 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2599
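/* 'cpt_pc' - CPT AF performance counters: instruction request and
 * latency counts, NCB read request/latency counts, reads caused by
 * UC fills, active cycles and the CPT clock count.
 */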
2600 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2601 {
2602         struct cpt_ctx *ctx = filp->private;
2603         struct rvu *rvu = ctx->rvu;
2604         int blkaddr = ctx->blkaddr;
2605         u64 reg;
2606
2607         reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2608         seq_printf(filp, "CPT instruction requests   %llu\n", reg);
2609         reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2610         seq_printf(filp, "CPT instruction latency    %llu\n", reg);
2611         reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2612         seq_printf(filp, "CPT NCB read requests      %llu\n", reg);
2613         reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2614         seq_printf(filp, "CPT NCB read latency       %llu\n", reg);
2615         reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2616         seq_printf(filp, "CPT read requests caused by UC fills   %llu\n", reg);
2617         reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2618         seq_printf(filp, "CPT active cycles pc       %llu\n", reg);
2619         reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2620         seq_printf(filp, "CPT clock count pc         %llu\n", reg);
2621
2622         return 0;
2623 }
2624
2625 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2626
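/* Create the CPT debugfs directory ("cpt" for CPT0, "cpt1" for CPT1)
 * and populate it with the files defined above. Usage example,
 * assuming debugfs is mounted at the default /sys/kernel/debug and
 * the OcteonTx2 root directory is in use:
 *
 *   cat /sys/kernel/debug/octeontx2/cpt/cpt_pc
 *   cat /sys/kernel/debug/octeontx2/cpt/cpt_lfs_info
 */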
2627 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2628 {
2629         struct cpt_ctx *ctx;
2630
2631         if (!is_block_implemented(rvu->hw, blkaddr))
2632                 return;
2633
2634         if (blkaddr == BLKADDR_CPT0) {
2635                 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2636                 ctx = &rvu->rvu_dbg.cpt_ctx[0];
2637                 ctx->blkaddr = BLKADDR_CPT0;
2638                 ctx->rvu = rvu;
2639         } else {
2640                 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2641                                                       rvu->rvu_dbg.root);
2642                 ctx = &rvu->rvu_dbg.cpt_ctx[1];
2643                 ctx->blkaddr = BLKADDR_CPT1;
2644                 ctx->rvu = rvu;
2645         }
2646
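        /* All entries are root-only (0600) seq_files; ctx carries the
         * block address so CPT0 and CPT1 can share the same show
         * callbacks.
         */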
2647         debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2648                             &rvu_dbg_cpt_pc_fops);
2649         debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2650                             &rvu_dbg_cpt_ae_sts_fops);
2651         debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2652                             &rvu_dbg_cpt_se_sts_fops);
2653         debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2654                             &rvu_dbg_cpt_ie_sts_fops);
2655         debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2656                             &rvu_dbg_cpt_engines_info_fops);
2657         debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2658                             &rvu_dbg_cpt_lfs_info_fops);
2659         debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2660                             &rvu_dbg_cpt_err_info_fops);
2661 }
2662
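/* The debugfs root directory is named after the silicon family. */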
2663 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
2664 {
2665         if (is_rvu_otx2(rvu))
2666                 return "octeontx2";
2667
2668         return "cn10k";
2669 }
2670
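/* Top-level debugfs setup: create the root directory and the common
 * resource allocation and PF-to-CGX/RPM map entries, then let each
 * block (NPA, NIX, CGX, NPC, CPT) register its own sub-directory.
 */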
2671 void rvu_dbg_init(struct rvu *rvu)
2672 {
2673         rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2674
2675         debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2676                             &rvu_dbg_rsrc_status_fops);
2677
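        /* The PF to CGX/RPM mapping is only exposed when CGX/RPM
         * interfaces are present.
         */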
2678         if (!cgx_get_cgxcnt_max())
2679                 goto create;
2680
2681         if (is_rvu_otx2(rvu))
2682                 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2683                                     rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2684         else
2685                 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2686                                     rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2687
2688 create:
2689         rvu_dbg_npa_init(rvu);
2690         rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2691
2692         rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2693         rvu_dbg_cgx_init(rvu);
2694         rvu_dbg_npc_init(rvu);
2695         rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2696         rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2697 }
2698
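/* Removing the root directory recursively tears down every file and
 * sub-directory created by rvu_dbg_init() and the per-block helpers.
 */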
2699 void rvu_dbg_exit(struct rvu *rvu)
2700 {
2701         debugfs_remove_recursive(rvu->rvu_dbg.root);
2702 }
2703
2704 #endif /* CONFIG_DEBUG_FS */