1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2019 Marvell.
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
15 #include "rvu_struct.h"
19 #include "lmac_common.h"
22 #define DEBUGFS_DIR_NAME "octeontx2"
/* Human-readable labels for CGX RX hardware counters, indexed by
 * CGX_STATx id. Emitted verbatim in debugfs stat dumps.
 * NOTE(review): listing is elided here — the closing brace and any
 * trailing entries sit outside this view.
 */
73 static char *cgx_rx_stats_fields[] = {
74 [CGX_STAT0] = "Received packets",
75 [CGX_STAT1] = "Octets of received packets",
76 [CGX_STAT2] = "Received PAUSE packets",
77 [CGX_STAT3] = "Received PAUSE and control packets",
78 [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
79 [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
80 [CGX_STAT6] = "Packets dropped due to RX FIFO full",
81 [CGX_STAT7] = "Octets dropped due to RX FIFO full",
82 [CGX_STAT8] = "Error packets",
83 [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
84 [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
85 [CGX_STAT11] = "NCSI-bound packets dropped",
86 [CGX_STAT12] = "NCSI-bound octets dropped",
/* Human-readable labels for CGX TX hardware counters, indexed by
 * CGX_STATx id. Emitted verbatim in debugfs stat dumps.
 * NOTE(review): closing brace is outside this elided view.
 */
89 static char *cgx_tx_stats_fields[] = {
90 [CGX_STAT0] = "Packets dropped due to excessive collisions",
91 [CGX_STAT1] = "Packets dropped due to excessive deferral",
92 [CGX_STAT2] = "Multiple collisions before successful transmission",
93 [CGX_STAT3] = "Single collisions before successful transmission",
94 [CGX_STAT4] = "Total octets sent on the interface",
95 [CGX_STAT5] = "Total frames sent on the interface",
96 [CGX_STAT6] = "Packets sent with an octet count < 64",
97 [CGX_STAT7] = "Packets sent with an octet count == 64",
98 [CGX_STAT8] = "Packets sent with an octet count of 65-127",
99 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
100 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
101 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
102 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
103 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
104 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
105 [CGX_STAT15] = "Packets sent to the multicast DMAC",
106 [CGX_STAT16] = "Transmit underflow and were truncated",
107 [CGX_STAT17] = "Control/PAUSE packets sent",
/* Human-readable labels for RPM (CN10K MAC) RX hardware counters, in
 * register order. Emitted verbatim in debugfs stat dumps.
 * Fixed two garbled label strings: "a1nrange" -> "in range" and
 * "with out error" -> "without error".
 * NOTE(review): listing is elided here — some entries and the closing
 * brace sit outside this view.
 */
110 static char *rpm_rx_stats_fields[] = {
111 "Octets of received packets",
112 "Octets of received packets without error",
113 "Received packets with alignment errors",
114 "Control/PAUSE packets received",
115 "Packets received with Frame too long Errors",
116 "Packets received with in range length Errors",
118 "Packets received with FrameCheckSequenceErrors",
119 "Packets received with VLAN header",
121 "Packets received with unicast DMAC",
122 "Packets received with multicast DMAC",
123 "Packets received with broadcast DMAC",
125 "Total frames received on interface",
126 "Packets received with an octet count < 64",
127 "Packets received with an octet count == 64",
128 "Packets received with an octet count of 65-127",
129 "Packets received with an octet count of 128-255",
130 "Packets received with an octet count of 256-511",
131 "Packets received with an octet count of 512-1023",
132 "Packets received with an octet count of 1024-1518",
133 "Packets received with an octet count of > 1518",
136 "Fragmented Packets",
137 "CBFC(class based flow control) pause frames received for class 0",
138 "CBFC pause frames received for class 1",
139 "CBFC pause frames received for class 2",
140 "CBFC pause frames received for class 3",
141 "CBFC pause frames received for class 4",
142 "CBFC pause frames received for class 5",
143 "CBFC pause frames received for class 6",
144 "CBFC pause frames received for class 7",
145 "CBFC pause frames received for class 8",
146 "CBFC pause frames received for class 9",
147 "CBFC pause frames received for class 10",
148 "CBFC pause frames received for class 11",
149 "CBFC pause frames received for class 12",
150 "CBFC pause frames received for class 13",
151 "CBFC pause frames received for class 14",
152 "CBFC pause frames received for class 15",
153 "MAC control packets received",
/* Human-readable labels for RPM (CN10K MAC) TX hardware counters, in
 * register order. Emitted verbatim in debugfs stat dumps.
 * NOTE(review): closing brace is outside this elided view.
 */
156 static char *rpm_tx_stats_fields[] = {
157 "Total octets sent on the interface",
158 "Total octets transmitted OK",
159 "Control/Pause frames sent",
160 "Total frames transmitted OK",
161 "Total frames sent with VLAN header",
163 "Packets sent to unicast DMAC",
164 "Packets sent to the multicast DMAC",
165 "Packets sent to a broadcast DMAC",
166 "Packets sent with an octet count == 64",
167 "Packets sent with an octet count of 65-127",
168 "Packets sent with an octet count of 128-255",
169 "Packets sent with an octet count of 256-511",
170 "Packets sent with an octet count of 512-1023",
171 "Packets sent with an octet count of 1024-1518",
172 "Packets sent with an octet count of > 1518",
173 "CBFC(class based flow control) pause frames transmitted for class 0",
174 "CBFC pause frames transmitted for class 1",
175 "CBFC pause frames transmitted for class 2",
176 "CBFC pause frames transmitted for class 3",
177 "CBFC pause frames transmitted for class 4",
178 "CBFC pause frames transmitted for class 5",
179 "CBFC pause frames transmitted for class 6",
180 "CBFC pause frames transmitted for class 7",
181 "CBFC pause frames transmitted for class 8",
182 "CBFC pause frames transmitted for class 9",
183 "CBFC pause frames transmitted for class 10",
184 "CBFC pause frames transmitted for class 11",
185 "CBFC pause frames transmitted for class 12",
186 "CBFC pause frames transmitted for class 13",
187 "CBFC pause frames transmitted for class 14",
188 "CBFC pause frames transmitted for class 15",
189 "MAC control packets sent",
190 "Total frames sent on the interface"
/* Number of banks in an NDC block, taken from its NDC_AF_CONST register. */
199 #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
200 blk_addr, NDC_AF_CONST) & 0xFF)
/* Placeholders so the FOPS-generating macros below can be passed "NULL"
 * for an unused read/write op and still token-paste to a valid symbol.
 */
202 #define rvu_dbg_NULL NULL
203 #define rvu_dbg_open_NULL NULL
/* Generate seq_file-based file_operations (open via single_open) for a
 * debugfs entry named rvu_dbg_<name>_fops.
 */
205 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
206 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
208 return single_open(file, rvu_dbg_##read_op, inode->i_private); \
210 static const struct file_operations rvu_dbg_##name##_fops = { \
211 .owner = THIS_MODULE, \
212 .open = rvu_dbg_open_##name, \
214 .write = rvu_dbg_##write_op, \
215 .llseek = seq_lseek, \
216 .release = single_release, \
/* Generate raw (non-seq_file, simple_open) file_operations for a
 * debugfs entry named rvu_dbg_<name>_fops.
 */
219 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
220 static const struct file_operations rvu_dbg_##name##_fops = { \
221 .owner = THIS_MODULE, \
222 .open = simple_open, \
223 .read = rvu_dbg_##read_op, \
224 .write = rvu_dbg_##write_op \
/* Forward declaration; definition appears later in this file. */
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
/* Byte stride between entries of the LMTST map table (see index math in
 * rvu_dbg_lmtst_map_table_display() below).
 */
229 #define LMT_MAPTBL_ENTRY_SIZE 16
230 /* Dump LMTST map table */
/* debugfs read handler: formats the APR LMTST map table — two 64-bit
 * words per PF/VF entry — into a kernel buffer and copies it to
 * userspace. Returns the number of bytes copied.
 * NOTE(review): listing is elided — allocation checks, some offset
 * arithmetic and the cleanup/return path are partly outside this view.
 */
231 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
233 size_t count, loff_t *ppos)
235 struct rvu *rvu = filp->private_data;
236 u64 lmt_addr, val, tbl_base;
237 int pf, vf, num_vfs, hw_vfs;
238 void __iomem *lmt_map_base;
239 int buf_size = 10240;
245 /* don't allow partial reads */
249 buf = kzalloc(buf_size, GFP_KERNEL);
253 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
/* Map the table (128KB window) so entries can be read with readq() */
255 lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
257 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
/* Table header */
262 off += scnprintf(&buf[off], buf_size - 1 - off,
263 "\n\t\t\t\t\tLmtst Map Table Entries");
264 off += scnprintf(&buf[off], buf_size - 1 - off,
265 "\n\t\t\t\t\t=======================");
266 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
267 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
268 off += scnprintf(&buf[off], buf_size - 1 - off,
269 "Lmtline Base (word 0)\t\t");
270 off += scnprintf(&buf[off], buf_size - 1 - off,
271 "Lmt Map Entry (word 1)");
272 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
/* One row per PF, followed by a row per enabled VF of that PF */
273 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
274 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
/* Each PF owns total_vfs entries; VF entries follow the PF's own */
277 index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
278 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
280 lmt_addr = readq(lmt_map_base + index);
281 off += scnprintf(&buf[off], buf_size - 1 - off,
282 " 0x%016llx\t\t", lmt_addr);
284 val = readq(lmt_map_base + index);
285 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
287 /* Reading num of VFs per PF */
288 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
289 for (vf = 0; vf < num_vfs; vf++) {
290 index = (pf * rvu->hw->total_vfs * 16) +
291 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
292 off += scnprintf(&buf[off], buf_size - 1 - off,
293 "PF%d:VF%d \t\t", pf, vf);
294 off += scnprintf(&buf[off], buf_size - 1 - off,
295 " 0x%llx\t\t", (tbl_base + index));
296 lmt_addr = readq(lmt_map_base + index);
297 off += scnprintf(&buf[off], buf_size - 1 - off,
298 " 0x%016llx\t\t", lmt_addr);
300 val = readq(lmt_map_base + index);
301 off += scnprintf(&buf[off], buf_size - 1 - off,
302 " 0x%016llx\n", val);
305 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
/* Copy at most 'count' bytes back to the reader */
307 ret = min(off, count);
308 if (copy_to_user(buffer, buf, ret))
312 iounmap(lmt_map_base);
320 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
/* Build into 'lfs' a comma-separated list of the block's LFs owned by
 * @pcifunc, collapsing consecutive LFs into ranges (e.g. "0-3,7").
 * prev_lf starts at block.lf.max (an impossible LF) so the first match
 * never looks range-adjacent.
 * NOTE(review): the range-tracking branches are partly elided here.
 */
322 static void get_lf_str_list(struct rvu_block block, int pcifunc,
325 int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
327 for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
328 if (lf >= block.lf.max)
331 if (block.fn_map[lf] != pcifunc)
334 if (lf == prev_lf + 1) {
341 len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
343 len += (len ? sprintf(lfs + len, ",%d", lf) :
344 sprintf(lfs + len, "%d", lf));
/* Close a trailing range that was still open when the loop ended */
351 len += sprintf(lfs + len, "-%d", prev_lf);
/* Compute the widest LF-list string any PF/VF produces for any block,
 * so the resource-attach table below can align its columns. Starts from
 * a 12-char minimum.
 * NOTE(review): the return/cleanup path is outside this elided view.
 */
356 static int get_max_column_width(struct rvu *rvu)
358 int index, pf, vf, lf_str_size = 12, buf_size = 256;
359 struct rvu_block block;
363 buf = kzalloc(buf_size, GFP_KERNEL);
/* vf == 0 denotes the PF itself; hence <= total_vfs */
367 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
368 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
369 pcifunc = pf << 10 | vf;
373 for (index = 0; index < BLK_COUNT; index++) {
374 block = rvu->hw->block[index];
375 if (!strlen(block.name))
378 get_lf_str_list(block, pcifunc, buf);
379 if (lf_str_size <= strlen(buf))
380 lf_str_size = strlen(buf) + 1;
389 /* Dumps current provisioning status of all RVU block LFs */
/* debugfs read handler: one header row of block names, then one row per
 * PF/VF listing the LFs of each block attached to that pcifunc. Output
 * is staged in 'buf' and copied to userspace chunk by chunk.
 * NOTE(review): listing is elided — buffer sizing, skip conditions and
 * the final cleanup/return are partly outside this view.
 */
390 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
392 size_t count, loff_t *ppos)
394 int index, off = 0, flag = 0, len = 0, i = 0;
395 struct rvu *rvu = filp->private_data;
396 int bytes_not_copied = 0;
397 struct rvu_block block;
404 /* don't allow partial reads */
408 buf = kzalloc(buf_size, GFP_KERNEL);
412 /* Get the maximum width of a column */
413 lf_str_size = get_max_column_width(rvu);
415 lfs = kzalloc(lf_str_size, GFP_KERNEL);
/* Header row: "pcifunc" column followed by each non-empty block name */
420 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
422 for (index = 0; index < BLK_COUNT; index++)
423 if (strlen(rvu->hw->block[index].name)) {
424 off += scnprintf(&buf[off], buf_size - 1 - off,
426 rvu->hw->block[index].name);
429 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
430 bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
431 if (bytes_not_copied)
/* vf == 0 denotes the PF itself; vf-1 is the real VF index */
436 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
437 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
440 pcifunc = pf << 10 | vf;
445 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
446 off = scnprintf(&buf[off],
448 "%-*s", lf_str_size, lfs);
450 sprintf(lfs, "PF%d", pf);
451 off = scnprintf(&buf[off],
453 "%-*s", lf_str_size, lfs);
456 for (index = 0; index < BLK_COUNT; index++) {
457 block = rvu->hw->block[index];
458 if (!strlen(block.name))
462 get_lf_str_list(block, pcifunc, lfs);
466 off += scnprintf(&buf[off], buf_size - 1 - off,
467 "%-*s", lf_str_size, lfs);
470 off += scnprintf(&buf[off],
471 buf_size - 1 - off, "\n");
472 bytes_not_copied = copy_to_user(buffer +
475 if (bytes_not_copied)
487 if (bytes_not_copied)
493 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
/* seq_file show handler: prints, for every CGX/RPM-mapped PF, its PCI
 * device name, pcifunc, attached NIX block, and CGX/LMAC identifiers.
 * NOTE(review): listing is elided — the no-CGX early return and blkid
 * selection for NIX1 are partly outside this view.
 */
495 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
497 struct rvu *rvu = filp->private;
498 struct pci_dev *pdev = NULL;
499 struct mac_ops *mac_ops;
500 char cgx[10], lmac[10];
501 struct rvu_pfvf *pfvf;
502 int pf, domain, blkid;
507 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
508 /* There can be no CGX devices at all */
/* mac_ops->name is "cgx" or "rpm" depending on silicon */
511 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
513 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
514 if (!is_pf_cgxmapped(rvu, pf))
/* AF is device 0; PFs start at PCI devfn slot pf + 1 */
517 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
524 pfvf = rvu_get_pfvf(rvu, pcifunc);
526 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
531 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
533 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
534 sprintf(lmac, "LMAC%d", lmac_id);
535 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
536 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
541 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
/* Validate an LF index for the given block and, on success, return the
 * owning pcifunc through *pcifunc. Warns and fails for an out-of-range
 * LF or one not attached to any PF/VF.
 * NOTE(review): the boolean return statements are outside this elided view.
 */
543 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
546 struct rvu_block *block;
547 struct rvu_hwinfo *hw;
550 block = &hw->block[blkaddr];
552 if (lf < 0 || lf >= block->lf.max) {
553 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
558 *pcifunc = block->fn_map[lf];
561 "This LF is not attached to any RVU PFFUNC\n");
/* Print the NPA aura and pool queue sizes of a PF/VF along with their
 * enable/disable bitmaps (rendered into a temporary page buffer).
 */
567 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
571 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
575 if (!pfvf->aura_ctx) {
576 seq_puts(m, "Aura context is not initialized\n");
578 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
579 pfvf->aura_ctx->qsize);
580 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
581 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
584 if (!pfvf->pool_ctx) {
585 seq_puts(m, "Pool context is not initialized\n");
587 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
588 pfvf->pool_ctx->qsize);
589 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
590 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
595 /* The 'qsize' entry dumps current Aura/Pool context Qsize
596 * and each context's current enable/disable status in a bitmap.
/* Shared show handler for the NPA and NIX 'qsize' debugfs entries. The
 * LF to dump was stored earlier by rvu_dbg_qsize_write(); for NIX, the
 * block (NIX0 vs NIX1) is inferred from the parent directory name.
 */
598 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
601 void (*print_qsize)(struct seq_file *filp,
602 struct rvu_pfvf *pfvf) = NULL;
603 struct dentry *current_dir;
604 struct rvu_pfvf *pfvf;
613 qsize_id = rvu->rvu_dbg.npa_qsize_id;
614 print_qsize = print_npa_qsize;
618 qsize_id = rvu->rvu_dbg.nix_qsize_id;
619 print_qsize = print_nix_qsize;
626 if (blktype == BLKTYPE_NPA) {
627 blkaddr = BLKADDR_NPA;
629 current_dir = filp->file->f_path.dentry->d_parent;
630 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
631 BLKADDR_NIX1 : BLKADDR_NIX0);
634 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
637 pfvf = rvu_get_pfvf(rvu, pcifunc);
638 print_qsize(filp, pfvf);
/* Shared write handler for the NPA and NIX 'qsize' debugfs entries.
 * Parses an LF index from userspace ("help" prints usage), validates it
 * against the right block (NIX0/NIX1 chosen by parent directory name),
 * and records it for the next qsize_display() read.
 *
 * Fix: memdup_user(buffer, count + 1) read one byte past the
 * user-supplied buffer. memdup_user_nul(buffer, count) copies exactly
 * 'count' bytes and NUL-terminates, so the manual terminator write is
 * also dropped.
 * NOTE(review): listing is elided — the IS_ERR() check on cmd_buf and
 * some cleanup lines are outside this view.
 */
643 static ssize_t rvu_dbg_qsize_write(struct file *filp,
644 const char __user *buffer, size_t count,
645 loff_t *ppos, int blktype)
647 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
648 struct seq_file *seqfile = filp->private_data;
649 char *cmd_buf, *cmd_buf_tmp, *subtoken;
650 struct rvu *rvu = seqfile->private;
651 struct dentry *current_dir;
656 cmd_buf = memdup_user_nul(buffer, count);
/* Trim input at the first newline, if any */
662 cmd_buf_tmp = strchr(cmd_buf, '\n');
665 count = cmd_buf_tmp - cmd_buf + 1;
668 cmd_buf_tmp = cmd_buf;
669 subtoken = strsep(&cmd_buf, " ");
670 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
674 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
675 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
676 goto qsize_write_done;
679 if (blktype == BLKTYPE_NPA) {
680 blkaddr = BLKADDR_NPA;
682 current_dir = filp->f_path.dentry->d_parent;
683 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
684 BLKADDR_NIX1 : BLKADDR_NIX0);
687 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
689 goto qsize_write_done;
691 if (blktype == BLKTYPE_NPA)
692 rvu->rvu_dbg.npa_qsize_id = lf;
694 rvu->rvu_dbg.nix_qsize_id = lf;
698 return ret ? ret : count;
/* NPA-specific thin wrappers binding the shared qsize read/write
 * handlers to the BLKTYPE_NPA debugfs entry.
 */
701 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
702 const char __user *buffer,
703 size_t count, loff_t *ppos)
705 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
709 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
711 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
714 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
716 /* Dumps given NPA Aura's context */
/* Field-by-field dump of one NPA aura context word group (W0-W6).
 * Fields that only exist on CN10K (fc_be, fc_msh_dst) are gated on
 * !is_rvu_otx2().
 */
717 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
719 struct npa_aura_s *aura = &rsp->aura;
720 struct rvu *rvu = m->private;
722 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
724 seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
725 aura->ena, aura->pool_caching);
726 seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
727 aura->pool_way_mask, aura->avg_con);
728 seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
729 aura->pool_drop_ena, aura->aura_drop_ena);
730 seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
731 aura->bp_ena, aura->aura_drop);
732 seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
733 aura->shift, aura->avg_level);
735 seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
736 (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
738 seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
739 (u64)aura->limit, aura->bp, aura->fc_ena);
741 if (!is_rvu_otx2(rvu))
742 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
743 seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
744 aura->fc_up_crossing, aura->fc_stype);
745 seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
747 seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
749 seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
750 aura->pool_drop, aura->update_time);
751 seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
752 aura->err_int, aura->err_int_ena);
753 seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
754 aura->thresh_int, aura->thresh_int_ena);
755 seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
756 aura->thresh_up, aura->thresh_qint_idx);
757 seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
759 seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
760 if (!is_rvu_otx2(rvu))
761 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
764 /* Dumps given NPA Pool's context */
/* Field-by-field dump of one NPA pool context word group (W0-W8).
 * CN10K-only fields (fc_be, fc_msh_dst) are gated on !is_rvu_otx2().
 */
765 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
767 struct npa_pool_s *pool = &rsp->pool;
768 struct rvu *rvu = m->private;
770 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
772 seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
773 pool->ena, pool->nat_align);
774 seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
775 pool->stack_caching, pool->stack_way_mask);
776 seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
777 pool->buf_offset, pool->buf_size);
779 seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
780 pool->stack_max_pages, pool->stack_pages);
782 seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
784 seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
785 pool->stack_offset, pool->shift, pool->avg_level);
786 seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
787 pool->avg_con, pool->fc_ena, pool->fc_stype);
788 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
789 pool->fc_hyst_bits, pool->fc_up_crossing);
790 if (!is_rvu_otx2(rvu))
791 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
792 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
794 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
796 seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
798 seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
800 seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
801 pool->err_int, pool->err_int_ena);
802 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
803 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
804 pool->thresh_int_ena, pool->thresh_up);
805 seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
806 pool->thresh_qint_idx, pool->err_qint_idx);
807 if (!is_rvu_otx2(rvu))
808 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
811 /* Reads aura/pool's ctx from admin queue */
/* Shared show handler for the aura_ctx/pool_ctx debugfs entries. Uses
 * the LF/id/all selection stored earlier by write_npa_ctx(), issues an
 * AQ read per id, and prints each context via the matching printer.
 * NOTE(review): listing is elided — switch braces, the 'all' handling
 * before the loop, and the final return are partly outside this view.
 */
812 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
814 void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
815 struct npa_aq_enq_req aq_req;
816 struct npa_aq_enq_rsp rsp;
817 struct rvu_pfvf *pfvf;
818 int aura, rc, max_id;
826 case NPA_AQ_CTYPE_AURA:
827 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
828 id = rvu->rvu_dbg.npa_aura_ctx.id;
829 all = rvu->rvu_dbg.npa_aura_ctx.all;
832 case NPA_AQ_CTYPE_POOL:
833 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
834 id = rvu->rvu_dbg.npa_pool_ctx.id;
835 all = rvu->rvu_dbg.npa_pool_ctx.all;
841 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
844 pfvf = rvu_get_pfvf(rvu, pcifunc);
845 if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
846 seq_puts(m, "Aura context is not initialized\n");
848 } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
849 seq_puts(m, "Pool context is not initialized\n");
/* Build a single AQ READ request, re-used for each id in the loop */
853 memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
854 aq_req.hdr.pcifunc = pcifunc;
855 aq_req.ctype = ctype;
856 aq_req.op = NPA_AQ_INSTOP_READ;
857 if (ctype == NPA_AQ_CTYPE_AURA) {
858 max_id = pfvf->aura_ctx->qsize;
859 print_npa_ctx = print_npa_aura_ctx;
861 max_id = pfvf->pool_ctx->qsize;
862 print_npa_ctx = print_npa_pool_ctx;
865 if (id < 0 || id >= max_id) {
866 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
867 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
877 for (aura = id; aura < max_id; aura++) {
878 aq_req.aura_id = aura;
879 seq_printf(m, "======%s : %d=======\n",
880 (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
882 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
884 seq_puts(m, "Failed to read context\n");
887 print_npa_ctx(m, &rsp);
/* Validate and record the LF/id/all selection for a later aura_ctx or
 * pool_ctx display. Rejects an uninitialized context or out-of-range id.
 * NOTE(review): return statements and switch braces are partly outside
 * this elided view.
 */
892 static int write_npa_ctx(struct rvu *rvu, bool all,
893 int npalf, int id, int ctype)
895 struct rvu_pfvf *pfvf;
899 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
902 pfvf = rvu_get_pfvf(rvu, pcifunc);
904 if (ctype == NPA_AQ_CTYPE_AURA) {
905 if (!pfvf->aura_ctx) {
906 dev_warn(rvu->dev, "Aura context is not initialized\n");
909 max_id = pfvf->aura_ctx->qsize;
910 } else if (ctype == NPA_AQ_CTYPE_POOL) {
911 if (!pfvf->pool_ctx) {
912 dev_warn(rvu->dev, "Pool context is not initialized\n");
915 max_id = pfvf->pool_ctx->qsize;
918 if (id < 0 || id >= max_id) {
919 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
920 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
/* Selection is stored per ctype and consumed by the display handler */
926 case NPA_AQ_CTYPE_AURA:
927 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
928 rvu->rvu_dbg.npa_aura_ctx.id = id;
929 rvu->rvu_dbg.npa_aura_ctx.all = all;
932 case NPA_AQ_CTYPE_POOL:
933 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
934 rvu->rvu_dbg.npa_pool_ctx.id = id;
935 rvu->rvu_dbg.npa_pool_ctx.all = all;
/* Parse "<npalf> [<id>|all]" from a user write into *npalf, *id and the
 * 'all' flag. cmd_buf must be at least *count + 1 bytes (caller
 * allocates count + 1). *count is trimmed at the first newline.
 * NOTE(review): the function tail (all-flag assignment, return) is
 * outside this elided view.
 */
943 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
944 const char __user *buffer, int *npalf,
947 int bytes_not_copied;
952 bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
953 if (bytes_not_copied)
956 cmd_buf[*count] = '\0';
957 cmd_buf_tmp = strchr(cmd_buf, '\n');
961 *count = cmd_buf_tmp - cmd_buf + 1;
/* First token: NPA LF index */
964 subtoken = strsep(&cmd_buf, " ");
965 ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
/* Optional second token: numeric id or the literal "all" */
968 subtoken = strsep(&cmd_buf, " ");
969 if (subtoken && strcmp(subtoken, "all") == 0) {
972 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
/* Shared write handler for aura_ctx/pool_ctx: parses the user command
 * and stores the selection via write_npa_ctx(). Prints usage on a parse
 * failure. Returns 'count' on success, negative error otherwise.
 * NOTE(review): allocation-failure check and kfree are outside this
 * elided view.
 */
981 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
982 const char __user *buffer,
983 size_t count, loff_t *ppos, int ctype)
985 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
987 struct seq_file *seqfp = filp->private_data;
988 struct rvu *rvu = seqfp->private;
989 int npalf, id = 0, ret;
992 if ((*ppos != 0) || !count)
/* +1 leaves room for the NUL terminator parse_cmd_buffer_ctx() writes */
995 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
998 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1002 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1003 ctype_string, ctype_string);
1006 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1010 return ret ? ret : count;
/* Thin wrappers binding the shared NPA ctx read/write handlers to the
 * aura_ctx and pool_ctx debugfs entries.
 */
1013 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1014 const char __user *buffer,
1015 size_t count, loff_t *ppos)
1017 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1021 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1023 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1026 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1028 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1029 const char __user *buffer,
1030 size_t count, loff_t *ppos)
1032 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1036 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1038 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1041 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
/* Print per-port NDC performance counters (requests, latency,
 * outstanding, cant-alloc) for one ctype/transaction combination.
 *
 * Fix: guard the average-latency computation — 'req' comes straight
 * from a hardware counter and may be zero, which made 'lat / req' a
 * divide-by-zero.
 * NOTE(review): listing is elided — the rvu pointer assignment in each
 * branch of the blk_addr test is outside this view.
 */
1043 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1044 int ctype, int transaction)
1046 u64 req, out_req, lat, cant_alloc;
1047 struct nix_hw *nix_hw;
1051 if (blk_addr == BLKADDR_NDC_NPA0) {
1054 nix_hw = s->private;
1058 for (port = 0; port < NDC_MAX_PORT; port++) {
1059 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1060 (port, ctype, transaction));
1061 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1062 (port, ctype, transaction));
1063 out_req = rvu_read64(rvu, blk_addr,
1064 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1065 (port, ctype, transaction));
1066 cant_alloc = rvu_read64(rvu, blk_addr,
1067 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1068 (port, transaction));
1069 seq_printf(s, "\nPort:%d\n", port);
1070 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1071 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1072 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", req ? lat / req : 0);
1073 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1074 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
/* Dump NDC counters for all four mode/direction combinations of one
 * NDC block (cache/bypass x read/write).
 */
1078 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1080 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1081 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1082 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1083 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1084 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1085 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1086 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1087 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
/* Show handler for the NPA NDC cache-stats debugfs entry */
1091 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1093 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1096 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
/* Print per-bank hit/miss counters for one NDC block; bank count comes
 * from NDC_MAX_BANK (the block's NDC_AF_CONST register).
 * NOTE(review): the rvu pointer assignment in each branch of the
 * blk_addr test is outside this elided view.
 */
1098 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1100 struct nix_hw *nix_hw;
1104 if (blk_addr == BLKADDR_NDC_NPA0) {
1107 nix_hw = s->private;
1111 max_bank = NDC_MAX_BANK(rvu, blk_addr);
1112 for (bank = 0; bank < max_bank; bank++) {
1113 seq_printf(s, "BANK:%d\n", bank);
1114 seq_printf(s, "\tHits:\t%lld\n",
1115 (u64)rvu_read64(rvu, blk_addr,
1116 NDC_AF_BANKX_HIT_PC(bank)));
1117 seq_printf(s, "\tMiss:\t%lld\n",
1118 (u64)rvu_read64(rvu, blk_addr,
1119 NDC_AF_BANKX_MISS_PC(bank)));
/* NIX-specific NDC show handlers: each picks the RX/TX NDC block that
 * matches the NIX instance (NIX0 vs NIX1) found in seq_file private
 * data, then delegates to the shared stats dumpers above.
 */
1124 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1126 struct nix_hw *nix_hw = filp->private;
1130 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1131 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1132 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1134 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1137 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1139 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1141 struct nix_hw *nix_hw = filp->private;
1145 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1146 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1147 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1149 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1152 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1154 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1157 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1160 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1162 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1165 struct nix_hw *nix_hw = filp->private;
/* NOTE(review): ndc_idx defaults to NPA0_U here; the NIX-specific
 * assignment, if any, is outside this elided view — confirm upstream.
 */
1166 int ndc_idx = NPA0_U;
1169 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1170 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1172 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1175 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1177 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1180 struct nix_hw *nix_hw = filp->private;
1181 int ndc_idx = NPA0_U;
1184 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1185 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1187 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1190 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
/* Field-by-field dump of a CN10K-layout NIX SQ context (words W0-W15).
 * Called from print_nix_sq_ctx() when the silicon is not OcteonTX2.
 */
1192 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1193 struct nix_cn10k_sq_ctx_s *sq_ctx)
1195 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1196 sq_ctx->ena, sq_ctx->qint_idx);
1197 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1198 sq_ctx->substream, sq_ctx->sdp_mcast);
1199 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1200 sq_ctx->cq, sq_ctx->sqe_way_mask);
1202 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1203 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1204 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1205 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1206 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1207 sq_ctx->default_chan, sq_ctx->sqb_count);
1209 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1210 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1211 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1212 sq_ctx->sqb_aura, sq_ctx->sq_int);
1213 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1214 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1216 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1217 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1218 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1219 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1220 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1221 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1222 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1223 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1224 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1225 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1227 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1228 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1229 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1230 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1231 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1232 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1233 sq_ctx->smenq_next_sqb);
1235 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1237 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1238 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1239 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1240 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1241 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1242 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1243 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1245 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1246 (u64)sq_ctx->scm_lso_rem);
1247 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1248 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1249 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1250 (u64)sq_ctx->dropped_octs);
1251 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1252 (u64)sq_ctx->dropped_pkts);
1255 /* Dumps given nix_sq's context */
1256 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1258 struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1259 struct nix_hw *nix_hw = m->private;
1260 struct rvu *rvu = nix_hw->rvu;
1262 if (!is_rvu_otx2(rvu)) {
1263 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1266 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1267 sq_ctx->sqe_way_mask, sq_ctx->cq);
1268 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1269 sq_ctx->sdp_mcast, sq_ctx->substream);
1270 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1271 sq_ctx->qint_idx, sq_ctx->ena);
1273 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1274 sq_ctx->sqb_count, sq_ctx->default_chan);
1275 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1276 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1277 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1278 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1280 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1281 sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1282 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1283 sq_ctx->sq_int, sq_ctx->sqb_aura);
1284 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1286 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1287 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1288 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1289 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1290 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1291 sq_ctx->smenq_offset, sq_ctx->tail_offset);
1292 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1293 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1294 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1295 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1296 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1297 sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1299 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1300 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1301 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1302 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1303 sq_ctx->smenq_next_sqb);
1305 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1307 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1308 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1309 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1310 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1311 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1312 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1313 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1315 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1316 (u64)sq_ctx->scm_lso_rem);
1317 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1318 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1319 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1320 (u64)sq_ctx->dropped_octs);
1321 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1322 (u64)sq_ctx->dropped_pkts);
1325 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1326 struct nix_cn10k_rq_ctx_s *rq_ctx)
1328 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1329 rq_ctx->ena, rq_ctx->sso_ena);
1330 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1331 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1332 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1333 rq_ctx->cq, rq_ctx->lenerr_dis);
1334 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1335 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1336 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1337 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1338 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1339 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1340 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1342 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1343 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1344 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1345 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1346 rq_ctx->sso_grp, rq_ctx->sso_tt);
1347 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1348 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1349 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1350 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1351 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1352 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1353 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1354 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1356 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1357 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1358 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1359 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1360 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1361 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1362 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1363 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1364 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1365 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1366 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1368 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1369 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1370 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1371 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1372 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1373 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1374 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1375 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1377 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1378 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1379 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1380 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1381 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1382 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1383 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1385 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1386 rq_ctx->ltag, rq_ctx->good_utag);
1387 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1388 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1389 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1390 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1391 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1392 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1393 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1395 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1396 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1397 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1398 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1399 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1402 /* Dumps given nix_rq's context */
1403 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1405 struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1406 struct nix_hw *nix_hw = m->private;
1407 struct rvu *rvu = nix_hw->rvu;
1409 if (!is_rvu_otx2(rvu)) {
1410 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1414 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1415 rq_ctx->wqe_aura, rq_ctx->substream);
1416 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1417 rq_ctx->cq, rq_ctx->ena_wqwd);
1418 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1419 rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1420 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1422 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1423 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1424 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1425 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1426 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1427 rq_ctx->pb_caching, rq_ctx->sso_tt);
1428 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1429 rq_ctx->sso_grp, rq_ctx->lpb_aura);
1430 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1432 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1433 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1434 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1435 rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1436 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1437 rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1438 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1439 rq_ctx->spb_ena, rq_ctx->wqe_skip);
1440 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1442 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1443 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1444 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1445 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1446 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1447 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1448 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1449 rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1451 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1452 rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1453 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1454 rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1455 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1456 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1457 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1459 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1460 rq_ctx->flow_tagw, rq_ctx->bad_utag);
1461 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1462 rq_ctx->good_utag, rq_ctx->ltag);
1464 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1465 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1466 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1467 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1468 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1471 /* Dumps given nix_cq's context */
1472 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1474 struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1476 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1478 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1479 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1480 cq_ctx->avg_con, cq_ctx->cint_idx);
1481 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1482 cq_ctx->cq_err, cq_ctx->qint_idx);
1483 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1484 cq_ctx->bpid, cq_ctx->bp_ena);
1486 seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1487 cq_ctx->update_time, cq_ctx->avg_level);
1488 seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1489 cq_ctx->head, cq_ctx->tail);
1491 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1492 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1493 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1494 cq_ctx->qsize, cq_ctx->caching);
1495 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1496 cq_ctx->substream, cq_ctx->ena);
1497 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1498 cq_ctx->drop_ena, cq_ctx->drop);
1499 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
/* seq_file show handler shared by the sq_ctx/rq_ctx/cq_ctx debugfs files:
 * reads back the queue context(s) previously selected via the matching
 * _write hook, using a NIX AQ READ mailbox request, and pretty-prints
 * each one.
 * NOTE(review): this extraction is missing structural lines (braces,
 * the switch header, some declarations/returns); comments annotate the
 * visible logic only.
 */
1502 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1503 void *unused, int ctype)
1505 void (*print_nix_ctx)(struct seq_file *filp,
1506 struct nix_aq_enq_rsp *rsp) = NULL;
1507 struct nix_hw *nix_hw = filp->private;
1508 struct rvu *rvu = nix_hw->rvu;
1509 struct nix_aq_enq_req aq_req;
1510 struct nix_aq_enq_rsp rsp;
1511 char *ctype_string = NULL;
1512 int qidx, rc, max_id = 0;
1513 struct rvu_pfvf *pfvf;
/* Retrieve the NIXLF / queue-id / "all" selection cached by the last write */
1518 case NIX_AQ_CTYPE_CQ:
1519 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1520 id = rvu->rvu_dbg.nix_cq_ctx.id;
1521 all = rvu->rvu_dbg.nix_cq_ctx.all;
1524 case NIX_AQ_CTYPE_SQ:
1525 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1526 id = rvu->rvu_dbg.nix_sq_ctx.id;
1527 all = rvu->rvu_dbg.nix_sq_ctx.all;
1530 case NIX_AQ_CTYPE_RQ:
1531 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1532 id = rvu->rvu_dbg.nix_rq_ctx.id;
1533 all = rvu->rvu_dbg.nix_rq_ctx.all;
/* Validate the LF and bail out if the requested context type is not set up */
1540 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1543 pfvf = rvu_get_pfvf(rvu, pcifunc);
1544 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1545 seq_puts(filp, "SQ context is not initialized\n");
1547 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1548 seq_puts(filp, "RQ context is not initialized\n");
1550 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1551 seq_puts(filp, "CQ context is not initialized\n");
/* Select the per-ctype queue count, label string and context printer */
1555 if (ctype == NIX_AQ_CTYPE_SQ) {
1556 max_id = pfvf->sq_ctx->qsize;
1557 ctype_string = "sq";
1558 print_nix_ctx = print_nix_sq_ctx;
1559 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1560 max_id = pfvf->rq_ctx->qsize;
1561 ctype_string = "rq";
1562 print_nix_ctx = print_nix_rq_ctx;
1563 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1564 max_id = pfvf->cq_ctx->qsize;
1565 ctype_string = "cq";
1566 print_nix_ctx = print_nix_cq_ctx;
/* Build one AQ READ request and issue it per selected queue index */
1569 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1570 aq_req.hdr.pcifunc = pcifunc;
1571 aq_req.ctype = ctype;
1572 aq_req.op = NIX_AQ_INSTOP_READ;
1577 for (qidx = id; qidx < max_id; qidx++) {
1579 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1580 ctype_string, nixlf, aq_req.qidx);
1581 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1583 seq_puts(filp, "Failed to read the context\n");
1586 print_nix_ctx(filp, &rsp);
/* Validate and cache a user's queue-context selection (NIXLF + queue id,
 * or "all") into rvu->rvu_dbg so the subsequent seq_file read can dump it.
 * Warns and rejects when the LF's context of the given type is not
 * initialized or the id is out of the queue-size range.
 * NOTE(review): braces, error returns and the switch header are missing
 * from this extraction; comments annotate the visible logic only.
 */
1591 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1592 int id, int ctype, char *ctype_string,
1595 struct nix_hw *nix_hw = m->private;
1596 struct rvu_pfvf *pfvf;
1600 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1603 pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Per-ctype: require an initialized context and fetch its queue count */
1605 if (ctype == NIX_AQ_CTYPE_SQ) {
1606 if (!pfvf->sq_ctx) {
1607 dev_warn(rvu->dev, "SQ context is not initialized\n");
1610 max_id = pfvf->sq_ctx->qsize;
1611 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1612 if (!pfvf->rq_ctx) {
1613 dev_warn(rvu->dev, "RQ context is not initialized\n");
1616 max_id = pfvf->rq_ctx->qsize;
1617 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1618 if (!pfvf->cq_ctx) {
1619 dev_warn(rvu->dev, "CQ context is not initialized\n");
1622 max_id = pfvf->cq_ctx->qsize;
/* Range-check the requested queue id against the LF's queue size */
1625 if (id < 0 || id >= max_id) {
1626 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1627 ctype_string, max_id - 1);
/* Cache the selection for the matching _display handler */
1631 case NIX_AQ_CTYPE_CQ:
1632 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1633 rvu->rvu_dbg.nix_cq_ctx.id = id;
1634 rvu->rvu_dbg.nix_cq_ctx.all = all;
1637 case NIX_AQ_CTYPE_SQ:
1638 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1639 rvu->rvu_dbg.nix_sq_ctx.id = id;
1640 rvu->rvu_dbg.nix_sq_ctx.all = all;
1643 case NIX_AQ_CTYPE_RQ:
1644 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1645 rvu->rvu_dbg.nix_rq_ctx.id = id;
1646 rvu->rvu_dbg.nix_rq_ctx.all = all;
/* Common debugfs write handler for the sq_ctx/rq_ctx/cq_ctx files:
 * copies the user command into a kernel buffer, parses
 * "<nixlf> [<qid>|all]" and records the selection via
 * write_nix_queue_ctx(). Returns the consumed byte count on success.
 * NOTE(review): the ctype parameter line, switch header, kfree/cleanup
 * path and some braces are missing from this extraction.
 */
1654 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1655 const char __user *buffer,
1656 size_t count, loff_t *ppos,
1659 struct seq_file *m = filp->private_data;
1660 struct nix_hw *nix_hw = m->private;
1661 struct rvu *rvu = nix_hw->rvu;
1662 char *cmd_buf, *ctype_string;
1663 int nixlf, id = 0, ret;
/* Only whole, single writes are accepted */
1666 if ((*ppos != 0) || !count)
1670 case NIX_AQ_CTYPE_SQ:
1671 ctype_string = "sq";
1673 case NIX_AQ_CTYPE_RQ:
1674 ctype_string = "rq";
1676 case NIX_AQ_CTYPE_CQ:
1677 ctype_string = "cq";
/* +1 for the NUL terminator of the copied user command */
1683 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1688 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1692 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1693 ctype_string, ctype_string);
1696 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1701 return ret ? ret : count;
1704 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1705 const char __user *buffer,
1706 size_t count, loff_t *ppos)
1708 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1712 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1714 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
1717 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1719 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1720 const char __user *buffer,
1721 size_t count, loff_t *ppos)
1723 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1727 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
1729 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
1732 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1734 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1735 const char __user *buffer,
1736 size_t count, loff_t *ppos)
1738 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1742 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1744 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
1747 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1749 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1750 unsigned long *bmap, char *qtype)
1754 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1758 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1759 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1760 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1765 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1768 seq_puts(filp, "cq context is not initialized\n");
1770 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1774 seq_puts(filp, "rq context is not initialized\n");
1776 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1780 seq_puts(filp, "sq context is not initialized\n");
1782 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1786 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1787 const char __user *buffer,
1788 size_t count, loff_t *ppos)
1790 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1794 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1796 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
1799 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
/* Pretty-print one NIX ingress bandwidth-profile (policer) context,
 * word by word (W0..W15): meter configuration, color actions and
 * per-color pass/drop packet and octet counters.
 * NOTE(review): the per-case 'str' assignments of the pc_mode switch
 * were lost in this extraction.
 */
1801 static void print_band_prof_ctx(struct seq_file *m,
1802 struct nix_bandprof_s *prof)
1806 switch (prof->pc_mode) {
1807 case NIX_RX_PC_MODE_VLAN:
1810 case NIX_RX_PC_MODE_DSCP:
1813 case NIX_RX_PC_MODE_GEN:
1816 case NIX_RX_PC_MODE_RSVD:
1820 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1821 str = (prof->icolor == 3) ? "Color blind" :
1822 (prof->icolor == 0) ? "Green" :
1823 (prof->icolor == 1) ? "Yellow" : "Red";
1824 seq_printf(m, "W0: icolor\t\t%s\n", str);
1825 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1826 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1827 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1828 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1829 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1830 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1831 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1832 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1834 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1835 str = (prof->lmode == 0) ? "byte" : "packet";
1836 seq_printf(m, "W1: lmode\t\t%s\n", str);
/* 'l_sellect' is the (misspelled) field name in the hardware struct */
1837 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1838 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1839 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1840 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1841 str = (prof->gc_action == 0) ? "PASS" :
1842 (prof->gc_action == 1) ? "DROP" : "RED";
1843 seq_printf(m, "W1: gc_action\t\t%s\n", str);
1844 str = (prof->yc_action == 0) ? "PASS" :
1845 (prof->yc_action == 1) ? "DROP" : "RED";
1846 seq_printf(m, "W1: yc_action\t\t%s\n", str);
1847 str = (prof->rc_action == 0) ? "PASS" :
1848 (prof->rc_action == 1) ? "DROP" : "RED";
1849 seq_printf(m, "W1: rc_action\t\t%s\n", str);
1850 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1851 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1852 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1854 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1855 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1856 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
/* W4..W15: per-color pass/drop packet and octet counters */
1857 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1858 (u64)prof->green_pkt_pass);
1859 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1860 (u64)prof->yellow_pkt_pass);
1861 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1862 seq_printf(m, "W7: green_octs_pass\t%lld\n",
1863 (u64)prof->green_octs_pass);
1864 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1865 (u64)prof->yellow_octs_pass);
1866 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1867 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1868 (u64)prof->green_pkt_drop);
1869 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1870 (u64)prof->yellow_pkt_drop);
1871 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1872 seq_printf(m, "W13: green_octs_drop\t%lld\n",
1873 (u64)prof->green_octs_drop);
1874 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1875 (u64)prof->yellow_octs_drop);
1876 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1877 seq_puts(m, "==============================\n");
/* Walk every allocated ingress-policer (bandwidth) profile on each
 * layer (leaf/mid/top), fetch its context through the CN10K AQ and
 * print it along with the owning PF/VF.
 * NOTE(review): braces, 'continue' statements and some declarations
 * are missing from this extraction.
 */
1880 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
1882 struct nix_hw *nix_hw = m->private;
1883 struct nix_cn10k_aq_enq_req aq_req;
1884 struct nix_cn10k_aq_enq_rsp aq_rsp;
1885 struct rvu *rvu = nix_hw->rvu;
1886 struct nix_ipolicer *ipolicer;
1887 int layer, prof_idx, idx, rc;
1891 /* Ingress policers do not exist on all platforms */
1892 if (!nix_hw->ipolicer)
1895 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1896 if (layer == BAND_PROF_INVAL_LAYER)
1898 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1899 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
1901 seq_printf(m, "\n%s bandwidth profiles\n", str);
1902 seq_puts(m, "=======================\n");
1904 ipolicer = &nix_hw->ipolicer[layer];
/* Only dump profiles that are currently allocated */
1906 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
1907 if (is_rsrc_free(&ipolicer->band_prof, idx))
/* AQ profile index encodes the layer in bits [15:14] */
1910 prof_idx = (idx & 0x3FFF) | (layer << 14);
1911 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1912 0x00, NIX_AQ_CTYPE_BANDPROF,
1916 "%s: Failed to fetch context of %s profile %d, err %d\n",
1917 __func__, str, idx, rc);
1920 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
1921 pcifunc = ipolicer->pfvf_map[idx];
1922 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1923 seq_printf(m, "Allocated to :: PF %d\n",
1924 rvu_get_pf(pcifunc));
1926 seq_printf(m, "Allocated to :: PF %d VF %d\n",
1927 rvu_get_pf(pcifunc),
1928 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1929 print_band_prof_ctx(m, &aq_rsp.prof);
1935 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
/* Report, per policer layer, how many bandwidth profiles exist and how
 * many are still free.
 * NOTE(review): variable declarations, 'continue' and the return are
 * missing from this extraction.
 */
1937 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1939 struct nix_hw *nix_hw = m->private;
1940 struct nix_ipolicer *ipolicer;
1944 /* Ingress policers do not exist on all platforms */
1945 if (!nix_hw->ipolicer)
1948 seq_puts(m, "\nBandwidth profile resource free count\n");
1949 seq_puts(m, "=====================================\n");
1950 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1951 if (layer == BAND_PROF_INVAL_LAYER)
1953 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1954 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1956 ipolicer = &nix_hw->ipolicer[layer];
1957 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
1958 ipolicer->band_prof.max,
1959 rvu_rsrc_free_count(&ipolicer->band_prof));
1961 seq_puts(m, "=====================================\n");
1966 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
/* Create the per-NIX-block debugfs directory ("nix" for NIX0, "nix1"
 * otherwise) and populate it with context, NDC and policer entries.
 * NOTE(review): braces, the early return and the second
 * debugfs_create_dir() continuation are missing from this extraction.
 */
1968 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1970 struct nix_hw *nix_hw;
1972 if (!is_block_implemented(rvu->hw, blkaddr))
1975 if (blkaddr == BLKADDR_NIX0) {
1976 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1977 nix_hw = &rvu->hw->nix[0];
1979 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1981 nix_hw = &rvu->hw->nix[1];
/* Queue-context and cache/statistics debugfs entries for this block */
1984 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1985 &rvu_dbg_nix_sq_ctx_fops);
1986 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1987 &rvu_dbg_nix_rq_ctx_fops);
1988 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1989 &rvu_dbg_nix_cq_ctx_fops);
1990 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1991 &rvu_dbg_nix_ndc_tx_cache_fops);
1992 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1993 &rvu_dbg_nix_ndc_rx_cache_fops);
1994 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1995 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1996 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1997 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1998 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1999 &rvu_dbg_nix_qsize_fops);
2000 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2001 &rvu_dbg_nix_band_prof_ctx_fops);
2002 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2003 &rvu_dbg_nix_band_prof_rsrc_fops);
2006 static void rvu_dbg_npa_init(struct rvu *rvu)
2008 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2010 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2011 &rvu_dbg_npa_qsize_fops);
2012 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2013 &rvu_dbg_npa_aura_ctx_fops);
2014 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2015 &rvu_dbg_npa_pool_ctx_fops);
2016 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2017 &rvu_dbg_npa_ndc_cache_fops);
2018 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2019 &rvu_dbg_npa_ndc_hits_miss_fops);
/* Fetch one cumulative NIX RX statistic for the current cgxd/lmac_id and
 * print it when the read succeeds; evaluates to the counter value.
 * Relies on 'rvu', 'cgxd', 'lmac_id', 'err' and the seq_file 's' being
 * in scope at the expansion site (cgx_print_stats).
 * NOTE(review): several continuation lines of these macros are missing
 * from this extraction.
 */
2022 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
2025 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2026 NIX_STATS_RX, &(cnt)); \
2028 seq_printf(s, "%s: %llu\n", name, cnt); \
/* TX counterpart of the macro above (NIX_STATS_TX). */
2032 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
2035 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2036 NIX_STATS_TX, &(cnt)); \
2038 seq_printf(s, "%s: %llu\n", name, cnt); \
/* Dump link status, cumulative NIX RX/TX statistics and per-MAC
 * (CGX or RPM) RX/TX counters for one LMAC into the seq_file.
 * NOTE(review): declarations (e.g. 'struct rvu *rvu'), error returns,
 * loop increments and some braces are missing from this extraction.
 */
2042 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2044 struct cgx_link_user_info linfo;
2045 struct mac_ops *mac_ops;
2046 void *cgxd = s->private;
2047 u64 ucast, mcast, bcast;
2048 int stat = 0, err = 0;
2049 u64 tx_stat, rx_stat;
/* Locate the AF device; statistics are read through its mailbox */
2052 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2053 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2057 mac_ops = get_mac_ops(cgxd);
2058 /* There can be no CGX devices at all */
2063 seq_puts(s, "\n=======Link Status======\n\n");
2064 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2066 seq_puts(s, "Failed to read link status\n");
2067 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2068 linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Cumulative NIX-level RX counters (ucast/mcast/bcast summed below) */
2071 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2073 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2076 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2079 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2082 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2083 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2086 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2089 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
/* Cumulative NIX-level TX counters */
2094 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2096 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2099 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2102 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2105 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2106 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2109 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
/* Per-MAC counters: field names differ between CGX (otx2) and RPM (cn10k) */
2114 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2115 while (stat < mac_ops->rx_stats_cnt) {
2116 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2119 if (is_rvu_otx2(rvu))
2120 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2123 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2130 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2131 while (stat < mac_ops->tx_stats_cnt) {
2132 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2136 if (is_rvu_otx2(rvu))
2137 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2140 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2148 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2150 struct dentry *current_dir;
2153 current_dir = filp->file->f_path.dentry->d_parent;
2154 buf = strrchr(current_dir->d_name.name, 'c');
2158 return kstrtoint(buf + 1, 10, lmac_id);
/* 'stats' debugfs read: resolve the LMAC id from the directory name and
 * dump link status plus NIX/MAC statistics for that LMAC. Propagates
 * any error from the LMAC-id derivation (restored from truncation).
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
/* Dump the DMAC filter configuration of one LMAC: broadcast/multicast
 * accept mode, unicast/promiscuous mode, and every enabled DMAC CAM
 * entry that belongs to this LMAC.
 * NOTE(review): declarations ('struct rvu *rvu', 'pf', 'cfg', 'mac',
 * 'index', 'domain', 'dmac[]'), error checks and braces are missing
 * from this extraction.
 */
2174 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2176 struct pci_dev *pdev = NULL;
2177 void *cgxd = s->private;
2178 char *bcast, *mcast;
/* Locate the AF device to map this cgx/lmac to its RVU PF */
2185 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2186 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2190 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2193 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2197 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2198 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2199 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2202 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2203 seq_printf(s, "%s PF%d %9s %9s",
2204 dev_name(&pdev->dev), pf, bcast, mcast);
2205 if (cfg & CGX_DMAC_CAM_ACCEPT)
2206 seq_printf(s, "%12s\n\n", "UNICAST");
2208 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2210 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
2212 for (index = 0 ; index < 32 ; index++) {
2213 cfg = cgx_read_dmac_entry(cgxd, index);
2214 /* Display enabled dmac entries associated with current lmac */
2215 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2216 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2217 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2218 u64_to_ether_addr(mac, dmac);
2219 seq_printf(s, "%7d %pM\n", index, dmac);
/* 'mac_filter' debugfs read: resolve the LMAC id from the directory
 * name and dump that LMAC's DMAC filter state. Propagates any error
 * from the LMAC-id derivation (restored from truncation).
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id, err;

	err = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (err)
		return err;

	return cgx_print_dmac_flt(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
/* Build the CGX/RPM debugfs tree: one directory per MAC instance, one
 * subdirectory per active LMAC, each with 'stats' and 'mac_filter'
 * entries.
 * NOTE(review): declarations ('i', 'lmac_id', 'dname', 'cgx'), the
 * 'rvu->rvu_dbg.lmac =' assignment continuation and some braces are
 * missing from this extraction.
 */
2239 static void rvu_dbg_cgx_init(struct rvu *rvu)
2241 struct mac_ops *mac_ops;
2242 unsigned long lmac_bmap;
2247 if (!cgx_get_cgxcnt_max())
2250 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
/* Root directory is named after the MAC type ("cgx" or "rpm") */
2254 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2257 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2258 cgx = rvu_cgx_pdata(i, rvu);
2261 lmac_bmap = cgx_get_lmac_bmap(cgx);
2262 /* cgx debugfs dir */
2263 sprintf(dname, "%s%d", mac_ops->name, i);
2264 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2265 rvu->rvu_dbg.cgx_root);
2267 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2268 /* lmac debugfs dir */
2269 sprintf(dname, "lmac%d", lmac_id);
2271 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2273 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2274 cgx, &rvu_dbg_cgx_stat_fops);
2275 debugfs_create_file("mac_filter", 0600,
2276 rvu->rvu_dbg.lmac, cgx,
2277 &rvu_dbg_cgx_dmac_flt_fops);
2282 /* NPC debugfs APIs */
/*
 * rvu_print_npc_mcam_info() - dump MCAM entry/counter usage of one PF/VF.
 * @s:       seq_file to print into (s->private is the rvu handle)
 * @pcifunc: PF/VF identifier; FUNC bits zero means the PF itself
 * @blkaddr: NPC block address
 *
 * Prints nothing for functions that hold no MCAM entries and no counters.
 */
2283 static void rvu_print_npc_mcam_info(struct seq_file *s,
2284 u16 pcifunc, int blkaddr)
2286 struct rvu *rvu = s->private;
2287 int entry_acnt, entry_ecnt;
2288 int cntr_acnt, cntr_ecnt;
/* Fetch allocated vs enabled counts for entries and counters */
2290 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2291 &entry_acnt, &entry_ecnt);
2292 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2293 &cntr_acnt, &cntr_ecnt);
/* Skip functions that own nothing */
2294 if (!entry_acnt && !cntr_acnt)
/* FUNC bits clear => a PF; otherwise VF index is FUNC - 1 */
2297 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2298 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2299 rvu_get_pf(pcifunc));
2301 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2302 rvu_get_pf(pcifunc),
2303 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2306 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2307 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2310 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2311 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
/*
 * Show handler for npc/mcam_info: prints global MCAM geometry (RX/TX key
 * width, total/reserved/available entries and counters) followed by the
 * per-PF/VF allocation breakdown. mcam->lock is held while reading the
 * bitmap-derived counts so they stay consistent.
 */
2315 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2317 struct rvu *rvu = filp->private;
2318 int pf, vf, numvfs, blkaddr;
2319 struct npc_mcam *mcam;
2320 u16 pcifunc, counters;
2323 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2327 mcam = &rvu->hw->mcam;
2328 counters = rvu->hw->npc_counters;
2330 seq_puts(filp, "\nNPC MCAM info:\n");
2331 /* MCAM keywidth on receive and transmit sides */
2332 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
/* Key width is a 3-bit field at bit 32 of the KEX config register */
2333 cfg = (cfg >> 32) & 0x07;
2334 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2335 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2336 "224bits" : "448bits"));
2337 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2338 cfg = (cfg >> 32) & 0x07;
2339 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2340 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2341 "224bits" : "448bits"));
2343 mutex_lock(&mcam->lock);
/* Reserved = total minus what is managed via the allocation bitmap */
2345 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2346 seq_printf(filp, "\t\t Reserved \t: %d\n",
2347 mcam->total_entries - mcam->bmap_entries);
2348 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2351 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2352 seq_printf(filp, "\t\t Reserved \t: %d\n",
2353 counters - mcam->counters.max);
2354 seq_printf(filp, "\t\t Available \t: %d\n",
2355 rvu_rsrc_free_count(&mcam->counters));
/* All bitmap entries free => nothing allocated, skip the breakdown */
2357 if (mcam->bmap_entries == mcam->bmap_fcnt) {
2358 mutex_unlock(&mcam->lock);
2362 seq_puts(filp, "\n\t\t Current allocation\n");
2363 seq_puts(filp, "\t\t====================\n");
2364 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2365 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2366 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
/* NUMVFS lives in bits [19:12] of the PF's RVU_PRIV_PFX_CFG */
2368 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2369 numvfs = (cfg >> 12) & 0xFF;
2370 for (vf = 0; vf < numvfs; vf++) {
2371 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2372 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2376 mutex_unlock(&mcam->lock);
2380 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
/*
 * Show handler for npc/rx_miss_act_stats: prints the hit count of the
 * match-stat counter attached to the RX MCAM miss action.
 */
2382 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2385 struct rvu *rvu = filp->private;
2386 struct npc_mcam *mcam;
2389 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2393 mcam = &rvu->hw->mcam;
2395 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
/* Counter index was reserved at init; read its MATCH_STAT register */
2396 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2397 rvu_read64(rvu, blkaddr,
2398 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2403 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
/*
 * rvu_dbg_npc_mcam_show_flows() - print every match field of an MCAM rule.
 *
 * Iterates the 64-bit feature mask of @rule; for each set bit it prints the
 * field name and then the packet value / mask pair in a field-appropriate
 * format (%pM for MACs, %pI4/%pI6 for IP addresses, host-order hex for
 * etype/vlan, decimal for ports). Network-order fields go through ntohs()
 * before printing.
 * NOTE(review): the switch selecting per-field output has most of its case
 * labels elided in this view — only the SCTP port cases are visible.
 */
2405 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2406 struct rvu_npc_mcam_rule *rule)
2410 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2411 seq_printf(s, "\t%s ", npc_get_field_name(bit));
2414 seq_printf(s, "%pM ", rule->packet.dmac);
2415 seq_printf(s, "mask %pM\n", rule->mask.dmac);
2418 seq_printf(s, "%pM ", rule->packet.smac);
2419 seq_printf(s, "mask %pM\n", rule->mask.smac);
2422 seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2423 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2426 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2427 seq_printf(s, "mask 0x%x\n",
2428 ntohs(rule->mask.vlan_tci));
2431 seq_printf(s, "%d ", rule->packet.tos);
2432 seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2435 seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2436 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2439 seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2440 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2443 seq_printf(s, "%pI6 ", rule->packet.ip6src);
2444 seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2447 seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2448 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2452 case NPC_SPORT_SCTP:
2453 seq_printf(s, "%d ", ntohs(rule->packet.sport));
2454 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2458 case NPC_DPORT_SCTP:
2459 seq_printf(s, "%d ", ntohs(rule->packet.dport));
2460 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
/*
 * rvu_dbg_npc_mcam_show_action() - print the action of an MCAM rule in
 * human-readable form. TX rules decode tx_action.op, everything else
 * decodes rx_action.op.
 * NOTE(review): break statements between cases are elided in this view —
 * assumed present in the full file.
 */
2469 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2470 struct rvu_npc_mcam_rule *rule)
2472 if (is_npc_intf_tx(rule->intf)) {
2473 switch (rule->tx_action.op) {
2474 case NIX_TX_ACTIONOP_DROP:
2475 seq_puts(s, "\taction: Drop\n");
2477 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2478 seq_puts(s, "\taction: Unicast to default channel\n");
2480 case NIX_TX_ACTIONOP_UCAST_CHAN:
2481 seq_printf(s, "\taction: Unicast to channel %d\n",
2482 rule->tx_action.index);
2484 case NIX_TX_ACTIONOP_MCAST:
2485 seq_puts(s, "\taction: Multicast\n");
2487 case NIX_TX_ACTIONOP_DROP_VIOL:
2488 seq_puts(s, "\taction: Lockdown Violation Drop\n");
/* RX-side action decode */
2494 switch (rule->rx_action.op) {
2495 case NIX_RX_ACTIONOP_DROP:
2496 seq_puts(s, "\taction: Drop\n");
2498 case NIX_RX_ACTIONOP_UCAST:
2499 seq_printf(s, "\taction: Direct to queue %d\n",
2500 rule->rx_action.index);
2502 case NIX_RX_ACTIONOP_RSS:
2503 seq_puts(s, "\taction: RSS\n");
2505 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2506 seq_puts(s, "\taction: Unicast ipsec\n");
2508 case NIX_RX_ACTIONOP_MCAST:
2509 seq_puts(s, "\taction: Multicast\n");
/*
 * rvu_dbg_get_intf_name() - map an NPC interface id to a printable name.
 * Distinguishes RX/TX for interface instances 0 and 1.
 * NOTE(review): the return strings for each case (and the default) are
 * elided in this view.
 */
2517 static const char *rvu_dbg_get_intf_name(int intf)
2520 case NIX_INTFX_RX(0):
2522 case NIX_INTFX_RX(1):
2524 case NIX_INTFX_TX(0):
2526 case NIX_INTFX_TX(1):
/*
 * Show handler for npc/mcam_rules: walks mcam->mcam_rules under mcam->lock
 * and, for every installed rule, prints the owner PF/VF, direction,
 * interface, entry index, match fields, forward target (RX only), channel,
 * action, enable state and — when a counter is attached — its hit count.
 */
2535 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2537 struct rvu_npc_mcam_rule *iter;
2538 struct rvu *rvu = s->private;
2539 struct npc_mcam *mcam;
2546 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2550 mcam = &rvu->hw->mcam;
/* Lock guards the rule list while we iterate it */
2552 mutex_lock(&mcam->lock);
2553 list_for_each_entry(iter, &mcam->mcam_rules, list) {
2554 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2555 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
/* Non-zero FUNC bits => installed by a VF of that PF */
2557 if (iter->owner & RVU_PFVF_FUNC_MASK) {
2558 vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
2559 seq_printf(s, "VF%d", vf);
2563 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
2565 seq_printf(s, "\tinterface: %s\n",
2566 rvu_dbg_get_intf_name(iter->intf));
2567 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
2569 rvu_dbg_npc_mcam_show_flows(s, iter);
/* RX rules also carry a forward target and channel/mask */
2570 if (is_npc_intf_rx(iter->intf)) {
2571 target = iter->rx_action.pf_func;
2572 pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2573 seq_printf(s, "\tForward to: PF%d ", pf);
2575 if (target & RVU_PFVF_FUNC_MASK) {
2576 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
2577 seq_printf(s, "VF%d", vf);
2580 seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
2581 seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
2584 rvu_dbg_npc_mcam_show_action(s, iter);
2586 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
2587 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
/* Rules without a counter have no hit statistics to show */
2589 if (!iter->has_cntr)
2591 seq_printf(s, "\tcounter: %d\n", iter->cntr);
2593 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
2594 seq_printf(s, "\thits: %lld\n", hits);
2596 mutex_unlock(&mcam->lock);
2601 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
/*
 * rvu_dbg_npc_init() - create the "npc" debugfs directory with read-only
 * mcam_info, mcam_rules and rx_miss_act_stats entries.
 */
2603 static void rvu_dbg_npc_init(struct rvu *rvu)
2605 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2607 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2608 &rvu_dbg_npc_mcam_info_fops);
2609 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2610 &rvu_dbg_npc_mcam_rules_fops);
2611 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2612 &rvu_dbg_npc_rx_miss_act_fops);
/*
 * cpt_eng_sts_display() - print FREE/BUSY bitmasks for one CPT engine class.
 * @filp:     seq_file whose private data is the cpt_ctx (rvu + blkaddr)
 * @eng_type: engine class selector (AE/SE/IE)
 *
 * Engine counts per class come from CPT_AF_CONSTANTS1; the engine index
 * range [e_min, e_max) for the requested class is derived from them.
 * NOTE(review): the eng_type switch choosing e_min/e_max per class is
 * largely elided in this view — only two range assignments are visible.
 */
2615 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
2617 struct cpt_ctx *ctx = filp->private;
2618 u64 busy_sts = 0, free_sts = 0;
2619 u32 e_min = 0, e_max = 0, e, i;
2620 u16 max_ses, max_ies, max_aes;
2621 struct rvu *rvu = ctx->rvu;
2622 int blkaddr = ctx->blkaddr;
/* CONSTANTS1 packs SE count [15:0], IE count [31:16], AE count [47:32] */
2625 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2626 max_ses = reg & 0xffff;
2627 max_ies = (reg >> 16) & 0xffff;
2628 max_aes = (reg >> 32) & 0xffff;
2632 e_min = max_ses + max_ies;
2633 e_max = max_ses + max_ies + max_aes;
2641 e_max = max_ses + max_ies;
/* Classify each engine in range as busy or free from its STS register */
2647 for (e = e_min, i = 0; e < e_max; e++, i++) {
2648 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
2650 busy_sts |= 1ULL << i;
2653 free_sts |= 1ULL << i;
2655 seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
2656 seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
/* Show handler for cpt_ae_sts: AE (asymmetric) engine status bitmasks */
2661 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
2663 return cpt_eng_sts_display(filp, CPT_AE_TYPE);
2666 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
/* Show handler for cpt_se_sts: SE (symmetric) engine status bitmasks */
2668 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
2670 return cpt_eng_sts_display(filp, CPT_SE_TYPE);
2673 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
/* Show handler for cpt_ie_sts: IE (IPsec) engine status bitmasks */
2675 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
2677 return cpt_eng_sts_display(filp, CPT_IE_TYPE);
2680 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
/*
 * Show handler for cpt_engines_info: for every CPT engine (all classes,
 * counts from CPT_AF_CONSTANTS1) prints group-enable, active-info and
 * control registers.
 */
2682 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2684 struct cpt_ctx *ctx = filp->private;
2685 u16 max_ses, max_ies, max_aes;
2686 struct rvu *rvu = ctx->rvu;
2687 int blkaddr = ctx->blkaddr;
/* CONSTANTS1 packs SE count [15:0], IE count [31:16], AE count [47:32] */
2691 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2692 max_ses = reg & 0xffff;
2693 max_ies = (reg >> 16) & 0xffff;
2694 max_aes = (reg >> 32) & 0xffff;
2696 e_max = max_ses + max_ies + max_aes;
2698 seq_puts(filp, "===========================================\n");
2699 for (e = 0; e < e_max; e++) {
2700 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2701 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
2703 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2704 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
2706 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2707 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
2709 seq_puts(filp, "===========================================\n");
2714 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
/*
 * Show handler for cpt_lfs_info: prints CTL, CTL2, PTR_CTL and the generic
 * block LF-config register for every LF of this CPT block. Returns early
 * when the block has no LF bitmap (no LFs provisioned).
 */
2716 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
2718 struct cpt_ctx *ctx = filp->private;
2719 int blkaddr = ctx->blkaddr;
2720 struct rvu *rvu = ctx->rvu;
2721 struct rvu_block *block;
2722 struct rvu_hwinfo *hw;
2727 block = &hw->block[blkaddr];
2728 if (!block->lf.bmap)
2731 seq_puts(filp, "===========================================\n");
2732 for (lf = 0; lf < block->lf.max; lf++) {
2733 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
2734 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
2735 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
2736 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
2737 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
2738 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
/* Per-block LF config: base register indexed by lf << lfshift */
2739 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
2740 (lf << block->lfshift));
2741 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
2742 seq_puts(filp, "===========================================\n");
2747 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
/*
 * Show handler for cpt_err_info: dumps the CPT fault/poison/RVU/RAS
 * interrupt status registers and the engine error info register.
 */
2749 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
2751 struct cpt_ctx *ctx = filp->private;
2752 struct rvu *rvu = ctx->rvu;
2753 int blkaddr = ctx->blkaddr;
/* Fault interrupts come as two 64-bit halves */
2756 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
2757 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
2758 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
2759 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
2760 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
2761 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
2762 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
2763 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
2764 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
2765 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
2766 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
2767 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
2768 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
2769 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
2774 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
/*
 * Show handler for cpt_pc: dumps CPT performance counters — instruction
 * request/latency, NCB read request/latency, UC-fill reads, active cycles
 * and the CPT clock count.
 */
2776 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
2778 struct cpt_ctx *ctx = filp->private;
2779 struct rvu *rvu = ctx->rvu;
2780 int blkaddr = ctx->blkaddr;
2783 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
2784 seq_printf(filp, "CPT instruction requests %llu\n", reg);
2785 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
2786 seq_printf(filp, "CPT instruction latency %llu\n", reg);
2787 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
2788 seq_printf(filp, "CPT NCB read requests %llu\n", reg);
2789 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
2790 seq_printf(filp, "CPT NCB read latency %llu\n", reg);
2791 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
2792 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
2793 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
2794 seq_printf(filp, "CPT active cycles pc %llu\n", reg);
2795 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
2796 seq_printf(filp, "CPT clock count pc %llu\n", reg);
2801 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
/*
 * rvu_dbg_cpt_init() - create debugfs entries for one CPT block.
 * @blkaddr: BLKADDR_CPT0 or BLKADDR_CPT1; CPT0 gets the "cpt" directory,
 *           CPT1 gets "cpt1". No-op when the block is not implemented.
 *
 * Each file's private data is the per-block cpt_ctx (rvu + blkaddr).
 */
2803 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2805 struct cpt_ctx *ctx;
2807 if (!is_block_implemented(rvu->hw, blkaddr))
2810 if (blkaddr == BLKADDR_CPT0) {
2811 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2812 ctx = &rvu->rvu_dbg.cpt_ctx[0];
2813 ctx->blkaddr = BLKADDR_CPT0;
/* CPT1 path: second context slot, directory named "cpt1" */
2816 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2818 ctx = &rvu->rvu_dbg.cpt_ctx[1];
2819 ctx->blkaddr = BLKADDR_CPT1;
2823 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2824 &rvu_dbg_cpt_pc_fops);
2825 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2826 &rvu_dbg_cpt_ae_sts_fops);
2827 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2828 &rvu_dbg_cpt_se_sts_fops);
2829 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2830 &rvu_dbg_cpt_ie_sts_fops);
2831 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2832 &rvu_dbg_cpt_engines_info_fops);
2833 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2834 &rvu_dbg_cpt_lfs_info_fops);
2835 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2836 &rvu_dbg_cpt_err_info_fops);
/*
 * rvu_get_dbg_dir_name() - pick the debugfs root directory name based on
 * whether this is OcteonTX2 silicon.
 * NOTE(review): the two return statements are elided in this view; one of
 * them presumably returns DEBUGFS_DIR_NAME ("octeontx2") — confirm.
 */
2839 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
2841 if (!is_rvu_otx2(rvu))
/*
 * rvu_dbg_init() - create the RVU AF debugfs tree.
 *
 * Creates the root directory and top-level files (rsrc_alloc; plus
 * lmtst_map_table on non-OTX2, and a PF-to-CGX/RPM map when CGX blocks
 * exist), then delegates to the per-block init helpers (NPA, NIX0/1,
 * CGX, NPC, CPT0/1).
 */
2847 void rvu_dbg_init(struct rvu *rvu)
2849 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
2851 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
2852 &rvu_dbg_rsrc_status_fops);
/* LMTST map table exists only on post-OTX2 silicon */
2854 if (!is_rvu_otx2(rvu))
2855 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
2856 rvu, &rvu_dbg_lmtst_map_table_fops);
2858 if (!cgx_get_cgxcnt_max())
/* Same fops either way; only the file name reflects CGX vs RPM */
2861 if (is_rvu_otx2(rvu))
2862 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
2863 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2865 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
2866 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
2869 rvu_dbg_npa_init(rvu);
2870 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
2872 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
2873 rvu_dbg_cgx_init(rvu);
2874 rvu_dbg_npc_init(rvu);
2875 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
2876 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
/*
 * rvu_dbg_exit() - tear down the whole debugfs tree created by
 * rvu_dbg_init(); recursive removal covers every per-block sub-directory.
 */
2879 void rvu_dbg_exit(struct rvu *rvu)
2881 debugfs_remove_recursive(rvu->rvu_dbg.root);
2884 #endif /* CONFIG_DEBUG_FS */