1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2019 Marvell.
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
15 #include "rvu_struct.h"
19 #include "lmac_common.h"
22 #define DEBUGFS_DIR_NAME "octeontx2"
/* Human-readable labels for the CGX MAC RX statistics counters,
 * indexed by the CGX_STATx enum; printed by the cgx stats debugfs file.
 */
73 static char *cgx_rx_stats_fields[] = {
74 [CGX_STAT0] = "Received packets",
75 [CGX_STAT1] = "Octets of received packets",
76 [CGX_STAT2] = "Received PAUSE packets",
77 [CGX_STAT3] = "Received PAUSE and control packets",
78 [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
79 [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
80 [CGX_STAT6] = "Packets dropped due to RX FIFO full",
81 [CGX_STAT7] = "Octets dropped due to RX FIFO full",
82 [CGX_STAT8] = "Error packets",
83 [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
84 [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
85 [CGX_STAT11] = "NCSI-bound packets dropped",
86 [CGX_STAT12] = "NCSI-bound octets dropped",
/* Human-readable labels for the CGX MAC TX statistics counters,
 * indexed by the CGX_STATx enum.
 */
89 static char *cgx_tx_stats_fields[] = {
90 [CGX_STAT0] = "Packets dropped due to excessive collisions",
91 [CGX_STAT1] = "Packets dropped due to excessive deferral",
92 [CGX_STAT2] = "Multiple collisions before successful transmission",
93 [CGX_STAT3] = "Single collisions before successful transmission",
94 [CGX_STAT4] = "Total octets sent on the interface",
95 [CGX_STAT5] = "Total frames sent on the interface",
96 [CGX_STAT6] = "Packets sent with an octet count < 64",
97 [CGX_STAT7] = "Packets sent with an octet count == 64",
98 [CGX_STAT8] = "Packets sent with an octet count of 65-127",
99 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
100 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
101 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
102 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
103 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
104 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
105 [CGX_STAT15] = "Packets sent to the multicast DMAC",
106 [CGX_STAT16] = "Transmit underflow and were truncated",
107 [CGX_STAT17] = "Control/PAUSE packets sent",
/* Human-readable labels for the RPM MAC RX statistics counters
 * (CN10K), in hardware counter order.
 * NOTE(review): "a1nrange" below looks like a garbled "in-range" in the
 * user-visible label; it is a runtime string, left untouched here.
 */
110 static char *rpm_rx_stats_fields[] = {
111 "Octets of received packets",
112 "Octets of received packets with out error",
113 "Received packets with alignment errors",
114 "Control/PAUSE packets received",
115 "Packets received with Frame too long Errors",
116 "Packets received with a1nrange length Errors",
118 "Packets received with FrameCheckSequenceErrors",
119 "Packets received with VLAN header",
121 "Packets received with unicast DMAC",
122 "Packets received with multicast DMAC",
123 "Packets received with broadcast DMAC",
125 "Total frames received on interface",
126 "Packets received with an octet count < 64",
127 "Packets received with an octet count == 64",
128 "Packets received with an octet count of 65-127",
129 "Packets received with an octet count of 128-255",
130 "Packets received with an octet count of 256-511",
131 "Packets received with an octet count of 512-1023",
132 "Packets received with an octet count of 1024-1518",
133 "Packets received with an octet count of > 1518",
136 "Fragmented Packets",
137 "CBFC(class based flow control) pause frames received for class 0",
138 "CBFC pause frames received for class 1",
139 "CBFC pause frames received for class 2",
140 "CBFC pause frames received for class 3",
141 "CBFC pause frames received for class 4",
142 "CBFC pause frames received for class 5",
143 "CBFC pause frames received for class 6",
144 "CBFC pause frames received for class 7",
145 "CBFC pause frames received for class 8",
146 "CBFC pause frames received for class 9",
147 "CBFC pause frames received for class 10",
148 "CBFC pause frames received for class 11",
149 "CBFC pause frames received for class 12",
150 "CBFC pause frames received for class 13",
151 "CBFC pause frames received for class 14",
152 "CBFC pause frames received for class 15",
153 "MAC control packets received",
/* Human-readable labels for the RPM MAC TX statistics counters
 * (CN10K), in hardware counter order.
 */
156 static char *rpm_tx_stats_fields[] = {
157 "Total octets sent on the interface",
158 "Total octets transmitted OK",
159 "Control/Pause frames sent",
160 "Total frames transmitted OK",
161 "Total frames sent with VLAN header",
163 "Packets sent to unicast DMAC",
164 "Packets sent to the multicast DMAC",
165 "Packets sent to a broadcast DMAC",
166 "Packets sent with an octet count == 64",
167 "Packets sent with an octet count of 65-127",
168 "Packets sent with an octet count of 128-255",
169 "Packets sent with an octet count of 256-511",
170 "Packets sent with an octet count of 512-1023",
171 "Packets sent with an octet count of 1024-1518",
172 "Packets sent with an octet count of > 1518",
173 "CBFC(class based flow control) pause frames transmitted for class 0",
174 "CBFC pause frames transmitted for class 1",
175 "CBFC pause frames transmitted for class 2",
176 "CBFC pause frames transmitted for class 3",
177 "CBFC pause frames transmitted for class 4",
178 "CBFC pause frames transmitted for class 5",
179 "CBFC pause frames transmitted for class 6",
180 "CBFC pause frames transmitted for class 7",
181 "CBFC pause frames transmitted for class 8",
182 "CBFC pause frames transmitted for class 9",
183 "CBFC pause frames transmitted for class 10",
184 "CBFC pause frames transmitted for class 11",
185 "CBFC pause frames transmitted for class 12",
186 "CBFC pause frames transmitted for class 13",
187 "CBFC pause frames transmitted for class 14",
188 "CBFC pause frames transmitted for class 15",
189 "MAC control packets sent",
190 "Total frames sent on the interface"
/* Number of banks implemented by an NDC block, read from its CONST CSR
 * (low byte of NDC_AF_CONST).
 */
199 #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
200 blk_addr, NDC_AF_CONST) & 0xFF)
/* Placeholder tokens so the FOPS macros below can take NULL handlers. */
202 #define rvu_dbg_NULL NULL
203 #define rvu_dbg_open_NULL NULL
/* Generate a single_open()-based seq_file file_operations for a debugfs
 * entry: an open helper plus rvu_dbg_<name>_fops wiring read_op/write_op.
 */
205 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
206 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
208 return single_open(file, rvu_dbg_##read_op, inode->i_private); \
210 static const struct file_operations rvu_dbg_##name##_fops = { \
211 .owner = THIS_MODULE, \
212 .open = rvu_dbg_open_##name, \
214 .write = rvu_dbg_##write_op, \
215 .llseek = seq_lseek, \
216 .release = single_release, \
/* Generate a simple_open()-based file_operations (raw read/write
 * handlers, no seq_file) for a debugfs entry.
 */
219 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
220 static const struct file_operations rvu_dbg_##name##_fops = { \
221 .owner = THIS_MODULE, \
222 .open = simple_open, \
223 .read = rvu_dbg_##read_op, \
224 .write = rvu_dbg_##write_op \
/* Forward declaration; defined after the NIX context helpers. */
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
229 #define LMT_MAPTBL_ENTRY_SIZE 16
230 /* Dump LMTST map table */
/* Debugfs read handler: ioremaps the APR LMTST map table and formats one
 * row per PF and per VF (table index, lmtline base word0, map entry
 * word1) into a bounce buffer, then copies min(off, count) bytes to
 * userspace. Partial reads are refused (see check below).
 */
231 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
233 size_t count, loff_t *ppos)
235 struct rvu *rvu = filp->private_data;
236 u64 lmt_addr, val, tbl_base;
237 int pf, vf, num_vfs, hw_vfs;
238 void __iomem *lmt_map_base;
239 int buf_size = 10240;
245 /* don't allow partial reads */
249 buf = kzalloc(buf_size, GFP_KERNEL);
253 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
/* Table is 128KB: 16 bytes per entry for all PF/VF slots. */
255 lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
257 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
262 off += scnprintf(&buf[off], buf_size - 1 - off,
263 "\n\t\t\t\t\tLmtst Map Table Entries");
264 off += scnprintf(&buf[off], buf_size - 1 - off,
265 "\n\t\t\t\t\t=======================");
266 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
267 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
268 off += scnprintf(&buf[off], buf_size - 1 - off,
269 "Lmtline Base (word 0)\t\t");
270 off += scnprintf(&buf[off], buf_size - 1 - off,
271 "Lmt Map Entry (word 1)");
272 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
273 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
274 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
/* PF entry sits at the start of this PF's VF slot range. */
277 index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
278 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
280 lmt_addr = readq(lmt_map_base + index);
281 off += scnprintf(&buf[off], buf_size - 1 - off,
282 " 0x%016llx\t\t", lmt_addr);
284 val = readq(lmt_map_base + index);
285 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
287 /* Reading num of VFs per PF */
288 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
289 for (vf = 0; vf < num_vfs; vf++) {
290 index = (pf * rvu->hw->total_vfs * 16) +
291 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
292 off += scnprintf(&buf[off], buf_size - 1 - off,
293 "PF%d:VF%d \t\t", pf, vf);
294 off += scnprintf(&buf[off], buf_size - 1 - off,
295 " 0x%llx\t\t", (tbl_base + index));
296 lmt_addr = readq(lmt_map_base + index);
297 off += scnprintf(&buf[off], buf_size - 1 - off,
298 " 0x%016llx\t\t", lmt_addr);
300 val = readq(lmt_map_base + index);
301 off += scnprintf(&buf[off], buf_size - 1 - off,
302 " 0x%016llx\n", val);
305 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
307 ret = min(off, count);
308 if (copy_to_user(buffer, buf, ret))
312 iounmap(lmt_map_base);
320 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
/* Build into 'lfs' a compact comma/range list (e.g. "0-3,7") of the LFs
 * of 'block' that are attached to 'pcifunc'. Consecutive LFs are folded
 * into "first-last" ranges; a trailing open range is closed at the end.
 */
322 static void get_lf_str_list(struct rvu_block block, int pcifunc,
/* prev_lf starts at lf.max so the first match never extends a range. */
325 int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
327 for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
328 if (lf >= block.lf.max)
331 if (block.fn_map[lf] != pcifunc)
334 if (lf == prev_lf + 1) {
341 len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
343 len += (len ? sprintf(lfs + len, ",%d", lf) :
344 sprintf(lfs + len, "%d", lf));
/* Close a still-open range with its last member. */
351 len += sprintf(lfs + len, "-%d", prev_lf);
/* Compute the widest LF-list string over every PF/VF and block so the
 * resource-status table columns line up; returns at least 12.
 */
356 static int get_max_column_width(struct rvu *rvu)
358 int index, pf, vf, lf_str_size = 12, buf_size = 256;
359 struct rvu_block block;
363 buf = kzalloc(buf_size, GFP_KERNEL);
367 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
/* vf == 0 denotes the PF itself; VFs are 1-based in pcifunc. */
368 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
369 pcifunc = pf << 10 | vf;
373 for (index = 0; index < BLK_COUNT; index++) {
374 block = rvu->hw->block[index];
375 if (!strlen(block.name))
378 get_lf_str_list(block, pcifunc, buf);
379 if (lf_str_size <= strlen(buf))
380 lf_str_size = strlen(buf) + 1;
389 /* Dumps current provisioning status of all RVU block LFs */
/* Debugfs read handler: prints a table with one column per RVU block and
 * one row per PF/VF, each cell holding the LF list from
 * get_lf_str_list(). Output is staged in 'buf' and copied to userspace
 * chunk by chunk; partial reads are refused.
 */
390 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
392 size_t count, loff_t *ppos)
394 int index, off = 0, flag = 0, len = 0, i = 0;
395 struct rvu *rvu = filp->private_data;
396 int bytes_not_copied = 0;
397 struct rvu_block block;
404 /* don't allow partial reads */
408 buf = kzalloc(buf_size, GFP_KERNEL);
412 /* Get the maximum width of a column */
413 lf_str_size = get_max_column_width(rvu);
415 lfs = kzalloc(lf_str_size, GFP_KERNEL);
/* Header row: "pcifunc" label then each named block. */
420 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
422 for (index = 0; index < BLK_COUNT; index++)
423 if (strlen(rvu->hw->block[index].name)) {
424 off += scnprintf(&buf[off], buf_size - 1 - off,
426 rvu->hw->block[index].name);
429 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
430 bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
431 if (bytes_not_copied)
436 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
/* vf == 0 denotes the PF itself; VFs are 1-based in pcifunc. */
437 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
440 pcifunc = pf << 10 | vf;
445 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
446 off = scnprintf(&buf[off],
448 "%-*s", lf_str_size, lfs);
450 sprintf(lfs, "PF%d", pf);
451 off = scnprintf(&buf[off],
453 "%-*s", lf_str_size, lfs);
456 for (index = 0; index < BLK_COUNT; index++) {
457 block = rvu->hw->block[index];
458 if (!strlen(block.name))
462 get_lf_str_list(block, pcifunc, lfs);
466 off += scnprintf(&buf[off], buf_size - 1 - off,
467 "%-*s", lf_str_size, lfs);
470 off += scnprintf(&buf[off],
471 buf_size - 1 - off, "\n");
472 bytes_not_copied = copy_to_user(buffer +
475 if (bytes_not_copied)
487 if (bytes_not_copied)
493 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
/* Debugfs seq_file show: for every CGX/RPM-mapped PF print its PCI
 * device name, pcifunc, attached NIX block and CGX/LMAC pair.
 */
495 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
497 struct rvu *rvu = filp->private;
498 struct pci_dev *pdev = NULL;
499 struct mac_ops *mac_ops;
500 char cgx[10], lmac[10];
501 struct rvu_pfvf *pfvf;
502 int pf, domain, blkid;
/* mac_ops gives "CGX" vs "RPM" naming depending on silicon. */
507 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
508 /* There can be no CGX devices at all */
511 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
513 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
514 if (!is_pf_cgxmapped(rvu, pf))
517 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
524 pfvf = rvu_get_pfvf(rvu, pcifunc);
526 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
531 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
533 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
534 sprintf(lmac, "LMAC%d", lmac_id);
535 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
536 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
541 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
/* Validate that 'lf' is in range for the block at 'blkaddr' and is
 * attached to some PF/VF; on success store the owner in *pcifunc.
 * Emits a dev_warn and returns false otherwise.
 */
543 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
546 struct rvu_block *block;
547 struct rvu_hwinfo *hw;
550 block = &hw->block[blkaddr];
552 if (lf < 0 || lf >= block->lf.max) {
553 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
558 *pcifunc = block->fn_map[lf];
561 "This LF is not attached to any RVU PFFUNC\n");
/* Print the NPA LF's aura and pool queue sizes plus their
 * enable/disable bitmaps (rendered into a page-sized temp buffer).
 */
567 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
571 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
575 if (!pfvf->aura_ctx) {
576 seq_puts(m, "Aura context is not initialized\n");
578 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
579 pfvf->aura_ctx->qsize);
580 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
581 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
584 if (!pfvf->pool_ctx) {
585 seq_puts(m, "Pool context is not initialized\n");
587 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
588 pfvf->pool_ctx->qsize);
589 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
590 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
595 /* The 'qsize' entry dumps current Aura/Pool context Qsize
596 * and each context's current enable/disable status in a bitmap.
/* Shared show handler for the NPA and NIX 'qsize' debugfs entries.
 * Selects the LF previously chosen via the write handler, validates it,
 * then dispatches to print_npa_qsize()/print_nix_qsize(). For NIX the
 * block (NIX0/NIX1) is inferred from the parent debugfs directory name.
 */
598 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
601 void (*print_qsize)(struct seq_file *filp,
602 struct rvu_pfvf *pfvf) = NULL;
603 struct dentry *current_dir;
604 struct rvu_pfvf *pfvf;
613 qsize_id = rvu->rvu_dbg.npa_qsize_id;
614 print_qsize = print_npa_qsize;
618 qsize_id = rvu->rvu_dbg.nix_qsize_id;
619 print_qsize = print_nix_qsize;
626 if (blktype == BLKTYPE_NPA) {
627 blkaddr = BLKADDR_NPA;
629 current_dir = filp->file->f_path.dentry->d_parent;
630 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
631 BLKADDR_NIX1 : BLKADDR_NIX0);
634 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
637 pfvf = rvu_get_pfvf(rvu, pcifunc);
638 print_qsize(filp, pfvf);
/* Shared write handler for the 'qsize' entries: parses "<lf>" from the
 * user buffer, validates the LF for the NPA/NIX block, and records it
 * as the LF to be displayed by rvu_dbg_qsize_display().
 */
643 static ssize_t rvu_dbg_qsize_write(struct file *filp,
644 const char __user *buffer, size_t count,
645 loff_t *ppos, int blktype)
647 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
648 struct seq_file *seqfile = filp->private_data;
649 char *cmd_buf, *cmd_buf_tmp, *subtoken;
650 struct rvu *rvu = seqfile->private;
651 struct dentry *current_dir;
/* NOTE(review): memdup_user(buffer, count + 1) copies count+1 bytes
 * while userspace only supplied count; memdup_user_nul(buffer, count)
 * looks like the intended call — verify against upstream.
 */
656 cmd_buf = memdup_user(buffer, count + 1);
660 cmd_buf[count] = '\0';
662 cmd_buf_tmp = strchr(cmd_buf, '\n');
665 count = cmd_buf_tmp - cmd_buf + 1;
/* Keep the original pointer; strsep() advances cmd_buf. */
668 cmd_buf_tmp = cmd_buf;
669 subtoken = strsep(&cmd_buf, " ");
670 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
674 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
675 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
676 goto qsize_write_done;
679 if (blktype == BLKTYPE_NPA) {
680 blkaddr = BLKADDR_NPA;
682 current_dir = filp->f_path.dentry->d_parent;
683 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
684 BLKADDR_NIX1 : BLKADDR_NIX0);
687 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
689 goto qsize_write_done;
691 if (blktype == BLKTYPE_NPA)
692 rvu->rvu_dbg.npa_qsize_id = lf;
694 rvu->rvu_dbg.nix_qsize_id = lf;
698 return ret ? ret : count;
/* Thin NPA-specific wrappers around the shared qsize handlers. */
701 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
702 const char __user *buffer,
703 size_t count, loff_t *ppos)
705 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
709 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
711 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
714 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
716 /* Dumps given NPA Aura's context */
/* Prints every field of the aura context word by word (W0-W6).
 * Fields that only exist on CN10K (fc_be, fc_msh_dst) are skipped on
 * OcteonTx2 silicon via is_rvu_otx2().
 */
717 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
719 struct npa_aura_s *aura = &rsp->aura;
720 struct rvu *rvu = m->private;
722 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
724 seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
725 aura->ena, aura->pool_caching);
726 seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
727 aura->pool_way_mask, aura->avg_con);
728 seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
729 aura->pool_drop_ena, aura->aura_drop_ena);
730 seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
731 aura->bp_ena, aura->aura_drop);
732 seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
733 aura->shift, aura->avg_level);
735 seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
736 (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
738 seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
739 (u64)aura->limit, aura->bp, aura->fc_ena);
741 if (!is_rvu_otx2(rvu))
742 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
743 seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
744 aura->fc_up_crossing, aura->fc_stype);
745 seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
747 seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
749 seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
750 aura->pool_drop, aura->update_time);
751 seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
752 aura->err_int, aura->err_int_ena);
753 seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
754 aura->thresh_int, aura->thresh_int_ena);
755 seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
756 aura->thresh_up, aura->thresh_qint_idx);
757 seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
759 seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
760 if (!is_rvu_otx2(rvu))
761 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
764 /* Dumps given NPA Pool's context */
/* Prints every field of the pool context word by word (W0-W8).
 * CN10K-only fields (fc_be, fc_msh_dst) are skipped on OcteonTx2.
 */
765 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
767 struct npa_pool_s *pool = &rsp->pool;
768 struct rvu *rvu = m->private;
770 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
772 seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
773 pool->ena, pool->nat_align);
774 seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
775 pool->stack_caching, pool->stack_way_mask);
776 seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
777 pool->buf_offset, pool->buf_size);
779 seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
780 pool->stack_max_pages, pool->stack_pages);
782 seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
784 seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
785 pool->stack_offset, pool->shift, pool->avg_level);
786 seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
787 pool->avg_con, pool->fc_ena, pool->fc_stype);
788 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
789 pool->fc_hyst_bits, pool->fc_up_crossing);
790 if (!is_rvu_otx2(rvu))
791 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
792 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
794 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
796 seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
798 seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
800 seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
801 pool->err_int, pool->err_int_ena);
802 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
803 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
804 pool->thresh_int_ena, pool->thresh_up);
805 seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
806 pool->thresh_qint_idx, pool->err_qint_idx);
807 if (!is_rvu_otx2(rvu))
808 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
811 /* Reads aura/pool's ctx from admin queue */
/* Shared show handler for the aura/pool context entries: fetches the
 * LF/id/all selection stored by the write handler, validates the LF,
 * then reads each requested context through the NPA admin queue and
 * dumps it via print_npa_aura_ctx()/print_npa_pool_ctx().
 */
812 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
814 void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
815 struct npa_aq_enq_req aq_req;
816 struct npa_aq_enq_rsp rsp;
817 struct rvu_pfvf *pfvf;
818 int aura, rc, max_id;
826 case NPA_AQ_CTYPE_AURA:
827 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
828 id = rvu->rvu_dbg.npa_aura_ctx.id;
829 all = rvu->rvu_dbg.npa_aura_ctx.all;
832 case NPA_AQ_CTYPE_POOL:
833 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
834 id = rvu->rvu_dbg.npa_pool_ctx.id;
835 all = rvu->rvu_dbg.npa_pool_ctx.all;
841 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
844 pfvf = rvu_get_pfvf(rvu, pcifunc);
845 if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
846 seq_puts(m, "Aura context is not initialized\n");
848 } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
849 seq_puts(m, "Pool context is not initialized\n");
853 memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
854 aq_req.hdr.pcifunc = pcifunc;
855 aq_req.ctype = ctype;
856 aq_req.op = NPA_AQ_INSTOP_READ;
857 if (ctype == NPA_AQ_CTYPE_AURA) {
858 max_id = pfvf->aura_ctx->qsize;
859 print_npa_ctx = print_npa_aura_ctx;
861 max_id = pfvf->pool_ctx->qsize;
862 print_npa_ctx = print_npa_pool_ctx;
865 if (id < 0 || id >= max_id) {
866 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
867 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
/* "all" dumps [id, max_id); otherwise just the single id. */
877 for (aura = id; aura < max_id; aura++) {
878 aq_req.aura_id = aura;
879 seq_printf(m, "======%s : %d=======\n",
880 (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
882 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
884 seq_puts(m, "Failed to read context\n");
887 print_npa_ctx(m, &rsp);
/* Validate a parsed aura/pool selection (LF, id, all-flag) and store it
 * in rvu_dbg so the matching display handler can use it later.
 * Returns 0 on success or a negative errno.
 */
892 static int write_npa_ctx(struct rvu *rvu, bool all,
893 int npalf, int id, int ctype)
895 struct rvu_pfvf *pfvf;
899 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
902 pfvf = rvu_get_pfvf(rvu, pcifunc);
904 if (ctype == NPA_AQ_CTYPE_AURA) {
905 if (!pfvf->aura_ctx) {
906 dev_warn(rvu->dev, "Aura context is not initialized\n");
909 max_id = pfvf->aura_ctx->qsize;
910 } else if (ctype == NPA_AQ_CTYPE_POOL) {
911 if (!pfvf->pool_ctx) {
912 dev_warn(rvu->dev, "Pool context is not initialized\n");
915 max_id = pfvf->pool_ctx->qsize;
918 if (id < 0 || id >= max_id) {
919 dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
920 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
926 case NPA_AQ_CTYPE_AURA:
927 rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
928 rvu->rvu_dbg.npa_aura_ctx.id = id;
929 rvu->rvu_dbg.npa_aura_ctx.all = all;
932 case NPA_AQ_CTYPE_POOL:
933 rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
934 rvu->rvu_dbg.npa_pool_ctx.id = id;
935 rvu->rvu_dbg.npa_pool_ctx.all = all;
/* Copy and tokenize the user command "<npalf> [<id>|all]": fills *npalf,
 * *id and the all-flag. Returns a negative errno on copy or parse
 * failure.
 */
943 static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
944 const char __user *buffer, int *npalf,
947 int bytes_not_copied;
952 bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
953 if (bytes_not_copied)
956 cmd_buf[*count] = '\0';
957 cmd_buf_tmp = strchr(cmd_buf, '\n');
961 *count = cmd_buf_tmp - cmd_buf + 1;
964 subtoken = strsep(&cmd_buf, " ");
965 ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
968 subtoken = strsep(&cmd_buf, " ");
969 if (subtoken && strcmp(subtoken, "all") == 0) {
972 ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
/* Shared write handler for the aura/pool ctx entries: parses the user
 * command, prints a usage hint on parse failure, and records the
 * selection via write_npa_ctx().
 */
981 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
982 const char __user *buffer,
983 size_t count, loff_t *ppos, int ctype)
985 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
987 struct seq_file *seqfp = filp->private_data;
988 struct rvu *rvu = seqfp->private;
989 int npalf, id = 0, ret;
992 if ((*ppos != 0) || !count)
995 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
998 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1002 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
1003 ctype_string, ctype_string);
1006 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
1010 return ret ? ret : count;
/* Aura/pool specific wrappers plus their seq_file fops. */
1013 static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
1014 const char __user *buffer,
1015 size_t count, loff_t *ppos)
1017 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1021 static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
1023 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
1026 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
1028 static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
1029 const char __user *buffer,
1030 size_t count, loff_t *ppos)
1032 return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
1036 static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
1038 return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
1041 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1043 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1044 int ctype, int transaction)
1046 u64 req, out_req, lat, cant_alloc;
1047 struct nix_hw *nix_hw;
1051 if (blk_addr == BLKADDR_NDC_NPA0) {
1054 nix_hw = s->private;
1058 for (port = 0; port < NDC_MAX_PORT; port++) {
1059 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1060 (port, ctype, transaction));
1061 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1062 (port, ctype, transaction));
1063 out_req = rvu_read64(rvu, blk_addr,
1064 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1065 (port, ctype, transaction));
1066 cant_alloc = rvu_read64(rvu, blk_addr,
1067 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1068 (port, transaction));
1069 seq_printf(s, "\nPort:%d\n", port);
1070 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1071 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1072 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1073 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1074 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1078 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1080 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1081 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1082 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1083 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1084 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1085 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1086 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1087 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
/* seq_file show for the NPA NDC cache statistics entry. */
1091 static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
1093 return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1096 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
/* Print per-bank hit/miss counters for one NDC block; the bank count
 * comes from the block's CONST CSR via NDC_MAX_BANK().
 */
1098 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1100 struct nix_hw *nix_hw;
/* ->private is 'struct rvu *' for NPA NDC, 'struct nix_hw *' otherwise. */
1104 if (blk_addr == BLKADDR_NDC_NPA0) {
1107 nix_hw = s->private;
1111 max_bank = NDC_MAX_BANK(rvu, blk_addr);
1112 for (bank = 0; bank < max_bank; bank++) {
1113 seq_printf(s, "BANK:%d\n", bank);
1114 seq_printf(s, "\tHits:\t%lld\n",
1115 (u64)rvu_read64(rvu, blk_addr,
1116 NDC_AF_BANKX_HIT_PC(bank)));
1117 seq_printf(s, "\tMiss:\t%lld\n",
1118 (u64)rvu_read64(rvu, blk_addr,
1119 NDC_AF_BANKX_MISS_PC(bank)));
/* seq_file shows for the NIX RX/TX NDC cache statistics entries;
 * NIX0 vs NIX1 is chosen from the nix_hw block address.
 */
1124 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1126 struct nix_hw *nix_hw = filp->private;
1130 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1131 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1132 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1134 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1137 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1139 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1141 struct nix_hw *nix_hw = filp->private;
1145 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1146 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1147 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1149 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1152 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
/* seq_file shows for the NPA and NIX NDC per-bank hit/miss entries.
 * NOTE(review): the NIX variants initialize ndc_idx to NPA0_U; harmless
 * since ndc_blk_hits_miss_stats() ignores 'idx', but looks like a
 * copy-paste leftover — verify.
 */
1154 static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
1157 return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
1160 RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1162 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1165 struct nix_hw *nix_hw = filp->private;
1166 int ndc_idx = NPA0_U;
1169 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1170 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1172 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1175 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1177 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1180 struct nix_hw *nix_hw = filp->private;
1181 int ndc_idx = NPA0_U;
1184 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1185 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1187 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1190 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1192 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1193 struct nix_cn10k_sq_ctx_s *sq_ctx)
1195 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1196 sq_ctx->ena, sq_ctx->qint_idx);
1197 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1198 sq_ctx->substream, sq_ctx->sdp_mcast);
1199 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1200 sq_ctx->cq, sq_ctx->sqe_way_mask);
1202 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1203 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1204 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1205 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1206 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1207 sq_ctx->default_chan, sq_ctx->sqb_count);
1209 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1210 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1211 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1212 sq_ctx->sqb_aura, sq_ctx->sq_int);
1213 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1214 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1216 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1217 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1218 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1219 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1220 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1221 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1222 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1223 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1224 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1225 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1227 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1228 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1229 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1230 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1231 sq_ctx->smenq_next_sqb);
1233 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1235 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1236 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1237 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1238 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1239 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1240 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1241 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1243 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1244 (u64)sq_ctx->scm_lso_rem);
1245 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1246 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1247 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1248 (u64)sq_ctx->dropped_octs);
1249 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1250 (u64)sq_ctx->dropped_pkts);
1253 /* Dumps given nix_sq's context */
/* Prints every field of an OTX2 NIX send-queue context (words W0..W15) to the
 * debugfs seq_file. On CN10K silicon it dispatches to the CN10K-specific
 * printer instead, since the context layout differs.
 */
1254 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1256 struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1257 struct nix_hw *nix_hw = m->private;
1258 struct rvu *rvu = nix_hw->rvu;
1260 if (!is_rvu_otx2(rvu)) {
1261 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
/* NOTE(review): the CN10K branch is expected to return early here (the
 * elided lines presumably contain the `return`) — confirm against source. */
1264 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1265 sq_ctx->sqe_way_mask, sq_ctx->cq);
1266 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1267 sq_ctx->sdp_mcast, sq_ctx->substream);
1268 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1269 sq_ctx->qint_idx, sq_ctx->ena);
1271 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1272 sq_ctx->sqb_count, sq_ctx->default_chan);
1273 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1274 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1275 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1276 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1278 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1279 sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1280 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1281 sq_ctx->sq_int, sq_ctx->sqb_aura);
1282 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1284 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1285 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1286 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1287 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1288 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1289 sq_ctx->smenq_offset, sq_ctx->tail_offset);
1290 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1291 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1292 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1293 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1294 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1295 sq_ctx->cq_limit, sq_ctx->max_sqe_size);
/* W4..W8 hold SQB (send queue buffer) pointers, printed as raw hex */
1297 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1298 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1299 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1300 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1301 sq_ctx->smenq_next_sqb);
1303 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1305 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1306 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1307 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1308 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1309 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1310 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1311 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
/* W10..W15 are 48-bit counters; cast to u64 to match the %llu specifier */
1313 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1314 (u64)sq_ctx->scm_lso_rem);
1315 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1316 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1317 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1318 (u64)sq_ctx->dropped_octs);
1319 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1320 (u64)sq_ctx->dropped_pkts);
1323 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1324 struct nix_cn10k_rq_ctx_s *rq_ctx)
1326 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1327 rq_ctx->ena, rq_ctx->sso_ena);
1328 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1329 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1330 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1331 rq_ctx->cq, rq_ctx->lenerr_dis);
1332 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1333 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1334 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1335 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1336 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1337 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1338 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1340 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1341 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1342 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1343 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1344 rq_ctx->sso_grp, rq_ctx->sso_tt);
1345 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1346 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1347 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1348 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1349 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1350 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1351 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1352 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1354 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1355 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1356 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1357 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1358 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1359 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1360 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1361 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1362 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1363 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1364 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1366 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1367 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1368 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1369 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1370 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1371 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1372 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1373 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1375 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1376 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1377 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1378 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1379 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1380 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1381 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1383 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1384 rq_ctx->ltag, rq_ctx->good_utag);
1385 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1386 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1387 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1388 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1389 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1390 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1391 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1393 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1394 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1395 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1396 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1397 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1400 /* Dumps given nix_rq's context */
/* OTX2 variant of the RQ context dump (words W0..W10). On CN10K hardware the
 * layout differs, so this dispatches to print_nix_cn10k_rq_ctx() instead.
 */
1401 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1403 struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1404 struct nix_hw *nix_hw = m->private;
1405 struct rvu *rvu = nix_hw->rvu;
1407 if (!is_rvu_otx2(rvu)) {
1408 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
/* NOTE(review): the CN10K branch is expected to return early here (elided
 * lines) — confirm against source. */
1412 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1413 rq_ctx->wqe_aura, rq_ctx->substream);
1414 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1415 rq_ctx->cq, rq_ctx->ena_wqwd);
1416 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1417 rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1418 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1420 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1421 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1422 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1423 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1424 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1425 rq_ctx->pb_caching, rq_ctx->sso_tt);
1426 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1427 rq_ctx->sso_grp, rq_ctx->lpb_aura);
1428 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1430 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1431 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1432 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1433 rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1434 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1435 rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1436 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1437 rq_ctx->spb_ena, rq_ctx->wqe_skip);
1438 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1440 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1441 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1442 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1443 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1444 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1445 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1446 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1447 rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1449 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1450 rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1451 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1452 rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1453 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1454 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1455 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1457 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1458 rq_ctx->flow_tagw, rq_ctx->bad_utag);
1459 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1460 rq_ctx->good_utag, rq_ctx->ltag);
/* W6..W10 are wide counters; cast to u64 to match %llu */
1462 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1463 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1464 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1465 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1466 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1469 /* Dumps given nix_cq's context */
1470 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1472 struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1474 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1476 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1477 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1478 cq_ctx->avg_con, cq_ctx->cint_idx);
1479 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1480 cq_ctx->cq_err, cq_ctx->qint_idx);
1481 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1482 cq_ctx->bpid, cq_ctx->bp_ena);
1484 seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
1485 cq_ctx->update_time, cq_ctx->avg_level);
1486 seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
1487 cq_ctx->head, cq_ctx->tail);
1489 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
1490 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1491 seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
1492 cq_ctx->qsize, cq_ctx->caching);
1493 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1494 cq_ctx->substream, cq_ctx->ena);
1495 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1496 cq_ctx->drop_ena, cq_ctx->drop);
1497 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
/* Common seq_file show handler for the sq_ctx/rq_ctx/cq_ctx debugfs files.
 * Reads back the queue context(s) previously selected by a write to the same
 * file (stored in rvu->rvu_dbg.nix_*_ctx) via the NIX AQ mailbox and prints
 * them with the matching printer function.
 */
1500 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1501 void *unused, int ctype)
1503 void (*print_nix_ctx)(struct seq_file *filp,
1504 struct nix_aq_enq_rsp *rsp) = NULL;
1505 struct nix_hw *nix_hw = filp->private;
1506 struct rvu *rvu = nix_hw->rvu;
1507 struct nix_aq_enq_req aq_req;
1508 struct nix_aq_enq_rsp rsp;
1509 char *ctype_string = NULL;
1510 int qidx, rc, max_id = 0;
1511 struct rvu_pfvf *pfvf;
/* Fetch the LF/queue-id/all selection the last write stored for this ctype */
1516 case NIX_AQ_CTYPE_CQ:
1517 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1518 id = rvu->rvu_dbg.nix_cq_ctx.id;
1519 all = rvu->rvu_dbg.nix_cq_ctx.all;
1522 case NIX_AQ_CTYPE_SQ:
1523 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1524 id = rvu->rvu_dbg.nix_sq_ctx.id;
1525 all = rvu->rvu_dbg.nix_sq_ctx.all;
1528 case NIX_AQ_CTYPE_RQ:
1529 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1530 id = rvu->rvu_dbg.nix_rq_ctx.id;
1531 all = rvu->rvu_dbg.nix_rq_ctx.all;
1538 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1541 pfvf = rvu_get_pfvf(rvu, pcifunc);
1542 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1543 seq_puts(filp, "SQ context is not initialized\n");
1545 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1546 seq_puts(filp, "RQ context is not initialized\n");
1548 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1549 seq_puts(filp, "CQ context is not initialized\n");
/* Pick per-ctype bound, label and printer */
1553 if (ctype == NIX_AQ_CTYPE_SQ) {
1554 max_id = pfvf->sq_ctx->qsize;
1555 ctype_string = "sq";
1556 print_nix_ctx = print_nix_sq_ctx;
1557 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1558 max_id = pfvf->rq_ctx->qsize;
1559 ctype_string = "rq";
1560 print_nix_ctx = print_nix_rq_ctx;
1561 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1562 max_id = pfvf->cq_ctx->qsize;
1563 ctype_string = "cq";
1564 print_nix_ctx = print_nix_cq_ctx;
/* Build one AQ READ request, re-used for every queue index */
1567 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1568 aq_req.hdr.pcifunc = pcifunc;
1569 aq_req.ctype = ctype;
1570 aq_req.op = NIX_AQ_INSTOP_READ;
1575 for (qidx = id; qidx < max_id; qidx++) {
1577 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
1578 ctype_string, nixlf, aq_req.qidx);
1579 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1581 seq_puts(filp, "Failed to read the context\n");
1584 print_nix_ctx(filp, &rsp);
/* Validates a user-supplied "<nixlf> <queue id|all>" selection and, if valid,
 * stores it in rvu->rvu_dbg.nix_*_ctx so the subsequent read of the debugfs
 * file dumps the chosen context(s). Returns 0 on success (error paths are in
 * elided lines; presumably -EINVAL — confirm against source).
 */
1589 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1590 int id, int ctype, char *ctype_string,
1593 struct nix_hw *nix_hw = m->private;
1594 struct rvu_pfvf *pfvf;
1598 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1601 pfvf = rvu_get_pfvf(rvu, pcifunc);
/* The queue type must have been initialized for this LF before it can be
 * selected; max_id bounds the user-provided queue index below. */
1603 if (ctype == NIX_AQ_CTYPE_SQ) {
1604 if (!pfvf->sq_ctx) {
1605 dev_warn(rvu->dev, "SQ context is not initialized\n");
1608 max_id = pfvf->sq_ctx->qsize;
1609 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1610 if (!pfvf->rq_ctx) {
1611 dev_warn(rvu->dev, "RQ context is not initialized\n");
1614 max_id = pfvf->rq_ctx->qsize;
1615 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1616 if (!pfvf->cq_ctx) {
1617 dev_warn(rvu->dev, "CQ context is not initialized\n");
1620 max_id = pfvf->cq_ctx->qsize;
1623 if (id < 0 || id >= max_id) {
1624 dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
1625 ctype_string, max_id - 1);
/* Persist the selection for the matching debugfs file's show handler */
1629 case NIX_AQ_CTYPE_CQ:
1630 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1631 rvu->rvu_dbg.nix_cq_ctx.id = id;
1632 rvu->rvu_dbg.nix_cq_ctx.all = all;
1635 case NIX_AQ_CTYPE_SQ:
1636 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1637 rvu->rvu_dbg.nix_sq_ctx.id = id;
1638 rvu->rvu_dbg.nix_sq_ctx.all = all;
1641 case NIX_AQ_CTYPE_RQ:
1642 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1643 rvu->rvu_dbg.nix_rq_ctx.id = id;
1644 rvu->rvu_dbg.nix_rq_ctx.all = all;
/* Common write handler for the sq_ctx/rq_ctx/cq_ctx debugfs files: copies the
 * user command, parses "<nixlf> [<queue number>|all]" and hands the parsed
 * selection to write_nix_queue_ctx(). Returns the consumed byte count on
 * success or a negative errno.
 */
1652 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1653 const char __user *buffer,
1654 size_t count, loff_t *ppos,
1657 struct seq_file *m = filp->private_data;
1658 struct nix_hw *nix_hw = m->private;
1659 struct rvu *rvu = nix_hw->rvu;
1660 char *cmd_buf, *ctype_string;
1661 int nixlf, id = 0, ret;
/* Only whole, single writes are supported */
1664 if ((*ppos != 0) || !count)
1668 case NIX_AQ_CTYPE_SQ:
1669 ctype_string = "sq";
1671 case NIX_AQ_CTYPE_RQ:
1672 ctype_string = "rq";
1674 case NIX_AQ_CTYPE_CQ:
1675 ctype_string = "cq";
/* +1 keeps the copied command NUL-terminated for parsing */
1681 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1686 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1690 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1691 ctype_string, ctype_string);
1694 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1699 return ret ? ret : count;
/* Thin wrappers binding the shared queue-ctx handlers to the SQ context type */
1702 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
1703 const char __user *buffer,
1704 size_t count, loff_t *ppos)
1706 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1710 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
1712 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
/* Generates the file_operations used when creating the "sq_ctx" file */
1715 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
/* Thin wrappers binding the shared queue-ctx handlers to the RQ context type */
1717 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
1718 const char __user *buffer,
1719 size_t count, loff_t *ppos)
1721 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1725 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
1727 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
/* Generates the file_operations used when creating the "rq_ctx" file */
1730 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
/* Thin wrappers binding the shared queue-ctx handlers to the CQ context type */
1732 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
1733 const char __user *buffer,
1734 size_t count, loff_t *ppos)
1736 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
1740 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
1742 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
/* Generates the file_operations used when creating the "cq_ctx" file */
1745 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
/* Prints one queue type's context count and its enable/disable bitmap.
 * A temporary page-sized buffer holds the rendered bitmap string;
 * presumably it is freed in the elided tail — confirm against source.
 */
1747 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1748 unsigned long *bmap, char *qtype)
1752 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1756 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1757 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1758 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
/* Dumps qsize and bitmap info for each of the LF's CQ, RQ and SQ contexts,
 * or a "not initialized" note for any queue type that has no context.
 */
1763 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1766 seq_puts(filp, "cq context is not initialized\n");
1768 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1772 seq_puts(filp, "rq context is not initialized\n");
1774 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1778 seq_puts(filp, "sq context is not initialized\n");
1780 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
/* Thin wrappers binding the shared qsize handlers to the NIX block type */
1784 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
1785 const char __user *buffer,
1786 size_t count, loff_t *ppos)
1788 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1792 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
1794 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
/* Generates the file_operations used when creating the "qsize" file */
1797 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
/* Dumps a NIX ingress bandwidth-profile (policer) context, word by word
 * (W0..W15), translating mode/color/action fields into readable strings.
 */
1799 static void print_band_prof_ctx(struct seq_file *m,
1800 struct nix_bandprof_s *prof)
/* Each case sets `str` to the mode's name (assignments are in elided lines) */
1804 switch (prof->pc_mode) {
1805 case NIX_RX_PC_MODE_VLAN:
1808 case NIX_RX_PC_MODE_DSCP:
1811 case NIX_RX_PC_MODE_GEN:
1814 case NIX_RX_PC_MODE_RSVD:
1818 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1819 str = (prof->icolor == 3) ? "Color blind" :
1820 (prof->icolor == 0) ? "Green" :
1821 (prof->icolor == 1) ? "Yellow" : "Red";
1822 seq_printf(m, "W0: icolor\t\t%s\n", str);
1823 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1824 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1825 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1826 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1827 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1828 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1829 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1830 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1832 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1833 str = (prof->lmode == 0) ? "byte" : "packet";
1834 seq_printf(m, "W1: lmode\t\t%s\n", str);
1835 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1836 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1837 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1838 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
/* Per-color (green/yellow/red) committed actions */
1839 str = (prof->gc_action == 0) ? "PASS" :
1840 (prof->gc_action == 1) ? "DROP" : "RED";
1841 seq_printf(m, "W1: gc_action\t\t%s\n", str);
1842 str = (prof->yc_action == 0) ? "PASS" :
1843 (prof->yc_action == 1) ? "DROP" : "RED";
1844 seq_printf(m, "W1: yc_action\t\t%s\n", str);
1845 str = (prof->rc_action == 0) ? "PASS" :
1846 (prof->rc_action == 1) ? "DROP" : "RED";
1847 seq_printf(m, "W1: rc_action\t\t%s\n", str);
1848 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1849 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1850 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
/* W2..W15 are wide accumulators/counters; cast to u64 to match %lld */
1852 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1853 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1854 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1855 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1856 (u64)prof->green_pkt_pass);
1857 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1858 (u64)prof->yellow_pkt_pass);
1859 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1860 seq_printf(m, "W7: green_octs_pass\t%lld\n",
1861 (u64)prof->green_octs_pass);
1862 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1863 (u64)prof->yellow_octs_pass);
1864 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1865 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1866 (u64)prof->green_pkt_drop);
1867 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1868 (u64)prof->yellow_pkt_drop);
1869 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1870 seq_printf(m, "W13: green_octs_drop\t%lld\n",
1871 (u64)prof->green_octs_drop);
1872 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1873 (u64)prof->yellow_octs_drop);
1874 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1875 seq_puts(m, "==============================\n");
/* Show handler for "ingress_policer_ctx": walks every policer layer, reads
 * each allocated bandwidth profile via the AQ mailbox, reports which PF/VF
 * owns it and dumps its context.
 */
1878 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
1880 struct nix_hw *nix_hw = m->private;
1881 struct nix_cn10k_aq_enq_req aq_req;
1882 struct nix_cn10k_aq_enq_rsp aq_rsp;
1883 struct rvu *rvu = nix_hw->rvu;
1884 struct nix_ipolicer *ipolicer;
1885 int layer, prof_idx, idx, rc;
1889 /* Ingress policers do not exist on all platforms */
1890 if (!nix_hw->ipolicer)
1893 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1894 if (layer == BAND_PROF_INVAL_LAYER)
1896 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1897 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
1899 seq_printf(m, "\n%s bandwidth profiles\n", str);
1900 seq_puts(m, "=======================\n");
1902 ipolicer = &nix_hw->ipolicer[layer];
1904 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
/* Only allocated (in-use) profiles are dumped */
1905 if (is_rsrc_free(&ipolicer->band_prof, idx))
/* Hardware profile index encodes the layer in bits [15:14] */
1908 prof_idx = (idx & 0x3FFF) | (layer << 14);
1909 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1910 0x00, NIX_AQ_CTYPE_BANDPROF,
1914 "%s: Failed to fetch context of %s profile %d, err %d\n",
1915 __func__, str, idx, rc);
1918 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
1919 pcifunc = ipolicer->pfvf_map[idx];
1920 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
1921 seq_printf(m, "Allocated to :: PF %d\n",
1922 rvu_get_pf(pcifunc));
1924 seq_printf(m, "Allocated to :: PF %d VF %d\n",
1925 rvu_get_pf(pcifunc),
1926 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
1927 print_band_prof_ctx(m, &aq_rsp.prof);
/* Read-only debugfs file: no write handler */
1933 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
/* Show handler for "ingress_policer_rsrc": prints, per policer layer, the
 * total number of bandwidth profiles and how many are currently free.
 */
1935 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1937 struct nix_hw *nix_hw = m->private;
1938 struct nix_ipolicer *ipolicer;
1942 /* Ingress policers do not exist on all platforms */
1943 if (!nix_hw->ipolicer)
1946 seq_puts(m, "\nBandwidth profile resource free count\n");
1947 seq_puts(m, "=====================================\n");
1948 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1949 if (layer == BAND_PROF_INVAL_LAYER)
1951 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1952 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1954 ipolicer = &nix_hw->ipolicer[layer];
1955 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
1956 ipolicer->band_prof.max,
1957 rvu_rsrc_free_count(&ipolicer->band_prof));
1959 seq_puts(m, "=====================================\n");
/* Read-only debugfs file: no write handler */
1964 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
/* Creates the per-NIX-block debugfs directory ("nix" or "nix1") and all of
 * its entries. Skipped entirely if the block is not implemented on this
 * silicon. debugfs_create_* failures are intentionally not checked, per
 * kernel debugfs convention.
 */
1966 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1968 struct nix_hw *nix_hw;
1970 if (!is_block_implemented(rvu->hw, blkaddr))
1973 if (blkaddr == BLKADDR_NIX0) {
1974 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1975 nix_hw = &rvu->hw->nix[0];
1977 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1979 nix_hw = &rvu->hw->nix[1];
1982 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1983 &rvu_dbg_nix_sq_ctx_fops);
1984 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1985 &rvu_dbg_nix_rq_ctx_fops);
1986 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1987 &rvu_dbg_nix_cq_ctx_fops);
1988 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1989 &rvu_dbg_nix_ndc_tx_cache_fops);
1990 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1991 &rvu_dbg_nix_ndc_rx_cache_fops);
1992 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1993 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1994 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1995 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
/* Note: "qsize" takes rvu (not nix_hw) as private data */
1996 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1997 &rvu_dbg_nix_qsize_fops);
1998 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1999 &rvu_dbg_nix_band_prof_ctx_fops);
2000 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2001 &rvu_dbg_nix_band_prof_rsrc_fops);
/* Creates the "npa" debugfs directory and its entries; all files use rvu as
 * their private data.
 */
2004 static void rvu_dbg_npa_init(struct rvu *rvu)
2006 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2008 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2009 &rvu_dbg_npa_qsize_fops);
2010 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2011 &rvu_dbg_npa_aura_ctx_fops);
2012 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2013 &rvu_dbg_npa_pool_ctx_fops);
2014 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2015 &rvu_dbg_npa_ndc_cache_fops);
2016 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2017 &rvu_dbg_npa_ndc_hits_miss_fops);
/* Helpers for cgx_print_stats(): fetch one cumulative NIX RX (resp. TX)
 * statistic for the LMAC and print it with the given label. They rely on
 * `rvu`, `cgxd`, `lmac_id`, `s` and `err` being in scope at the expansion
 * site (statement-expression macros; full bodies partly in elided lines).
 */
2020 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
2023 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2024 NIX_STATS_RX, &(cnt)); \
2026 seq_printf(s, "%s: %llu\n", name, cnt); \
2030 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
2033 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2034 NIX_STATS_TX, &(cnt)); \
2036 seq_printf(s, "%s: %llu\n", name, cnt); \
/* Dumps link status, cumulative NIX RX/TX stats and raw CGX/RPM MAC counters
 * for one LMAC to the debugfs seq_file. Returns 0 or a negative errno.
 */
2040 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2042 struct cgx_link_user_info linfo;
2043 struct mac_ops *mac_ops;
2044 void *cgxd = s->private;
2045 u64 ucast, mcast, bcast;
2046 int stat = 0, err = 0;
2047 u64 tx_stat, rx_stat;
/* Locate the AF device; its drvdata is the rvu handle used below */
2050 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2051 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2055 mac_ops = get_mac_ops(cgxd);
2056 /* There can be no CGX devices at all */
2061 seq_puts(s, "\n=======Link Status======\n\n");
2062 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2064 seq_puts(s, "Failed to read link status\n");
2065 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2066 linfo.link_up ? "UP" : "DOWN", linfo.speed);
2069 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2071 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2074 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2077 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2080 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2081 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2084 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2087 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2092 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2094 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2097 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2100 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2103 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2104 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2107 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
/* Raw MAC counters: field names differ between OTX2 (CGX) and CN10K (RPM) */
2112 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2113 while (stat < mac_ops->rx_stats_cnt) {
2114 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2117 if (is_rvu_otx2(rvu))
2118 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2121 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2128 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2129 while (stat < mac_ops->tx_stats_cnt) {
2130 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2134 if (is_rvu_otx2(rvu))
2135 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2138 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
/* Derives the LMAC id from the debugfs file's parent directory name (e.g.
 * "lmac2"): finds the last 'c' in the name and parses the digits after it.
 * Returns 0 on success, negative errno on parse failure.
 */
2146 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2148 struct dentry *current_dir;
2151 current_dir = filp->file->f_path.dentry->d_parent;
2152 buf = strrchr(current_dir->d_name.name, 'c');
2156 return kstrtoint(buf + 1, 10, lmac_id);
/* Show handler for the per-LMAC "stats" file: resolves the LMAC id from the
 * directory name, then prints the full stats dump.
 */
2159 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
2163 err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2165 return cgx_print_stats(filp, lmac_id);
/* Read-only debugfs file: no write handler */
2170 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
/* Dumps the DMAC filter configuration of one LMAC: broadcast/multicast
 * accept modes, unicast vs. promiscuous mode, and every enabled DMAC CAM
 * entry belonging to this LMAC.
 */
2172 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2174 struct pci_dev *pdev = NULL;
2175 void *cgxd = s->private;
2176 char *bcast, *mcast;
/* Locate the AF device; its drvdata is the rvu handle used below */
2183 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2184 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2188 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
/* PF n's netdev function sits at PCI slot pf + 1 */
2191 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2195 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2196 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2197 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2200 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2201 seq_printf(s, "%s PF%d %9s %9s",
2202 dev_name(&pdev->dev), pf, bcast, mcast);
2203 if (cfg & CGX_DMAC_CAM_ACCEPT)
2204 seq_printf(s, "%12s\n\n", "UNICAST");
2206 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2208 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
2210 for (index = 0 ; index < 32 ; index++) {
2211 cfg = cgx_read_dmac_entry(cgxd, index);
2212 /* Display enabled dmac entries associated with current lmac */
2213 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2214 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2215 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2216 u64_to_ether_addr(mac, dmac);
2217 seq_printf(s, "%7d %pM\n", index, dmac);
/* Show handler for the per-LMAC "mac_filter" file: resolves the LMAC id from
 * the directory name, then prints the DMAC filter dump.
 */
2224 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2228 err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2230 return cgx_print_dmac_flt(filp, lmac_id);
/* Read-only debugfs file: no write handler */
2235 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
/* Creates the CGX/RPM debugfs hierarchy: one root dir named after the MAC
 * type, a directory per CGX/RPM unit, and within it a directory per active
 * LMAC holding the "stats" and "mac_filter" files.
 */
2237 static void rvu_dbg_cgx_init(struct rvu *rvu)
2239 struct mac_ops *mac_ops;
2240 unsigned long lmac_bmap;
2245 if (!cgx_get_cgxcnt_max())
2248 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2252 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2255 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2256 cgx = rvu_cgx_pdata(i, rvu);
2259 lmac_bmap = cgx_get_lmac_bmap(cgx);
2260 /* cgx debugfs dir */
2261 sprintf(dname, "%s%d", mac_ops->name, i);
2262 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2263 rvu->rvu_dbg.cgx_root);
2265 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2266 /* lmac debugfs dir */
2267 sprintf(dname, "lmac%d", lmac_id);
2269 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2271 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2272 cgx, &rvu_dbg_cgx_stat_fops);
2273 debugfs_create_file("mac_filter", 0600,
2274 rvu->rvu_dbg.lmac, cgx,
2275 &rvu_dbg_cgx_dmac_flt_fops);
/* NPC debugfs APIs */

/* Print the MCAM entry/counter allocation summary for one PF or VF
 * (identified by @pcifunc) into @s. Skips devices that own neither
 * entries nor counters.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
u16 pcifunc, int blkaddr)
struct rvu *rvu = s->private;
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
/* Fetch allocated vs enabled counts for MCAM entries and counters */
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
&cntr_acnt, &cntr_ecnt);
/* Nothing allocated to this device - nothing to report */
if (!entry_acnt && !cntr_acnt)
/* FUNC bits zero => the PF itself, otherwise VF index is FUNC-1 */
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
rvu_get_pf(pcifunc));
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
rvu_get_pf(pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
/* debugfs "mcam_info" show handler.
 *
 * Dumps global NPC MCAM state: RX/TX key widths, total/reserved/free
 * entry and counter counts, then a per-PF/per-VF allocation breakdown
 * under mcam->lock. Skips the breakdown when no entries are allocated.
 * (Note: the "unsued" parameter name is a pre-existing typo for
 * "unused"; left untouched here.)
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
u16 pcifunc, counters;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
mcam = &rvu->hw->mcam;
counters = rvu->hw->npc_counters;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
/* Key-width field lives in bits [34:32] of the KEX config */
cfg = (cfg >> 32) & 0x07;
seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
"112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
"224bits" : "448bits"));
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
cfg = (cfg >> 32) & 0x07;
seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
"112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
"224bits" : "448bits"));
/* Bitmap and counter bookkeeping are protected by mcam->lock */
mutex_lock(&mcam->lock);
seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
/* Reserved = entries the AF keeps back from the allocatable bitmap */
seq_printf(filp, "\t\t Reserved \t: %d\n",
mcam->total_entries - mcam->bmap_entries);
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
seq_printf(filp, "\t\t Reserved \t: %d\n",
counters - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
/* All entries free => nothing allocated, skip per-device dump */
if (mcam->bmap_entries == mcam->bmap_fcnt) {
mutex_unlock(&mcam->lock);
seq_puts(filp, "\n\t\t Current allocation\n");
seq_puts(filp, "\t\t====================\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
/* Number of VFs behind this PF from its RVU privilege config */
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
for (vf = 0; vf < numvfs; vf++) {
/* FUNC field is VF index + 1 (0 means the PF itself) */
pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
mutex_unlock(&mcam->lock);
RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
/* debugfs "rx_miss_act_stats" show handler.
 *
 * Reads the NPC match-stat register bound to the RX miss action
 * (mcam->rx_miss_act_cntr) and prints its hit count.
 */
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
struct rvu *rvu = filp->private;
struct npc_mcam *mcam;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
mcam = &rvu->hw->mcam;
seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
rvu_read64(rvu, blkaddr,
NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
/* Print the match-key fields of one installed MCAM rule.
 *
 * Iterates every bit set in rule->features (each bit names one NPC
 * match field) and, per field, prints the packet value followed by its
 * mask. The case labels selecting each field are elided from this view;
 * each printf pair below corresponds to one feature bit.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
seq_printf(s, "\t%s ", npc_get_field_name(bit));
/* Destination MAC */
seq_printf(s, "%pM ", rule->packet.dmac);
seq_printf(s, "mask %pM\n", rule->mask.dmac);
/* Source MAC */
seq_printf(s, "%pM ", rule->packet.smac);
seq_printf(s, "mask %pM\n", rule->mask.smac);
/* Ethertype is stored big-endian, convert for display */
seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
/* VLAN TCI */
seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
seq_printf(s, "mask 0x%x\n",
ntohs(rule->mask.vlan_tci));
/* IP TOS/DSCP */
seq_printf(s, "%d ", rule->packet.tos);
seq_printf(s, "mask 0x%x\n", rule->mask.tos);
/* IPv4 source/destination addresses */
seq_printf(s, "%pI4 ", &rule->packet.ip4src);
seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
/* IPv6 source/destination addresses */
seq_printf(s, "%pI6 ", rule->packet.ip6src);
seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
seq_printf(s, "%pI6 ", rule->packet.ip6dst);
seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
/* L4 source port (shared print for TCP/UDP/SCTP cases) */
case NPC_SPORT_SCTP:
seq_printf(s, "%d ", ntohs(rule->packet.sport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
/* L4 destination port (shared print for TCP/UDP/SCTP cases) */
case NPC_DPORT_SCTP:
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
/* Print a human-readable description of an MCAM rule's action.
 *
 * TX rules use the NIX_TX_ACTIONOP_* encoding, everything else is
 * decoded as an RX action (NIX_RX_ACTIONOP_*).
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
if (is_npc_intf_tx(rule->intf)) {
switch (rule->tx_action.op) {
case NIX_TX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
case NIX_TX_ACTIONOP_UCAST_DEFAULT:
seq_puts(s, "\taction: Unicast to default channel\n");
case NIX_TX_ACTIONOP_UCAST_CHAN:
seq_printf(s, "\taction: Unicast to channel %d\n",
rule->tx_action.index);
case NIX_TX_ACTIONOP_MCAST:
seq_puts(s, "\taction: Multicast\n");
case NIX_TX_ACTIONOP_DROP_VIOL:
seq_puts(s, "\taction: Lockdown Violation Drop\n");
/* RX-side action decode */
switch (rule->rx_action.op) {
case NIX_RX_ACTIONOP_DROP:
seq_puts(s, "\taction: Drop\n");
case NIX_RX_ACTIONOP_UCAST:
/* index is the destination receive queue */
seq_printf(s, "\taction: Direct to queue %d\n",
rule->rx_action.index);
case NIX_RX_ACTIONOP_RSS:
seq_puts(s, "\taction: RSS\n");
case NIX_RX_ACTIONOP_UCAST_IPSEC:
seq_puts(s, "\taction: Unicast ipsec\n");
case NIX_RX_ACTIONOP_MCAST:
seq_puts(s, "\taction: Multicast\n");
/* Map an NPC interface id to a printable name for the rules dump.
 * The returned string literals are elided from this view; each case
 * below covers one RX/TX interface instance.
 */
static const char *rvu_dbg_get_intf_name(int intf)
case NIX_INTFX_RX(0):
case NIX_INTFX_RX(1):
case NIX_INTFX_TX(0):
case NIX_INTFX_TX(1):
/* debugfs "mcam_rules" show handler.
 *
 * Walks mcam->mcam_rules under mcam->lock and, for each installed rule,
 * prints owner PF/VF, direction, interface, entry index, match fields
 * (via rvu_dbg_npc_mcam_show_flows), RX forwarding target/channel,
 * action, enable state and (when attached) counter index and hit count.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
struct rvu_npc_mcam_rule *iter;
struct rvu *rvu = s->private;
struct npc_mcam *mcam;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
mcam = &rvu->hw->mcam;
/* Rule list is protected by mcam->lock */
mutex_lock(&mcam->lock);
list_for_each_entry(iter, &mcam->mcam_rules, list) {
/* Decode owner pcifunc into PF (and optional VF) indices */
pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
seq_printf(s, "\n\tInstalled by: PF%d ", pf);
if (iter->owner & RVU_PFVF_FUNC_MASK) {
vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
seq_printf(s, "VF%d", vf);
seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
seq_printf(s, "\tinterface: %s\n",
rvu_dbg_get_intf_name(iter->intf));
seq_printf(s, "\tmcam entry: %d\n", iter->entry);
rvu_dbg_npc_mcam_show_flows(s, iter);
if (is_npc_intf_rx(iter->intf)) {
/* RX rules carry a forwarding target pcifunc and channel */
target = iter->rx_action.pf_func;
pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
seq_printf(s, "\tForward to: PF%d ", pf);
if (target & RVU_PFVF_FUNC_MASK) {
vf = (target & RVU_PFVF_FUNC_MASK) - 1;
seq_printf(s, "VF%d", vf);
seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
rvu_dbg_npc_mcam_show_action(s, iter);
enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
/* Counter info only exists when a counter was attached */
if (!iter->has_cntr)
seq_printf(s, "\tcounter: %d\n", iter->cntr);
hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
seq_printf(s, "\thits: %lld\n", hits);
mutex_unlock(&mcam->lock);
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2601 static void rvu_dbg_npc_init(struct rvu *rvu)
2603 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2605 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2606 &rvu_dbg_npc_mcam_info_fops);
2607 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2608 &rvu_dbg_npc_mcam_rules_fops);
2609 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2610 &rvu_dbg_npc_rx_miss_act_fops);
/* Print per-engine FREE/BUSY status bitmaps for one CPT engine class.
 *
 * CPT_AF_CONSTANTS1 gives the SE/IE/AE engine counts; engines are laid
 * out consecutively as [SE | IE | AE], so @eng_type selects an
 * [e_min, e_max) window into the global engine index space. The branch
 * choosing the window per engine type is elided from this view.
 * Each engine's CPT_AF_EXEX_STS sets one bit in either busy_sts or
 * free_sts (bit i corresponds to engine e_min + i).
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
struct cpt_ctx *ctx = filp->private;
u64 busy_sts = 0, free_sts = 0;
u32 e_min = 0, e_max = 0, e, i;
u16 max_ses, max_ies, max_aes;
struct rvu *rvu = ctx->rvu;
int blkaddr = ctx->blkaddr;
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
max_aes = (reg >> 32) & 0xffff;
/* AE window: follows all SE and IE engines */
e_min = max_ses + max_ies;
e_max = max_ses + max_ies + max_aes;
e_max = max_ses + max_ies;
for (e = e_min, i = 0; e < e_max; e++, i++) {
reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
busy_sts |= 1ULL << i;
free_sts |= 1ULL << i;
seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
/* debugfs "cpt_ae_sts": asymmetric-engine status via the shared helper */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
return cpt_eng_sts_display(filp, CPT_AE_TYPE);
RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
/* debugfs "cpt_se_sts": symmetric-engine status via the shared helper */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
return cpt_eng_sts_display(filp, CPT_SE_TYPE);
RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
/* debugfs "cpt_ie_sts": IPsec-engine status via the shared helper */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
return cpt_eng_sts_display(filp, CPT_IE_TYPE);
RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
/* debugfs "cpt_engines_info" show handler.
 *
 * For every CPT engine (SE + IE + AE, counted from CPT_AF_CONSTANTS1)
 * dump its group-enable mask, active info and control registers.
 */
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
struct cpt_ctx *ctx = filp->private;
u16 max_ses, max_ies, max_aes;
struct rvu *rvu = ctx->rvu;
int blkaddr = ctx->blkaddr;
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
max_aes = (reg >> 32) & 0xffff;
/* Total engine count across all three engine classes */
e_max = max_ses + max_ies + max_aes;
seq_puts(filp, "===========================================\n");
for (e = 0; e < e_max; e++) {
reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
seq_puts(filp, "===========================================\n");
RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
/* debugfs "cpt_lfs_info" show handler.
 *
 * Dumps CTL/CTL2/PTR_CTL and the block-level LF config register for
 * every CPT LF of this block. Returns early when the block has no LF
 * bitmap (block not initialized).
 * NOTE(review): the "hw = rvu->hw" assignment appears to be elided
 * from this view — confirm hw is initialized before use below.
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
struct cpt_ctx *ctx = filp->private;
int blkaddr = ctx->blkaddr;
struct rvu *rvu = ctx->rvu;
struct rvu_block *block;
struct rvu_hwinfo *hw;
block = &hw->block[blkaddr];
if (!block->lf.bmap)
seq_puts(filp, "===========================================\n");
for (lf = 0; lf < block->lf.max; lf++) {
reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
/* Per-LF config register is addressed via the block's cfg base */
reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
(lf << block->lfshift));
seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
seq_puts(filp, "===========================================\n");
RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
/* debugfs "cpt_err_info" show handler.
 *
 * Dumps the CPT fault/poison/RVU/RAS interrupt registers and the
 * execution error info register for this CPT block.
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
struct cpt_ctx *ctx = filp->private;
struct rvu *rvu = ctx->rvu;
int blkaddr = ctx->blkaddr;
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
/* debugfs "cpt_pc" show handler.
 *
 * Dumps the CPT performance counters: instruction request/latency,
 * NCB read request/latency, UC-fill reads, active cycles and clock
 * count, one labelled line each.
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
struct cpt_ctx *ctx = filp->private;
struct rvu *rvu = ctx->rvu;
int blkaddr = ctx->blkaddr;
reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
seq_printf(filp, "CPT instruction requests %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
seq_printf(filp, "CPT instruction latency %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
seq_printf(filp, "CPT NCB read requests %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
seq_printf(filp, "CPT NCB read latency %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
seq_printf(filp, "CPT active cycles pc %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
seq_printf(filp, "CPT clock count pc %llu\n", reg);
RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
/* Create the per-CPT-block debugfs directory ("cpt" for CPT0, "cpt1"
 * for CPT1) and populate it with the CPT status/info files. Each file
 * gets a cpt_ctx (rvu + blkaddr) as its private data so the show
 * handlers know which block to read. No-op when the block is not
 * implemented in this silicon.
 */
static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
struct cpt_ctx *ctx;
if (!is_block_implemented(rvu->hw, blkaddr))
if (blkaddr == BLKADDR_CPT0) {
rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
ctx = &rvu->rvu_dbg.cpt_ctx[0];
ctx->blkaddr = BLKADDR_CPT0;
/* CPT1 path: separate directory and context slot */
rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
ctx = &rvu->rvu_dbg.cpt_ctx[1];
ctx->blkaddr = BLKADDR_CPT1;
debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_pc_fops);
debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_ae_sts_fops);
debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_se_sts_fops);
debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_ie_sts_fops);
debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_engines_info_fops);
debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_lfs_info_fops);
debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_err_info_fops);
/* Pick the root debugfs directory name based on silicon generation
 * (OcteonTX2 vs newer). The returned string literals are elided from
 * this view.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
if (!is_rvu_otx2(rvu))
/* Top-level debugfs entry point: create the RVU AF root directory and
 * all sub-hierarchies (NPA, NIX0/1, CGX, NPC, CPT0/1).
 *
 * The PF<->MAC map file is named "rvu_pf_cgx_map" on OcteonTX2 and
 * "rvu_pf_rpm_map" on newer silicon; "lmtst_map_table" only exists on
 * non-OcteonTX2 chips. CGX-related files are skipped entirely when no
 * CGX block is present.
 */
void rvu_dbg_init(struct rvu *rvu)
rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
if (!is_rvu_otx2(rvu))
debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_lmtst_map_table_fops);
/* No CGX blocks => skip the MAC map file and CGX debugfs tree */
if (!cgx_get_cgxcnt_max())
if (is_rvu_otx2(rvu))
debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
rvu_dbg_npa_init(rvu);
rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
2877 void rvu_dbg_exit(struct rvu *rvu)
2879 debugfs_remove_recursive(rvu->rvu_dbg.root);
2882 #endif /* CONFIG_DEBUG_FS */