// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "lmac_common.h"
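/* This file implements the admin function (AF) side of the NIX block:
 * LF allocation/free, admin queue (AQ) context management, Tx scheduler
 * (TL1..SMQ) queue handling, LSO formats, VLAN tagging and multicast/
 * broadcast MCE list maintenance. Handlers prefixed rvu_mbox_handler_*
 * are invoked from the AF mailbox dispatcher.
 */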
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
			       int type, bool add);
static int nix_setup_ipolicers(struct rvu *rvu,
			       struct nix_hw *nix_hw, int blkaddr);
static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
			       struct nix_hw *nix_hw, u16 pcifunc);
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u16 pcifunc);
enum nix_mark_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128
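/* Sizing sketch: 12 PFs + 256 HWVFs need at most 268 MCE entries for
 * broadcast replication, so a 512-entry table (MC_TBL_SZ_512) leaves
 * ample headroom.
 */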
struct mce {
	struct hlist_node node;
	u16 pcifunc;
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	/* If blkaddr is 0, return the first NIX block address */
	if (!blkaddr)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
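/* The helper above is typically driven as an iterator: seed it with
 * blkaddr 0 and feed each returned block address back in until it
 * returns 0, as rvu_get_nixlf_count() below does.
 */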
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_blkaddr = blkaddr;
	return 0;
}
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
			struct nix_hw **nix_hw, int *blkaddr)
{
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || *blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
	if (!*nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;
	return 0;
}
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}
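/* Note: this is a simple bump allocator over the shared MCE table;
 * entries handed out here are never returned to a free pool.
 */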
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in-flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		else
			return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct mac_ops *mac_ops;
	int pkind, pf, vf, lbkid;
	u8 cgx_id, lmac_id;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
								    rvu),
						      lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternately for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore, if an odd number of AF VFs
		 * are enabled then the last VF is left with no pair.
		 */
		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
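		/* Illustration of the pairing: AF VF0 (vf = 0) receives on
		 * LBK channel 0 and transmits to channel 1, while VF1
		 * receives on channel 1 and transmits to channel 0, so the
		 * two VFs form a back-to-back link.
		 */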
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,

	/* Add a UCAST forwarding rule in MCAM matching this NIXLF's
	 * attached RVU PF/VF MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}

	/* Install MCAM rule matching Ethernet broadcast mac address */
	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);

	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
	if (err)
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc & RVU_PFVF_FUNC_MASK);

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and BPIDs are mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
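	/* Worked example (assuming lmac_chan_cnt = 16 and 4 LMACs per CGX,
	 * as in the map above): cgx(1)_lmac(2)_chan(5) gets
	 * bpid = 1 * 4 * 16 + 2 * 16 + 5 = 101.
	 */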
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 15)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
		       (lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;

	/* Enable backpressure only for CGX mapped PFs and LBK interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & 0xFF) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel to the bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
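/* Each rsp->chan_bpid[] word thus encodes the channel offset in bits 16:10
 * and the assigned BPID in bits 9:0. In NIX_AF_RX_CHANX_CFG itself, bits
 * 7:0 carry the BPID and bit 16 is the backpressure enable, which is the
 * bit cleared again by nix_bp_disable above.
 */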
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4, length field is at offset 2 bytes, for IPv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IPv4's ID field is at offset 4 */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
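/* Summary of the OL3 programming above: for every transmitted segment,
 * NIX_LSOALG_ADD_PAYLEN rewrites the IP total/payload length and
 * NIX_LSOALG_ADD_SEGNUM bumps the IPv4 ID by the segment number.
 */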
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field is at offset 4 */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field is at offset 12 */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
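/* Summary of the OL4 programming above: NIX_LSOALG_ADD_OFFSET advances the
 * TCP sequence number by the running payload offset of each segment, and
 * NIX_LSOALG_TCP_FLAGS applies the first/middle/last flag masks configured
 * in NIX_AF_LSO_CFG below.
 */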
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
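	/* 0xFFF2 = 0b1111111111110010: with TCP flags FIN = bit 0,
	 * SYN = bit 1, RST = bit 2, PSH = bit 3, ANDing with this mask
	 * clears FIN, RST and PSH while leaving SYN, ACK and the remaining
	 * bits intact.
	 */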
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	nix_hw->lso.in_use++;
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
		    way_mask << 20);

	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
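/* Example: rss_sz = 256 and rss_grps = 2 gives num_indices = 512; group 1
 * is then programmed with offset 256 in the low bits and the size field
 * ilog2(256) - 1 = 7 at bit 16.
 */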
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
	 * operations done by AF itself.
	 */
	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}
	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_BANDPROF:
		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
					nix_hw, pcifunc))
			rc = NIX_AF_ERR_INVALID_BANDPROF;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;
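	/* AQ result memory layout for a single instruction, as used above:
	 *
	 *   res_addr + 0   : struct nix_aq_res_s (completion code, status)
	 *   res_addr + 128 : context to INIT/WRITE (ctx)
	 *   res_addr + 256 : bitmask selecting which context fields to update
	 */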
	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(mask, &req->prof_mask,
			       sizeof(struct nix_bandprof_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}
	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}
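	/* The expressions above implement a masked update of the cached
	 * enable state: new_ena = (requested & mask) | (old & ~mask), i.e.
	 * the bitmap only changes when the request actually masks-in the
	 * 'ena' field.
	 */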
	/* Copy read context into mailbox */
	if (req->op == NIX_AQ_INSTOP_READ) {
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(&rsp->rq, ctx,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(&rsp->sq, ctx,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(&rsp->cq, ctx,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(&rsp->rss, ctx,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(&rsp->mce, ctx,
			       sizeof(struct nix_rx_mce_s));
		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
			memcpy(&rsp->prof, ctx,
			       sizeof(struct nix_bandprof_s));
	}

	spin_unlock(&aq->lock);
	return 0;
}
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
}
static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int rc = 0;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc)
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
	}

	return rc;
}
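/* Each iteration above issues a masked AQ WRITE with only 'ena' (and, for
 * CQs, 'bp_ena') selected, so every context field other than the enables
 * is preserved while the queue is turned off.
 */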
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
/* CN10K mbox handler */
int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
				      struct nix_cn10k_aq_enq_req *req,
				      struct nix_cn10k_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
				   (struct nix_aq_enq_rsp *)rsp);
}

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}
	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
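	/* Resulting layout: 0x8100 (802.1Q) lands in the VLAN1 TPID field
	 * at bits 31:16 and 0x88A8 (802.1AD) in the VLAN0 TPID field at
	 * bits 15:0 of NIX_AF_LFX_TX_CFG.
	 */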
	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = req->npa_func;
	cfg |= (u64)req->sso_func << 16;
	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	/* Configure RX VTAG Type 7 (strip) for vf vlan */
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
		    VTAGSIZE_T4 | VTAG_STRIP);
	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (req->flags & NIX_LF_DISABLE_FLOWS)
		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	else
		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);

	/* Free any tx vtag def entries used by this NIX LF */
	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
		nix_free_tx_vtag_entries(rvu, pcifunc);

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}
/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	if (lvl != NIX_TXSCH_LVL_TL2)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}
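/* Example with illustrative values nix_txsch_per_cgx_lmac = 16,
 * nix_txsch_per_lbk_lmac = 8, 8 CGX links and 2 LBK links: CGX link 3
 * owns the range [48, 64), and SDP queues start at 16 * 8 + 8 * 2 = 144.
 */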
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];
	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}
	/* Allocate the requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
static void nix_smq_flush(struct rvu *rvu, int blkaddr,
			  int smq, u16 pcifunc, int nixlf)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
						    lmac_id, true);
	}

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
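	/* In NIX_AF_SMQX_CFG, bit 49 triggers the flush and bit 50 sets
	 * enqueue xoff; completion is detected below by polling bit 49
	 * until HW clears it.
	 */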
	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across a PF
		 * and its VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		mutex_unlock(&rvu->rsrc_lock);
		goto err;
	}

	/* Flush if it is an SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user.
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ)
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
err:
	return NIX_AF_ERR_TLX_INVALID;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);

	return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}
static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	}
	return true;
}
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
		    TXSCH_TL1_DFLT_RR_QTM);
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	u64 reg, regval, schq_regbase;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return NIX_AF_ERR_INVALID_NIXBLK;

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special, hence split the register write such
		 * that the flush is done first and the rest of the bits are
		 * written later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
	    req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	/* RX VTAG Type 7 reserved for vf vlan */
	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
		return NIX_AF_ERR_RX_VTAG_INUSE;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}
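/* Register encoding used above: the low bits hold the vtag size (T4/T8),
 * bit 4 enables stripping and bit 5 enables capturing the tag.
 */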
static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
			    u16 pcifunc, int index)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;

	if (vlan->entry2pfvf_map[index] != pcifunc)
		return NIX_AF_ERR_PARAM;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);

	vlan->entry2pfvf_map[index] = 0;
	rvu_free_rsrc(&vlan->rsrc, index);

	return 0;
}
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
{
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int index, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	vlan = &nix_hw->txvlan;

	mutex_lock(&vlan->rsrc_lock);
	/* Scan all the entries and free the ones mapped to 'pcifunc' */
	for (index = 0; index < vlan->rsrc.max; index++) {
		if (vlan->entry2pfvf_map[index] == pcifunc)
			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
	}
	mutex_unlock(&vlan->rsrc_lock);
}
static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
			     u64 vtag, u8 size)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	u64 regval;
	int index;

	mutex_lock(&vlan->rsrc_lock);

	index = rvu_alloc_rsrc(&vlan->rsrc);
	if (index < 0) {
		mutex_unlock(&vlan->rsrc_lock);
		return index;
	}

	mutex_unlock(&vlan->rsrc_lock);

	regval = size ? vtag : vtag << 32;

	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
	rvu_write64(rvu, blkaddr,
		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);

	return index;
}
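/* A sketch of the resulting entry: DEFX_DATA holds the default tag value
 * (placed in the upper 32 bits when size is VTAGSIZE_T4, per the regval
 * computation above) and DEFX_CTL holds the size; nix_tx_vtag_free()
 * clears both.
 */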
static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
			     struct nix_vtag_config *req)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	u16 pcifunc = req->hdr.pcifunc;
	int idx0 = req->tx.vtag0_idx;
	int idx1 = req->tx.vtag1_idx;
	int err = 0;

	if (req->tx.free_vtag0 && req->tx.free_vtag1)
		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
		    vlan->entry2pfvf_map[idx1] != pcifunc)
			return NIX_AF_ERR_PARAM;

	mutex_lock(&vlan->rsrc_lock);

	if (req->tx.free_vtag0) {
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
		if (err)
			goto exit;
	}

	if (req->tx.free_vtag1)
		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);

exit:
	mutex_unlock(&vlan->rsrc_lock);
	return err;
}
static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
			   struct nix_vtag_config *req,
			   struct nix_vtag_config_rsp *rsp)
{
	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
	struct nix_txvlan *vlan = &nix_hw->txvlan;
	u16 pcifunc = req->hdr.pcifunc;

	if (req->tx.cfg_vtag0) {
		rsp->vtag0_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag0, req->vtag_size);
		if (rsp->vtag0_idx < 0)
			return NIX_AF_ERR_TX_VTAG_NOSPC;

		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
	}

	if (req->tx.cfg_vtag1) {
		rsp->vtag1_idx =
			nix_tx_vtag_alloc(rvu, blkaddr,
					  req->tx.vtag1, req->vtag_size);
		if (rsp->vtag1_idx < 0)
			goto err_free;

		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
	}

	return 0;

err_free:
	if (req->tx.cfg_vtag0)
		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);

	return NIX_AF_ERR_TX_VTAG_NOSPC;
}
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct nix_vtag_config_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		/* rx vtag configuration */
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* tx vtag configuration */
		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
		    (req->tx.free_vtag0 || req->tx.free_vtag1))
			return NIX_AF_ERR_PARAM;

		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);

		if (req->tx.free_vtag0 || req->tx.free_vtag1)
			return nix_tx_vtag_decfg(rvu, blkaddr, req);
	}

	return 0;
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Use RSS with RSS index 0 */
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}
2268 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2269 u16 pcifunc, bool add)
2271 struct mce *mce, *tail = NULL;
2272 bool delete = false;
2274 /* Scan through the current list */
2275 hlist_for_each_entry(mce, &mce_list->head, node) {
2276 /* If already exists, then delete */
2277 if (mce->pcifunc == pcifunc && !add) {
2280 } else if (mce->pcifunc == pcifunc && add) {
2281 /* entry already exists */
2288 hlist_del(&mce->node);
2297 /* Add a new one to the list, at the tail */
2298 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2301 mce->pcifunc = pcifunc;
2303 hlist_add_head(&mce->node, &mce_list->head);
2305 hlist_add_behind(&mce->node, &tail->node);
2310 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2311 struct nix_mce_list *mce_list,
2312 int mce_idx, int mcam_index, bool add)
2314 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2315 struct npc_mcam *mcam = &rvu->hw->mcam;
2316 struct nix_mcast *mcast;
2317 struct nix_hw *nix_hw;
2323 /* Get this PF/VF func's MCE index */
2324 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2326 if (idx > (mce_idx + mce_list->max)) {
2328 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2329 __func__, idx, mce_list->max,
2330 pcifunc >> RVU_PFVF_PF_SHIFT);
2334 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2338 mcast = &nix_hw->mcast;
2339 mutex_lock(&mcast->mce_lock);
2341 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2345 /* Disable MCAM entry in NPC */
2346 if (!mce_list->count) {
2347 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2348 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2352 /* Dump the updated list to HW */
2354 last_idx = idx + mce_list->count - 1;
2355 hlist_for_each_entry(mce, &mce_list->head, node) {
2360 /* EOL should be set in last MCE */
2361 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2362 mce->pcifunc, next_idx,
2363 (next_idx > last_idx) ? true : false);
2370 mutex_unlock(&mcast->mce_lock);
2374 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2375 struct nix_mce_list **mce_list, int *mce_idx)
2377 struct rvu_hwinfo *hw = rvu->hw;
2378 struct rvu_pfvf *pfvf;
2380 if (!hw->cap.nix_rx_multicast ||
2381 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2387 /* Get this PF/VF func's MCE index */
2388 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2390 if (type == NIXLF_BCAST_ENTRY) {
2391 *mce_list = &pfvf->bcast_mce_list;
2392 *mce_idx = pfvf->bcast_mce_idx;
2393 } else if (type == NIXLF_ALLMULTI_ENTRY) {
2394 *mce_list = &pfvf->mcast_mce_list;
2395 *mce_idx = pfvf->mcast_mce_idx;
2396 } else if (type == NIXLF_PROMISC_ENTRY) {
2397 *mce_list = &pfvf->promisc_mce_list;
2398 *mce_idx = pfvf->promisc_mce_idx;
2405 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2408 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2409 struct npc_mcam *mcam = &rvu->hw->mcam;
2410 struct rvu_hwinfo *hw = rvu->hw;
2411 struct nix_mce_list *mce_list;
2413 /* skip multicast pkt replication for AF's VFs */
2414 if (is_afvf(pcifunc))
2417 if (!hw->cap.nix_rx_multicast)
2420 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2424 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2428 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2430 mcam_index = npc_get_nixlf_mcam_index(mcam,
2431 pcifunc & ~RVU_PFVF_FUNC_MASK,
2433 err = nix_update_mce_list(rvu, pcifunc, mce_list,
2434 mce_idx, mcam_index, add);
2438 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2440 struct nix_mcast *mcast = &nix_hw->mcast;
2441 int err, pf, numvfs, idx;
2442 struct rvu_pfvf *pfvf;
2446 /* Skip PF0 (i.e AF) */
2447 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2448 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2449 /* If PF is not enabled, nothing to do */
2450 if (!((cfg >> 20) & 0x01))
2452 /* Get numVFs attached to this PF */
2453 numvfs = (cfg >> 12) & 0xFF;
2455 pfvf = &rvu->pf[pf];
2457 /* This NIX0/1 block mapped to PF ? */
2458 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2461 /* save start idx of broadcast mce list */
2462 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2463 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2465 /* save start idx of multicast mce list */
2466 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2467 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2469 /* save the start idx of promisc mce list */
2470 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2471 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2473 for (idx = 0; idx < (numvfs + 1); idx++) {
2474 /* idx-0 is for PF, followed by VFs */
2475 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2477 /* Add dummy entries now, so that we don't have to check
2478 * for whether AQ_OP should be INIT/WRITE later on.
2479 * Will be updated when a NIXLF is attached/detached to
2482 err = nix_blk_setup_mce(rvu, nix_hw,
2483 pfvf->bcast_mce_idx + idx,
2489 /* add dummy entries to multicast mce list */
2490 err = nix_blk_setup_mce(rvu, nix_hw,
2491 pfvf->mcast_mce_idx + idx,
2497 /* add dummy entries to promisc mce list */
2498 err = nix_blk_setup_mce(rvu, nix_hw,
2499 pfvf->promisc_mce_idx + idx,
2509 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2511 struct nix_mcast *mcast = &nix_hw->mcast;
2512 struct rvu_hwinfo *hw = rvu->hw;
2515 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2516 size = (1ULL << size);
2518 /* Alloc memory for multicast/mirror replication entries */
2519 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2520 (256UL << MC_TBL_SIZE), size);
2524 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2525 (u64)mcast->mce_ctx->iova);
2527 /* Set max list length equal to max no of VFs per PF + PF itself */
2528 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2529 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2531 /* Alloc memory for multicast replication buffers */
2532 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2533 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2534 (8UL << MC_BUF_CNT), size);
2538 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2539 (u64)mcast->mcast_buf->iova);
2541 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2542 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2544 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2545 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2546 BIT_ULL(20) | MC_BUF_CNT);
2548 mutex_init(&mcast->mce_lock);
2550 return nix_setup_mce_tables(rvu, nix_hw);
2553 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2555 struct nix_txvlan *vlan = &nix_hw->txvlan;
2558 /* Allocate resource bimap for tx vtag def registers*/
2559 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2560 err = rvu_alloc_bitmap(&vlan->rsrc);
2564 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2565 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2566 sizeof(u16), GFP_KERNEL);
2567 if (!vlan->entry2pfvf_map)
2570 mutex_init(&vlan->rsrc_lock);
2574 kfree(vlan->rsrc.bmap);
2578 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2580 struct nix_txsch *txsch;
2584 /* Get scheduler queue count of each type and alloc
2585 * bitmap for each for alloc/free/attach operations.
2587 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2588 txsch = &nix_hw->txsch[lvl];
2591 case NIX_TXSCH_LVL_SMQ:
2592 reg = NIX_AF_MDQ_CONST;
2594 case NIX_TXSCH_LVL_TL4:
2595 reg = NIX_AF_TL4_CONST;
2597 case NIX_TXSCH_LVL_TL3:
2598 reg = NIX_AF_TL3_CONST;
2600 case NIX_TXSCH_LVL_TL2:
2601 reg = NIX_AF_TL2_CONST;
2603 case NIX_TXSCH_LVL_TL1:
2604 reg = NIX_AF_TL1_CONST;
2607 cfg = rvu_read64(rvu, blkaddr, reg);
2608 txsch->schq.max = cfg & 0xFFFF;
2609 err = rvu_alloc_bitmap(&txsch->schq);
2613 /* Allocate memory for scheduler queues to
2614 * PF/VF pcifunc mapping info.
2616 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2617 sizeof(u32), GFP_KERNEL);
2618 if (!txsch->pfvf_map)
2620 for (schq = 0; schq < txsch->schq.max; schq++)
2621 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2626 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2627 int blkaddr, u32 cfg)
2631 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2632 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2635 if (fmt_idx >= nix_hw->mark_format.total)
2638 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2639 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2640 nix_hw->mark_format.in_use++;
2644 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2648 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2649 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2650 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2651 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2652 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2653 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2654 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2655 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2656 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2661 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2662 nix_hw->mark_format.total = (u8)total;
2663 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2665 if (!nix_hw->mark_format.cfg)
2667 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2668 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2670 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2677 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2679 /* CN10K supports LBK FIFO size 72 KB */
2680 if (rvu->hw->lbk_bufsize == 0x12000)
2681 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2683 *max_mtu = NIC_HW_MAX_FRS;
2686 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2688 /* RPM supports FIFO len 128 KB */
2689 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2690 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2692 *max_mtu = NIC_HW_MAX_FRS;
2695 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2696 struct nix_hw_info *rsp)
2698 u16 pcifunc = req->hdr.pcifunc;
2701 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2703 return NIX_AF_ERR_AF_LF_INVALID;
2705 if (is_afvf(pcifunc))
2706 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2708 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2710 rsp->min_mtu = NIC_HW_MIN_FRS;
2714 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2715 struct msg_rsp *rsp)
2717 u16 pcifunc = req->hdr.pcifunc;
2718 int i, nixlf, blkaddr, err;
2721 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2725 /* Get stats count supported by HW */
2726 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2728 /* Reset tx stats */
2729 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2730 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2732 /* Reset rx stats */
2733 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2734 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2739 /* Returns the ALG index to be set into NPC_RX_ACTION */
2740 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2744 /* Scan over exiting algo entries to find a match */
2745 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2746 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2752 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2754 int idx, nr_field, key_off, field_marker, keyoff_marker;
2755 int max_key_off, max_bit_pos, group_member;
2756 struct nix_rx_flowkey_alg *field;
2757 struct nix_rx_flowkey_alg tmp;
2758 u32 key_type, valid_key;
2759 int l4_key_offset = 0;
2764 #define FIELDS_PER_ALG 5
2765 #define MAX_KEY_OFF 40
2766 /* Clear all fields */
2767 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2769 /* Each of the 32 possible flow key algorithm definitions should
2770 * fall into above incremental config (except ALG0). Otherwise a
2771 * single NPC MCAM entry is not sufficient for supporting RSS.
2773 * If a different definition or combination needed then NPC MCAM
2774 * has to be programmed to filter such pkts and it's action should
2775 * point to this definition to calculate flowtag or hash.
2777 * The `for loop` goes over _all_ protocol field and the following
2778 * variables depicts the state machine forward progress logic.
2780 * keyoff_marker - Enabled when hash byte length needs to be accounted
2781 * in field->key_offset update.
2782 * field_marker - Enabled when a new field needs to be selected.
2783 * group_member - Enabled when protocol is part of a group.
2786 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2787 nr_field = 0; key_off = 0; field_marker = 1;
2788 field = &tmp; max_bit_pos = fls(flow_cfg);
2790 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2791 key_off < MAX_KEY_OFF; idx++) {
2792 key_type = BIT(idx);
2793 valid_key = flow_cfg & key_type;
2794 /* Found a field marker, reset the field values */
2796 memset(&tmp, 0, sizeof(tmp));
2798 field_marker = true;
2799 keyoff_marker = true;
2801 case NIX_FLOW_KEY_TYPE_PORT:
2802 field->sel_chan = true;
2803 /* This should be set to 1, when SEL_CHAN is set */
2806 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2807 field->lid = NPC_LID_LC;
2808 field->hdr_offset = 9; /* offset */
2809 field->bytesm1 = 0; /* 1 byte */
2810 field->ltype_match = NPC_LT_LC_IP;
2811 field->ltype_mask = 0xF;
2813 case NIX_FLOW_KEY_TYPE_IPV4:
2814 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2815 field->lid = NPC_LID_LC;
2816 field->ltype_match = NPC_LT_LC_IP;
2817 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2818 field->lid = NPC_LID_LG;
2819 field->ltype_match = NPC_LT_LG_TU_IP;
2821 field->hdr_offset = 12; /* SIP offset */
2822 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2823 field->ltype_mask = 0xF; /* Match only IPv4 */
2824 keyoff_marker = false;
2826 case NIX_FLOW_KEY_TYPE_IPV6:
2827 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2828 field->lid = NPC_LID_LC;
2829 field->ltype_match = NPC_LT_LC_IP6;
2830 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2831 field->lid = NPC_LID_LG;
2832 field->ltype_match = NPC_LT_LG_TU_IP6;
2834 field->hdr_offset = 8; /* SIP offset */
2835 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2836 field->ltype_mask = 0xF; /* Match only IPv6 */
2838 case NIX_FLOW_KEY_TYPE_TCP:
2839 case NIX_FLOW_KEY_TYPE_UDP:
2840 case NIX_FLOW_KEY_TYPE_SCTP:
2841 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2842 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2843 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2844 field->lid = NPC_LID_LD;
2845 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2846 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2847 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2848 field->lid = NPC_LID_LH;
2849 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2851 /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2852 * so no need to change the ltype_match, just change
2853 * the lid for inner protocols
2855 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2856 (int)NPC_LT_LH_TU_TCP);
2857 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2858 (int)NPC_LT_LH_TU_UDP);
2859 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2860 (int)NPC_LT_LH_TU_SCTP);
2862 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2863 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2865 field->ltype_match |= NPC_LT_LD_TCP;
2866 group_member = true;
2867 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2868 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2870 field->ltype_match |= NPC_LT_LD_UDP;
2871 group_member = true;
2872 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2873 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2875 field->ltype_match |= NPC_LT_LD_SCTP;
2876 group_member = true;
2878 field->ltype_mask = ~field->ltype_match;
2879 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2880 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2881 /* Handle the case where any of the group item
2882 * is enabled in the group but not the final one
2886 group_member = false;
2889 field_marker = false;
2890 keyoff_marker = false;
2893 /* TCP/UDP/SCTP and ESP/AH falls at same offset so
2894 * remember the TCP key offset of 40 byte hash key.
2896 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2897 l4_key_offset = key_off;
2899 case NIX_FLOW_KEY_TYPE_NVGRE:
2900 field->lid = NPC_LID_LD;
2901 field->hdr_offset = 4; /* VSID offset */
2903 field->ltype_match = NPC_LT_LD_NVGRE;
2904 field->ltype_mask = 0xF;
2906 case NIX_FLOW_KEY_TYPE_VXLAN:
2907 case NIX_FLOW_KEY_TYPE_GENEVE:
2908 field->lid = NPC_LID_LE;
2910 field->hdr_offset = 4;
2911 field->ltype_mask = 0xF;
2912 field_marker = false;
2913 keyoff_marker = false;
2915 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2916 field->ltype_match |= NPC_LT_LE_VXLAN;
2917 group_member = true;
2920 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2921 field->ltype_match |= NPC_LT_LE_GENEVE;
2922 group_member = true;
2925 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2927 field->ltype_mask = ~field->ltype_match;
2928 field_marker = true;
2929 keyoff_marker = true;
2931 group_member = false;
2935 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2936 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2937 field->lid = NPC_LID_LA;
2938 field->ltype_match = NPC_LT_LA_ETHER;
2939 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2940 field->lid = NPC_LID_LF;
2941 field->ltype_match = NPC_LT_LF_TU_ETHER;
2943 field->hdr_offset = 0;
2944 field->bytesm1 = 5; /* DMAC 6 Byte */
2945 field->ltype_mask = 0xF;
2947 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2948 field->lid = NPC_LID_LC;
2949 field->hdr_offset = 40; /* IPV6 hdr */
2950 field->bytesm1 = 0; /* 1 Byte ext hdr*/
2951 field->ltype_match = NPC_LT_LC_IP6_EXT;
2952 field->ltype_mask = 0xF;
2954 case NIX_FLOW_KEY_TYPE_GTPU:
2955 field->lid = NPC_LID_LE;
2956 field->hdr_offset = 4;
2957 field->bytesm1 = 3; /* 4 bytes TID*/
2958 field->ltype_match = NPC_LT_LE_GTPU;
2959 field->ltype_mask = 0xF;
2961 case NIX_FLOW_KEY_TYPE_VLAN:
2962 field->lid = NPC_LID_LB;
2963 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2964 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2965 field->ltype_match = NPC_LT_LB_CTAG;
2966 field->ltype_mask = 0xF;
2967 field->fn_mask = 1; /* Mask out the first nibble */
2969 case NIX_FLOW_KEY_TYPE_AH:
2970 case NIX_FLOW_KEY_TYPE_ESP:
2971 field->hdr_offset = 0;
2972 field->bytesm1 = 7; /* SPI + sequence number */
2973 field->ltype_mask = 0xF;
2974 field->lid = NPC_LID_LE;
2975 field->ltype_match = NPC_LT_LE_ESP;
2976 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2977 field->lid = NPC_LID_LD;
2978 field->ltype_match = NPC_LT_LD_AH;
2979 field->hdr_offset = 4;
2980 keyoff_marker = false;
2986 /* Found a valid flow key type */
2988 /* Use the key offset of TCP/UDP/SCTP fields
2989 * for ESP/AH fields.
2991 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2992 key_type == NIX_FLOW_KEY_TYPE_AH)
2993 key_off = l4_key_offset;
2994 field->key_offset = key_off;
2995 memcpy(&alg[nr_field], field, sizeof(*field));
2996 max_key_off = max(max_key_off, field->bytesm1 + 1);
2998 /* Found a field marker, get the next field */
3003 /* Found a keyoff marker, update the new key_off */
3004 if (keyoff_marker) {
3005 key_off += max_key_off;
3009 /* Processed all the flow key types */
3010 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3013 return NIX_AF_ERR_RSS_NOSPC_FIELD;
3016 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3018 u64 field[FIELDS_PER_ALG];
3022 hw = get_nix_hw(rvu->hw, blkaddr);
3026 /* No room to add new flow hash algoritham */
3027 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3028 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3030 /* Generate algo fields for the given flow_cfg */
3031 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3035 /* Update ALGX_FIELDX register with generated fields */
3036 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3037 rvu_write64(rvu, blkaddr,
3038 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3041 /* Store the flow_cfg for futher lookup */
3042 rc = hw->flowkey.in_use;
3043 hw->flowkey.flowkey[rc] = flow_cfg;
3044 hw->flowkey.in_use++;
3049 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3050 struct nix_rss_flowkey_cfg *req,
3051 struct nix_rss_flowkey_cfg_rsp *rsp)
3053 u16 pcifunc = req->hdr.pcifunc;
3054 int alg_idx, nixlf, blkaddr;
3055 struct nix_hw *nix_hw;
3058 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3062 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3066 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3067 /* Failed to get algo index from the exiting list, reserve new */
3069 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3074 rsp->alg_idx = alg_idx;
3075 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3076 alg_idx, req->mcam_index);
3080 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3082 u32 flowkey_cfg, minkey_cfg;
3085 /* Disable all flow key algx fieldx */
3086 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3087 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3088 rvu_write64(rvu, blkaddr,
3089 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3093 /* IPv4/IPv6 SIP/DIPs */
3094 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3095 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3099 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3100 minkey_cfg = flowkey_cfg;
3101 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3102 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3106 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3107 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3108 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3112 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3113 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3114 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3118 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3119 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3120 NIX_FLOW_KEY_TYPE_UDP;
3121 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3125 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3126 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3127 NIX_FLOW_KEY_TYPE_SCTP;
3128 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3132 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3133 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3134 NIX_FLOW_KEY_TYPE_SCTP;
3135 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3139 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3140 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3141 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3142 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3149 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3150 struct nix_set_mac_addr *req,
3151 struct msg_rsp *rsp)
3153 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3154 u16 pcifunc = req->hdr.pcifunc;
3155 int blkaddr, nixlf, err;
3156 struct rvu_pfvf *pfvf;
3158 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3162 pfvf = rvu_get_pfvf(rvu, pcifunc);
3164 /* untrusted VF can't overwrite admin(PF) changes */
3165 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3166 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3168 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3172 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3174 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3175 pfvf->rx_chan_base, req->mac_addr);
3177 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3178 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3183 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3184 struct msg_req *req,
3185 struct nix_get_mac_addr_rsp *rsp)
3187 u16 pcifunc = req->hdr.pcifunc;
3188 struct rvu_pfvf *pfvf;
3190 if (!is_nixlf_attached(rvu, pcifunc))
3191 return NIX_AF_ERR_AF_LF_INVALID;
3193 pfvf = rvu_get_pfvf(rvu, pcifunc);
3195 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3200 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3201 struct msg_rsp *rsp)
3203 bool allmulti, promisc, nix_rx_multicast;
3204 u16 pcifunc = req->hdr.pcifunc;
3205 struct rvu_pfvf *pfvf;
3208 pfvf = rvu_get_pfvf(rvu, pcifunc);
3209 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3210 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3211 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3213 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3215 if (is_vf(pcifunc) && !nix_rx_multicast &&
3216 (promisc || allmulti)) {
3217 dev_warn_ratelimited(rvu->dev,
3218 "VF promisc/multicast not supported\n");
3222 /* untrusted VF can't configure promisc/allmulti */
3223 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3224 (promisc || allmulti))
3227 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3231 if (nix_rx_multicast) {
3232 /* add/del this PF_FUNC to/from mcast pkt replication list */
3233 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3237 "Failed to update pcifunc 0x%x to multicast list\n",
3242 /* add/del this PF_FUNC to/from promisc pkt replication list */
3243 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3247 "Failed to update pcifunc 0x%x to promisc list\n",
3253 /* install/uninstall allmulti entry */
3255 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3256 pfvf->rx_chan_base);
3258 if (!nix_rx_multicast)
3259 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3262 /* install/uninstall promisc entry */
3264 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3268 if (!nix_rx_multicast)
3269 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3275 static void nix_find_link_frs(struct rvu *rvu,
3276 struct nix_frs_cfg *req, u16 pcifunc)
3278 int pf = rvu_get_pf(pcifunc);
3279 struct rvu_pfvf *pfvf;
3284 /* Update with requester's min/max lengths */
3285 pfvf = rvu_get_pfvf(rvu, pcifunc);
3286 pfvf->maxlen = req->maxlen;
3287 if (req->update_minlen)
3288 pfvf->minlen = req->minlen;
3290 maxlen = req->maxlen;
3291 minlen = req->update_minlen ? req->minlen : 0;
3293 /* Get this PF's numVFs and starting hwvf */
3294 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3296 /* For each VF, compare requested max/minlen */
3297 for (vf = 0; vf < numvfs; vf++) {
3298 pfvf = &rvu->hwvf[hwvf + vf];
3299 if (pfvf->maxlen > maxlen)
3300 maxlen = pfvf->maxlen;
3301 if (req->update_minlen &&
3302 pfvf->minlen && pfvf->minlen < minlen)
3303 minlen = pfvf->minlen;
3306 /* Compare requested max/minlen with PF's max/minlen */
3307 pfvf = &rvu->pf[pf];
3308 if (pfvf->maxlen > maxlen)
3309 maxlen = pfvf->maxlen;
3310 if (req->update_minlen &&
3311 pfvf->minlen && pfvf->minlen < minlen)
3312 minlen = pfvf->minlen;
3314 /* Update the request with max/min PF's and it's VF's max/min */
3315 req->maxlen = maxlen;
3316 if (req->update_minlen)
3317 req->minlen = minlen;
3320 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3321 struct msg_rsp *rsp)
3323 struct rvu_hwinfo *hw = rvu->hw;
3324 u16 pcifunc = req->hdr.pcifunc;
3325 int pf = rvu_get_pf(pcifunc);
3326 int blkaddr, schq, link = -1;
3327 struct nix_txsch *txsch;
3328 u64 cfg, lmac_fifo_len;
3329 struct nix_hw *nix_hw;
3330 u8 cgx = 0, lmac = 0;
3333 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3335 return NIX_AF_ERR_AF_LF_INVALID;
3337 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3341 if (is_afvf(pcifunc))
3342 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3344 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3346 if (!req->sdp_link && req->maxlen > max_mtu)
3347 return NIX_AF_ERR_FRS_INVALID;
3349 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3350 return NIX_AF_ERR_FRS_INVALID;
3352 /* Check if requester wants to update SMQ's */
3353 if (!req->update_smq)
3356 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3357 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3358 mutex_lock(&rvu->rsrc_lock);
3359 for (schq = 0; schq < txsch->schq.max; schq++) {
3360 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3362 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3363 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3364 if (req->update_minlen)
3365 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3366 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3368 mutex_unlock(&rvu->rsrc_lock);
3371 /* Check if config is for SDP link */
3372 if (req->sdp_link) {
3374 return NIX_AF_ERR_RX_LINK_INVALID;
3375 link = hw->cgx_links + hw->lbk_links;
3379 /* Check if the request is from CGX mapped RVU PF */
3380 if (is_pf_cgxmapped(rvu, pf)) {
3381 /* Get CGX and LMAC to which this PF is mapped and find link */
3382 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3383 link = (cgx * hw->lmac_per_cgx) + lmac;
3384 } else if (pf == 0) {
3385 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3386 link = hw->cgx_links;
3390 return NIX_AF_ERR_RX_LINK_INVALID;
3392 nix_find_link_frs(rvu, req, pcifunc);
3395 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3396 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3397 if (req->update_minlen)
3398 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3399 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3401 if (req->sdp_link || pf == 0)
3404 /* Update transmit credits for CGX links */
3406 rvu_cgx_get_fifolen(rvu) /
3407 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3408 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3409 cfg &= ~(0xFFFFFULL << 12);
3410 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3411 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3415 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3416 struct msg_rsp *rsp)
3418 int nixlf, blkaddr, err;
3421 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3425 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3426 /* Set the interface configuration */
3427 if (req->len_verify & BIT(0))
3430 cfg &= ~BIT_ULL(41);
3432 if (req->len_verify & BIT(1))
3435 cfg &= ~BIT_ULL(40);
3437 if (req->csum_verify & BIT(0))
3440 cfg &= ~BIT_ULL(37);
3442 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3447 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3449 /* CN10k supports 72KB FIFO size and max packet size of 64k */
3450 if (rvu->hw->lbk_bufsize == 0x12000)
3451 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3453 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3456 static void nix_link_config(struct rvu *rvu, int blkaddr)
3458 struct rvu_hwinfo *hw = rvu->hw;
3459 int cgx, lmac_cnt, slink, link;
3460 u16 lbk_max_frs, lmac_max_frs;
3463 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3464 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3466 /* Set default min/max packet lengths allowed on NIX Rx links.
3468 * With HW reset minlen value of 60byte, HW will treat ARP pkts
3469 * as undersize and report them to SW as error pkts, hence
3470 * setting it to 40 bytes.
3472 for (link = 0; link < hw->cgx_links; link++) {
3473 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3474 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3477 for (link = hw->cgx_links; link < hw->lbk_links; link++) {
3478 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3479 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3481 if (hw->sdp_links) {
3482 link = hw->cgx_links + hw->lbk_links;
3483 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3484 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3487 /* Set credits for Tx links assuming max packet length allowed.
3488 * This will be reconfigured based on MTU set for PF/VF.
3490 for (cgx = 0; cgx < hw->cgx; cgx++) {
3491 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3492 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3494 /* Enable credits and set credit pkt count to max allowed */
3495 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3496 slink = cgx * hw->lmac_per_cgx;
3497 for (link = slink; link < (slink + lmac_cnt); link++) {
3498 rvu_write64(rvu, blkaddr,
3499 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3504 /* Set Tx credits for LBK link */
3505 slink = hw->cgx_links;
3506 for (link = slink; link < (slink + hw->lbk_links); link++) {
3507 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3508 /* Enable credits and set credit pkt count to max allowed */
3509 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3510 rvu_write64(rvu, blkaddr,
3511 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3515 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3520 /* Start X2P bus calibration */
3521 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3522 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3523 /* Wait for calibration to complete */
3524 err = rvu_poll_reg(rvu, blkaddr,
3525 NIX_AF_STATUS, BIT_ULL(10), false);
3527 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3531 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3532 /* Check if CGX devices are ready */
3533 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3534 /* Skip when cgx port is not available */
3535 if (!rvu_cgx_pdata(idx, rvu) ||
3536 (status & (BIT_ULL(16 + idx))))
3539 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3543 /* Check if LBK is ready */
3544 if (!(status & BIT_ULL(19))) {
3546 "LBK didn't respond to NIX X2P calibration\n");
3550 /* Clear 'calibrate_x2p' bit */
3551 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3552 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3553 if (err || (status & 0x3FFULL))
3555 "NIX X2P calibration failed, status 0x%llx\n", status);
3561 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3566 /* Set admin queue endianness */
3567 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3570 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3573 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3576 /* Do not bypass NDC cache */
3577 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3579 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3580 /* Disable caching of SQB aka SQEs */
3583 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3585 /* Result structure can be followed by RQ/SQ/CQ context at
3586 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
3587 * operation type. Alloc sufficient result memory for all operations.
3589 err = rvu_aq_alloc(rvu, &block->aq,
3590 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3591 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3595 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3596 rvu_write64(rvu, block->addr,
3597 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3601 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3603 const struct npc_lt_def_cfg *ltdefs;
3604 struct rvu_hwinfo *hw = rvu->hw;
3605 int blkaddr = nix_hw->blkaddr;
3606 struct rvu_block *block;
3610 block = &hw->block[blkaddr];
3612 if (is_rvu_96xx_B0(rvu)) {
3613 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3614 * internal state when conditional clocks are turned off.
3615 * Hence enable them.
3617 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3618 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3620 /* Set chan/link to backpressure TL3 instead of TL2 */
3621 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3623 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
3624 * This sticky mode is known to cause SQ stalls when multiple
3625 * SQs are mapped to same SMQ and transmitting pkts at a time.
3627 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3628 cfg &= ~BIT_ULL(15);
3629 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3632 ltdefs = rvu->kpu.lt_def;
3633 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3634 err = nix_calibrate_x2p(rvu, blkaddr);
3638 /* Initialize admin queue */
3639 err = nix_aq_init(rvu, block);
3643 /* Restore CINT timer delay to HW reset values */
3644 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3646 if (is_block_implemented(hw, blkaddr)) {
3647 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3651 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3655 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3659 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3663 err = nix_setup_txvlan(rvu, nix_hw);
3667 /* Configure segmentation offload formats */
3668 nix_setup_lso(rvu, nix_hw, blkaddr);
3670 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3671 * This helps HW protocol checker to identify headers
3672 * and validate length and checksums.
3674 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3675 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3676 ltdefs->rx_ol2.ltype_mask);
3677 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3678 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3679 ltdefs->rx_oip4.ltype_mask);
3680 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3681 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3682 ltdefs->rx_iip4.ltype_mask);
3683 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3684 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3685 ltdefs->rx_oip6.ltype_mask);
3686 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3687 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3688 ltdefs->rx_iip6.ltype_mask);
3689 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3690 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3691 ltdefs->rx_otcp.ltype_mask);
3692 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3693 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3694 ltdefs->rx_itcp.ltype_mask);
3695 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3696 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3697 ltdefs->rx_oudp.ltype_mask);
3698 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3699 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3700 ltdefs->rx_iudp.ltype_mask);
3701 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3702 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3703 ltdefs->rx_osctp.ltype_mask);
3704 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3705 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3706 ltdefs->rx_isctp.ltype_mask);
3708 if (!is_rvu_otx2(rvu)) {
3709 /* Enable APAD calculation for other protocols
3710 * matching APAD0 and APAD1 lt def registers.
3712 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3713 (ltdefs->rx_apad0.valid << 11) |
3714 (ltdefs->rx_apad0.lid << 8) |
3715 (ltdefs->rx_apad0.ltype_match << 4) |
3716 ltdefs->rx_apad0.ltype_mask);
3717 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3718 (ltdefs->rx_apad1.valid << 11) |
3719 (ltdefs->rx_apad1.lid << 8) |
3720 (ltdefs->rx_apad1.ltype_match << 4) |
3721 ltdefs->rx_apad1.ltype_mask);
3723 /* Receive ethertype defination register defines layer
3724 * information in NPC_RESULT_S to identify the Ethertype
3725 * location in L2 header. Used for Ethertype overwriting
3726 * in inline IPsec flow.
3728 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3729 (ltdefs->rx_et[0].offset << 12) |
3730 (ltdefs->rx_et[0].valid << 11) |
3731 (ltdefs->rx_et[0].lid << 8) |
3732 (ltdefs->rx_et[0].ltype_match << 4) |
3733 ltdefs->rx_et[0].ltype_mask);
3734 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3735 (ltdefs->rx_et[1].offset << 12) |
3736 (ltdefs->rx_et[1].valid << 11) |
3737 (ltdefs->rx_et[1].lid << 8) |
3738 (ltdefs->rx_et[1].ltype_match << 4) |
3739 ltdefs->rx_et[1].ltype_mask);
3742 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3746 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3747 nix_link_config(rvu, blkaddr);
3749 /* Enable Channel backpressure */
3750 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3755 int rvu_nix_init(struct rvu *rvu)
3757 struct rvu_hwinfo *hw = rvu->hw;
3758 struct nix_hw *nix_hw;
3759 int blkaddr = 0, err;
3762 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3767 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3769 nix_hw = &hw->nix[i];
3771 nix_hw->blkaddr = blkaddr;
3772 err = rvu_nix_block_init(rvu, nix_hw);
3775 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3782 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3783 struct rvu_block *block)
3785 struct nix_txsch *txsch;
3786 struct nix_mcast *mcast;
3787 struct nix_txvlan *vlan;
3788 struct nix_hw *nix_hw;
3791 rvu_aq_free(rvu, block->aq);
3793 if (is_block_implemented(rvu->hw, blkaddr)) {
3794 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3798 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3799 txsch = &nix_hw->txsch[lvl];
3800 kfree(txsch->schq.bmap);
3803 nix_ipolicer_freemem(nix_hw);
3805 vlan = &nix_hw->txvlan;
3806 kfree(vlan->rsrc.bmap);
3807 mutex_destroy(&vlan->rsrc_lock);
3808 devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3810 mcast = &nix_hw->mcast;
3811 qmem_free(rvu->dev, mcast->mce_ctx);
3812 qmem_free(rvu->dev, mcast->mcast_buf);
3813 mutex_destroy(&mcast->mce_lock);
3817 void rvu_nix_freemem(struct rvu *rvu)
3819 struct rvu_hwinfo *hw = rvu->hw;
3820 struct rvu_block *block;
3823 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3825 block = &hw->block[blkaddr];
3826 rvu_nix_block_freemem(rvu, blkaddr, block);
3827 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3831 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3832 struct msg_rsp *rsp)
3834 u16 pcifunc = req->hdr.pcifunc;
3835 struct rvu_pfvf *pfvf;
3838 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3842 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3844 npc_mcam_enable_flows(rvu, pcifunc);
3846 pfvf = rvu_get_pfvf(rvu, pcifunc);
3847 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3849 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3852 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3853 struct msg_rsp *rsp)
3855 u16 pcifunc = req->hdr.pcifunc;
3856 struct rvu_pfvf *pfvf;
3859 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3863 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3865 pfvf = rvu_get_pfvf(rvu, pcifunc);
3866 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3868 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3871 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3873 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3874 struct hwctx_disable_req ctx_req;
3877 ctx_req.hdr.pcifunc = pcifunc;
3879 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3880 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3881 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3882 nix_interface_deinit(rvu, pcifunc, nixlf);
3883 nix_rx_sync(rvu, blkaddr);
3884 nix_txschq_free(rvu, pcifunc);
3886 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3888 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3891 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3892 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3894 dev_err(rvu->dev, "SQ ctx disable failed\n");
3898 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3899 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3901 dev_err(rvu->dev, "RQ ctx disable failed\n");
3905 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3906 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3908 dev_err(rvu->dev, "CQ ctx disable failed\n");
3911 nix_ctx_free(rvu, pfvf);
3913 nix_free_all_bandprof(rvu, pcifunc);
3916 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3918 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3920 struct rvu_hwinfo *hw = rvu->hw;
3921 struct rvu_block *block;
3926 pf = rvu_get_pf(pcifunc);
3927 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3930 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3932 return NIX_AF_ERR_AF_LF_INVALID;
3934 block = &hw->block[blkaddr];
3935 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3937 return NIX_AF_ERR_AF_LF_INVALID;
3939 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3942 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3944 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3946 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3951 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3952 struct msg_rsp *rsp)
3954 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3957 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3958 struct msg_rsp *rsp)
3960 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3963 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3964 struct nix_lso_format_cfg *req,
3965 struct nix_lso_format_cfg_rsp *rsp)
3967 u16 pcifunc = req->hdr.pcifunc;
3968 struct nix_hw *nix_hw;
3969 struct rvu_pfvf *pfvf;
3970 int blkaddr, idx, f;
3973 pfvf = rvu_get_pfvf(rvu, pcifunc);
3974 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3975 if (!pfvf->nixlf || blkaddr < 0)
3976 return NIX_AF_ERR_AF_LF_INVALID;
3978 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3982 /* Find existing matching LSO format, if any */
3983 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3984 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3985 reg = rvu_read64(rvu, blkaddr,
3986 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3987 if (req->fields[f] != (reg & req->field_mask))
3991 if (f == NIX_LSO_FIELD_MAX)
3995 if (idx < nix_hw->lso.in_use) {
3997 rsp->lso_format_idx = idx;
4001 if (nix_hw->lso.in_use == nix_hw->lso.total)
4002 return NIX_AF_ERR_LSO_CFG_FAIL;
4004 rsp->lso_format_idx = nix_hw->lso.in_use++;
4006 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4007 rvu_write64(rvu, blkaddr,
4008 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4014 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4016 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4018 /* overwrite vf mac address with default_mac */
4020 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4023 /* NIX ingress policers or bandwidth profiles APIs */
4024 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4026 struct npc_lt_def_cfg defs, *ltdefs;
4029 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4031 /* Extract PCP and DEI fields from outer VLAN from byte offset
4032 * 2 from the start of LB_PTR (ie TAG).
4033 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4034 * fields are considered when 'Tunnel enable' is set in profile.
4036 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4037 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4038 (ltdefs->ovlan.ltype_match << 4) |
4039 ltdefs->ovlan.ltype_mask);
4040 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4041 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4042 (ltdefs->ivlan.ltype_match << 4) |
4043 ltdefs->ivlan.ltype_mask);
4045 /* DSCP field in outer and tunneled IPv4 packets */
4046 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4047 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4048 (ltdefs->rx_oip4.ltype_match << 4) |
4049 ltdefs->rx_oip4.ltype_mask);
4050 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4051 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4052 (ltdefs->rx_iip4.ltype_match << 4) |
4053 ltdefs->rx_iip4.ltype_mask);
4055 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4056 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4057 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4058 (ltdefs->rx_oip6.ltype_match << 4) |
4059 ltdefs->rx_oip6.ltype_mask);
4060 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4061 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4062 (ltdefs->rx_iip6.ltype_match << 4) |
4063 ltdefs->rx_iip6.ltype_mask);
4066 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4067 int layer, int prof_idx)
4069 struct nix_cn10k_aq_enq_req aq_req;
4072 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4074 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4075 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4076 aq_req.op = NIX_AQ_INSTOP_INIT;
4078 /* Context is all zeros, submit to AQ */
4079 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4080 (struct nix_aq_enq_req *)&aq_req, NULL);
4082 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4087 static int nix_setup_ipolicers(struct rvu *rvu,
4088 struct nix_hw *nix_hw, int blkaddr)
4090 struct rvu_hwinfo *hw = rvu->hw;
4091 struct nix_ipolicer *ipolicer;
4092 int err, layer, prof_idx;
4095 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4096 if (!(cfg & BIT_ULL(61))) {
4097 hw->cap.ipolicer = false;
4101 hw->cap.ipolicer = true;
4102 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4103 sizeof(*ipolicer), GFP_KERNEL);
4104 if (!nix_hw->ipolicer)
4107 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4109 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4110 ipolicer = &nix_hw->ipolicer[layer];
4112 case BAND_PROF_LEAF_LAYER:
4113 ipolicer->band_prof.max = cfg & 0XFFFF;
4115 case BAND_PROF_MID_LAYER:
4116 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4118 case BAND_PROF_TOP_LAYER:
4119 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4123 if (!ipolicer->band_prof.max)
4126 err = rvu_alloc_bitmap(&ipolicer->band_prof);
4130 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4131 ipolicer->band_prof.max,
4132 sizeof(u16), GFP_KERNEL);
4133 if (!ipolicer->pfvf_map)
4136 ipolicer->match_id = devm_kcalloc(rvu->dev,
4137 ipolicer->band_prof.max,
4138 sizeof(u16), GFP_KERNEL);
4139 if (!ipolicer->match_id)
4143 prof_idx < ipolicer->band_prof.max; prof_idx++) {
4144 /* Set AF as current owner for INIT ops to succeed */
4145 ipolicer->pfvf_map[prof_idx] = 0x00;
4147 /* There is no enable bit in the profile context,
4148 * so no context disable. So let's INIT them here
4149 * so that PF/VF later on have to just do WRITE to
4150 * setup policer rates and config.
4152 err = nix_init_policer_context(rvu, nix_hw,
4158 /* Allocate memory for maintaining ref_counts for MID level
4159 * profiles, this will be needed for leaf layer profiles'
4162 if (layer != BAND_PROF_MID_LAYER)
4165 ipolicer->ref_count = devm_kcalloc(rvu->dev,
4166 ipolicer->band_prof.max,
4167 sizeof(u16), GFP_KERNEL);
4170 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
4171 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4173 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4178 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4180 struct nix_ipolicer *ipolicer;
4183 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4184 ipolicer = &nix_hw->ipolicer[layer];
4186 if (!ipolicer->band_prof.max)
4189 kfree(ipolicer->band_prof.bmap);
4193 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4194 struct nix_hw *nix_hw, u16 pcifunc)
4196 struct nix_ipolicer *ipolicer;
4197 int layer, hi_layer, prof_idx;
4199 /* Bits [15:14] in profile index represent layer */
4200 layer = (req->qidx >> 14) & 0x03;
4201 prof_idx = req->qidx & 0x3FFF;
4203 ipolicer = &nix_hw->ipolicer[layer];
4204 if (prof_idx >= ipolicer->band_prof.max)
4207 /* Check if the profile is allocated to the requesting PCIFUNC or not
4208 * with the exception of AF. AF is allowed to read and update contexts.
4210 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4213 /* If this profile is linked to higher layer profile then check
4214 * if that profile is also allocated to the requesting PCIFUNC
4217 if (!req->prof.hl_en)
4220 /* Leaf layer profile can link only to mid layer and
4221 * mid layer to top layer.
4223 if (layer == BAND_PROF_LEAF_LAYER)
4224 hi_layer = BAND_PROF_MID_LAYER;
4225 else if (layer == BAND_PROF_MID_LAYER)
4226 hi_layer = BAND_PROF_TOP_LAYER;
4230 ipolicer = &nix_hw->ipolicer[hi_layer];
4231 prof_idx = req->prof.band_prof_id;
4232 if (prof_idx >= ipolicer->band_prof.max ||
4233 ipolicer->pfvf_map[prof_idx] != pcifunc)
4239 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4240 struct nix_bandprof_alloc_req *req,
4241 struct nix_bandprof_alloc_rsp *rsp)
4243 int blkaddr, layer, prof, idx, err;
4244 u16 pcifunc = req->hdr.pcifunc;
4245 struct nix_ipolicer *ipolicer;
4246 struct nix_hw *nix_hw;
4248 if (!rvu->hw->cap.ipolicer)
4249 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4251 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4255 mutex_lock(&rvu->rsrc_lock);
4256 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4257 if (layer == BAND_PROF_INVAL_LAYER)
4259 if (!req->prof_count[layer])
4262 ipolicer = &nix_hw->ipolicer[layer];
4263 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4264 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4265 if (idx == MAX_BANDPROF_PER_PFFUNC)
4268 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4271 rsp->prof_count[layer]++;
4272 rsp->prof_idx[layer][idx] = prof;
4273 ipolicer->pfvf_map[prof] = pcifunc;
4276 mutex_unlock(&rvu->rsrc_lock);
4280 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4282 int blkaddr, layer, prof_idx, err;
4283 struct nix_ipolicer *ipolicer;
4284 struct nix_hw *nix_hw;
4286 if (!rvu->hw->cap.ipolicer)
4287 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4289 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4293 mutex_lock(&rvu->rsrc_lock);
4294 /* Free all the profiles allocated to the PCIFUNC */
4295 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4296 if (layer == BAND_PROF_INVAL_LAYER)
4298 ipolicer = &nix_hw->ipolicer[layer];
4300 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4301 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4304 /* Clear ratelimit aggregation, if any */
4305 if (layer == BAND_PROF_LEAF_LAYER &&
4306 ipolicer->match_id[prof_idx])
4307 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4309 ipolicer->pfvf_map[prof_idx] = 0x00;
4310 ipolicer->match_id[prof_idx] = 0;
4311 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4314 mutex_unlock(&rvu->rsrc_lock);
4318 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4319 struct nix_bandprof_free_req *req,
4320 struct msg_rsp *rsp)
4322 int blkaddr, layer, prof_idx, idx, err;
4323 u16 pcifunc = req->hdr.pcifunc;
4324 struct nix_ipolicer *ipolicer;
4325 struct nix_hw *nix_hw;
4328 return nix_free_all_bandprof(rvu, pcifunc);
4330 if (!rvu->hw->cap.ipolicer)
4331 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4333 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4337 mutex_lock(&rvu->rsrc_lock);
4338 /* Free the requested profile indices */
4339 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4340 if (layer == BAND_PROF_INVAL_LAYER)
4342 if (!req->prof_count[layer])
4345 ipolicer = &nix_hw->ipolicer[layer];
4346 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4347 prof_idx = req->prof_idx[layer][idx];
4348 if (prof_idx >= ipolicer->band_prof.max ||
4349 ipolicer->pfvf_map[prof_idx] != pcifunc)
4352 /* Clear ratelimit aggregation, if any */
4353 if (layer == BAND_PROF_LEAF_LAYER &&
4354 ipolicer->match_id[prof_idx])
4355 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4357 ipolicer->pfvf_map[prof_idx] = 0x00;
4358 ipolicer->match_id[prof_idx] = 0;
4359 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4360 if (idx == MAX_BANDPROF_PER_PFFUNC)
4364 mutex_unlock(&rvu->rsrc_lock);
4368 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4369 struct nix_cn10k_aq_enq_req *aq_req,
4370 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4371 u16 pcifunc, u8 ctype, u32 qidx)
4373 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4374 aq_req->hdr.pcifunc = pcifunc;
4375 aq_req->ctype = ctype;
4376 aq_req->op = NIX_AQ_INSTOP_READ;
4377 aq_req->qidx = qidx;
4379 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4380 (struct nix_aq_enq_req *)aq_req,
4381 (struct nix_aq_enq_rsp *)aq_rsp);
4384 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4385 struct nix_hw *nix_hw,
4386 struct nix_cn10k_aq_enq_req *aq_req,
4387 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4388 u32 leaf_prof, u16 mid_prof)
4390 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4391 aq_req->hdr.pcifunc = 0x00;
4392 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4393 aq_req->op = NIX_AQ_INSTOP_WRITE;
4394 aq_req->qidx = leaf_prof;
4396 aq_req->prof.band_prof_id = mid_prof;
4397 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4398 aq_req->prof.hl_en = 1;
4399 aq_req->prof_mask.hl_en = 1;
4401 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4402 (struct nix_aq_enq_req *)aq_req,
4403 (struct nix_aq_enq_rsp *)aq_rsp);
4406 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
4407 u16 rq_idx, u16 match_id)
4409 int leaf_prof, mid_prof, leaf_match;
4410 struct nix_cn10k_aq_enq_req aq_req;
4411 struct nix_cn10k_aq_enq_rsp aq_rsp;
4412 struct nix_ipolicer *ipolicer;
4413 struct nix_hw *nix_hw;
4414 int blkaddr, idx, rc;
4416 if (!rvu->hw->cap.ipolicer)
4419 rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4423 /* Fetch the RQ's context to see if policing is enabled */
4424 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
4425 NIX_AQ_CTYPE_RQ, rq_idx);
4428 "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
4429 __func__, rq_idx, pcifunc);
4433 if (!aq_rsp.rq.policer_ena)
4436 /* Get the bandwidth profile ID mapped to this RQ */
4437 leaf_prof = aq_rsp.rq.band_prof_id;
4439 ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
4440 ipolicer->match_id[leaf_prof] = match_id;
	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map 'leaf_prof'
		 * to it as well, so that flows steered to different RQs
		 * but marked with the same match_id are rate limited in
		 * an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);

		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	/* qidx encodes the profile index in bits 13:0 and the
	 * policer layer in bits 15:14.
	 */
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

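/* Mid layer profile lifecycle under the scheme above, sketched: the
 * first pair of matching leaves allocates the mid profile and leaves
 * ref_count at 2 (one per mapped leaf); every later leaf with the
 * same match_id bumps it by one; nix_clear_ratelimit_aggr() below
 * drops it and frees the mid profile once it reaches zero.
 */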
/* Called with mutex rsrc_lock; it is dropped and re-acquired around
 * the AQ context read below.
 */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	/* Drop the lock across the AQ context read, re-take it after */
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
	}
}