1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
26 static int nix_setup_ipolicers(struct rvu *rvu,
27 struct nix_hw *nix_hw, int blkaddr);
28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
30 struct nix_hw *nix_hw, u16 pcifunc);
31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
59 enum nix_mark_fmt_indexes {
60 NIX_MARK_CFG_IP_DSCP_RED,
61 NIX_MARK_CFG_IP_DSCP_YELLOW,
62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 NIX_MARK_CFG_IP_ECN_RED,
64 NIX_MARK_CFG_IP_ECN_YELLOW,
65 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 NIX_MARK_CFG_VLAN_DEI_RED,
67 NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
72 /* For now, considering MC resources needed for broadcast
73 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
75 #define MC_TBL_SIZE MC_TBL_SZ_512
76 #define MC_BUF_CNT MC_BUF_CNT_128
79 struct hlist_node node;
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
87 /* If blkaddr is 0, return the first NIX block address */
89 return rvu->nix_blkaddr[blkaddr];
91 while (i + 1 < MAX_NIX_BLKS) {
92 if (rvu->nix_blkaddr[i] == blkaddr)
93 return rvu->nix_blkaddr[i + 1];
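/* Check whether a NIX LF is attached to the given PF_FUNC */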
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 if (!pfvf->nixlf || blkaddr < 0)
111 int rvu_get_nixlf_count(struct rvu *rvu)
113 int blkaddr = 0, max = 0;
114 struct rvu_block *block;
116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
118 block = &rvu->hw->block[blkaddr];
119 max += block->lf.max;
120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 struct rvu_hwinfo *hw = rvu->hw;
131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 if (!pfvf->nixlf || blkaddr < 0)
133 return NIX_AF_ERR_AF_LF_INVALID;
135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
137 return NIX_AF_ERR_AF_LF_INVALID;
140 *nix_blkaddr = blkaddr;
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 struct nix_hw **nix_hw, int *blkaddr)
148 struct rvu_pfvf *pfvf;
150 pfvf = rvu_get_pfvf(rvu, pcifunc);
151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 if (!pfvf->nixlf || *blkaddr < 0)
153 return NIX_AF_ERR_AF_LF_INVALID;
155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
157 return NIX_AF_ERR_INVALID_NIXBLK;
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
163 INIT_HLIST_HEAD(&list->head);
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
175 idx = mcast->next_free_mce;
176 mcast->next_free_mce += count;
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
182 int nix_blkaddr = 0, i = 0;
183 struct rvu *rvu = hw->rvu;
185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 while (nix_blkaddr) {
187 if (blkaddr == nix_blkaddr && hw->nix)
189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
195 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
199 /* Sync all in-flight RX packets to LLC/DRAM */
200 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
201 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
203 dev_err(rvu->dev, "NIX RX software sync failed\n");
206 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
207 int lvl, u16 pcifunc, u16 schq)
209 struct rvu_hwinfo *hw = rvu->hw;
210 struct nix_txsch *txsch;
211 struct nix_hw *nix_hw;
214 nix_hw = get_nix_hw(rvu->hw, blkaddr);
218 txsch = &nix_hw->txsch[lvl];
219 /* Check out of bounds */
220 if (schq >= txsch->schq.max)
223 mutex_lock(&rvu->rsrc_lock);
224 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
225 mutex_unlock(&rvu->rsrc_lock);
227 /* TLs aggregating traffic are shared across PF and VFs */
228 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
229 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
235 if (map_func != pcifunc)
241 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
243 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
244 struct mac_ops *mac_ops;
245 int pkind, pf, vf, lbkid;
249 pf = rvu_get_pf(pcifunc);
250 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
254 case NIX_INTF_TYPE_CGX:
255 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
256 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
258 pkind = rvu_npc_get_pkind(rvu, pf);
261 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
264 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
265 pfvf->tx_chan_base = pfvf->rx_chan_base;
266 pfvf->rx_chan_cnt = 1;
267 pfvf->tx_chan_cnt = 1;
268 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
269 rvu_npc_set_pkind(rvu, pkind, pfvf);
271 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
272 /* By default we enable pause frames */
273 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
274 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
276 lmac_id, true, true);
278 case NIX_INTF_TYPE_LBK:
279 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
281 /* If NIX1 block is present on the silicon then NIXes are
282 * assigned alternately for lbk interfaces. NIX0 should
283 * send packets on lbk link 1 channels and NIX1 should send
284 * on lbk link 0 channels for the communication between
288 if (rvu->hw->lbk_links > 1)
289 lbkid = vf & 0x1 ? 0 : 1;
291 /* Note that AF's VFs work in pairs and talk over consecutive
292 * loopback channels. Therefore, if an odd number of AF VFs is
293 * enabled, the last VF remains without a pair.
295 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
296 pfvf->tx_chan_base = vf & 0x1 ?
297 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
298 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
299 pfvf->rx_chan_cnt = 1;
300 pfvf->tx_chan_cnt = 1;
301 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
307 /* Add a UCAST forwarding rule in MCAM matching the MAC address
308 * of the RVU PF/VF to which this NIXLF is attached.
310 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
311 pfvf->rx_chan_base, pfvf->mac_addr);
313 /* Add this PF_FUNC to bcast pkt replication list */
314 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
317 "Bcast list, failed to enable PF_FUNC 0x%x\n",
321 /* Install MCAM rule matching Ethernet broadcast mac address */
322 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
323 nixlf, pfvf->rx_chan_base);
325 pfvf->maxlen = NIC_HW_MIN_FRS;
326 pfvf->minlen = NIC_HW_MIN_FRS;
331 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
333 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
339 /* Remove this PF_FUNC from bcast pkt replication list */
340 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
343 "Bcast list, failed to disable PF_FUNC 0x%x\n",
347 /* Free and disable any MCAM entries used by this NIX LF */
348 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
350 /* Disable DMAC filters used */
351 rvu_cgx_disable_dmac_entries(rvu, pcifunc);
354 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
355 struct nix_bp_cfg_req *req,
358 u16 pcifunc = req->hdr.pcifunc;
359 struct rvu_pfvf *pfvf;
360 int blkaddr, pf, type;
364 pf = rvu_get_pf(pcifunc);
365 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
366 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
369 pfvf = rvu_get_pfvf(rvu, pcifunc);
370 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
372 chan_base = pfvf->rx_chan_base + req->chan_base;
373 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
374 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
375 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
381 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
382 int type, int chan_id)
384 int bpid, blkaddr, lmac_chan_cnt;
385 struct rvu_hwinfo *hw = rvu->hw;
386 u16 cgx_bpid_cnt, lbk_bpid_cnt;
387 struct rvu_pfvf *pfvf;
391 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
392 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
393 lmac_chan_cnt = cfg & 0xFF;
395 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
396 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
398 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
400 /* Backpressure IDs range division
401 * CGX channels are mapped to (0 - 191) BPIDs
402 * LBK channels are mapped to (192 - 255) BPIDs
403 * SDP channels are mapped to (256 - 511) BPIDs
405 * LMAC channels and bpids are mapped as follows:
406 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
407 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
408 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
411 case NIX_INTF_TYPE_CGX:
412 if ((req->chan_base + req->chan_cnt) > 15)
414 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
415 /* Assign bpid based on cgx, lmac and chan id */
416 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
417 (lmac_id * lmac_chan_cnt) + req->chan_base;
419 if (req->bpid_per_chan)
421 if (bpid > cgx_bpid_cnt)
425 case NIX_INTF_TYPE_LBK:
426 if ((req->chan_base + req->chan_cnt) > 63)
428 bpid = cgx_bpid_cnt + req->chan_base;
429 if (req->bpid_per_chan)
431 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
440 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
441 struct nix_bp_cfg_req *req,
442 struct nix_bp_cfg_rsp *rsp)
444 int blkaddr, pf, type, chan_id = 0;
445 u16 pcifunc = req->hdr.pcifunc;
446 struct rvu_pfvf *pfvf;
451 pf = rvu_get_pf(pcifunc);
452 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
454 /* Enable backpressure only for CGX mapped PFs and LBK interface */
455 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
458 pfvf = rvu_get_pfvf(rvu, pcifunc);
459 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
461 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
462 chan_base = pfvf->rx_chan_base + req->chan_base;
465 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
467 dev_warn(rvu->dev, "Failed to enable backpressure\n");
471 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
472 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
473 cfg | (bpid & 0xFF) | BIT_ULL(16));
475 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
478 for (chan = 0; chan < req->chan_cnt; chan++) {
479 /* Map channel and the bpid assigned to it */
480 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
482 if (req->bpid_per_chan)
485 rsp->chan_cnt = req->chan_cnt;
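/* Program the outer-L3 LSO format fields: add the payload length to the
 * IP length field and, for IPv4, add the segment number to the ID field.
 */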
490 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
491 u64 format, bool v4, u64 *fidx)
493 struct nix_lso_format field = {0};
495 /* IP's Length field */
496 field.layer = NIX_TXLAYER_OL3;
497 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
498 field.offset = v4 ? 2 : 4;
499 field.sizem1 = 1; /* i.e. 2 bytes */
500 field.alg = NIX_LSOALG_ADD_PAYLEN;
501 rvu_write64(rvu, blkaddr,
502 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
505 /* No ID field in IPv6 header */
510 field.layer = NIX_TXLAYER_OL3;
512 field.sizem1 = 1; /* i.e. 2 bytes */
513 field.alg = NIX_LSOALG_ADD_SEGNUM;
514 rvu_write64(rvu, blkaddr,
515 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
519 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
520 u64 format, u64 *fidx)
522 struct nix_lso_format field = {0};
524 /* TCP's sequence number field */
525 field.layer = NIX_TXLAYER_OL4;
527 field.sizem1 = 3; /* i.e. 4 bytes */
528 field.alg = NIX_LSOALG_ADD_OFFSET;
529 rvu_write64(rvu, blkaddr,
530 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
533 /* TCP's flags field */
534 field.layer = NIX_TXLAYER_OL4;
536 field.sizem1 = 1; /* 2 bytes */
537 field.alg = NIX_LSOALG_TCP_FLAGS;
538 rvu_write64(rvu, blkaddr,
539 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
543 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
545 u64 cfg, idx, fidx = 0;
547 /* Get max HW supported format indices */
548 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
549 nix_hw->lso.total = cfg;
552 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
553 /* For TSO, set first and middle segment flags to
554 * mask out PSH, RST & FIN flags in TCP packet
556 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
557 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
558 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
560 /* Setup default static LSO formats
562 * Configure format fields for TCPv4 segmentation offload
564 idx = NIX_LSO_FORMAT_IDX_TSOV4;
565 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
566 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
568 /* Set rest of the fields to NOP */
569 for (; fidx < 8; fidx++) {
570 rvu_write64(rvu, blkaddr,
571 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
573 nix_hw->lso.in_use++;
575 /* Configure format fields for TCPv6 segmentation offload */
576 idx = NIX_LSO_FORMAT_IDX_TSOV6;
578 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
579 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
581 /* Set rest of the fields to NOP */
582 for (; fidx < 8; fidx++) {
583 rvu_write64(rvu, blkaddr,
584 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
586 nix_hw->lso.in_use++;
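/* Free all HW context memory and queue bitmaps allocated for this LF */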
589 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
591 kfree(pfvf->rq_bmap);
592 kfree(pfvf->sq_bmap);
593 kfree(pfvf->cq_bmap);
595 qmem_free(rvu->dev, pfvf->rq_ctx);
597 qmem_free(rvu->dev, pfvf->sq_ctx);
599 qmem_free(rvu->dev, pfvf->cq_ctx);
601 qmem_free(rvu->dev, pfvf->rss_ctx);
602 if (pfvf->nix_qints_ctx)
603 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
604 if (pfvf->cq_ints_ctx)
605 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
607 pfvf->rq_bmap = NULL;
608 pfvf->cq_bmap = NULL;
609 pfvf->sq_bmap = NULL;
613 pfvf->rss_ctx = NULL;
614 pfvf->nix_qints_ctx = NULL;
615 pfvf->cq_ints_ctx = NULL;
618 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
619 struct rvu_pfvf *pfvf, int nixlf,
620 int rss_sz, int rss_grps, int hwctx_size,
623 int err, grp, num_indices;
625 /* RSS is not requested for this NIXLF */
628 num_indices = rss_sz * rss_grps;
630 /* Alloc NIX RSS HW context memory and config the base */
631 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
635 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
636 (u64)pfvf->rss_ctx->iova);
638 /* Config full RSS table size, enable RSS and caching */
639 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
640 BIT_ULL(36) | BIT_ULL(4) |
641 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
643 /* Config RSS group offset and sizes */
644 for (grp = 0; grp < rss_grps; grp++)
645 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
646 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
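/* Append one instruction at the AQ head, ring the doorbell and poll
 * the result memory until the completion code is updated.
 */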
650 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
651 struct nix_aq_inst_s *inst)
653 struct admin_queue *aq = block->aq;
654 struct nix_aq_res_s *result;
658 result = (struct nix_aq_res_s *)aq->res->base;
660 /* Get the current head pointer, where this instruction will be appended */
661 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
662 head = (reg >> 4) & AQ_PTR_MASK;
664 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
665 (void *)inst, aq->inst->entry_sz);
666 memset(result, 0, sizeof(*result));
667 /* sync into memory */
670 /* Ring the doorbell and wait for result */
671 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
672 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
680 if (result->compcode != NIX_AQ_COMP_GOOD)
681 /* TODO: Replace this with some error code */
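/* Validate an AQ enqueue request, build the instruction, submit it and
 * copy any read context or updated queue state back to the caller.
 */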
687 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
688 struct nix_aq_enq_req *req,
689 struct nix_aq_enq_rsp *rsp)
691 struct rvu_hwinfo *hw = rvu->hw;
692 u16 pcifunc = req->hdr.pcifunc;
693 int nixlf, blkaddr, rc = 0;
694 struct nix_aq_inst_s inst;
695 struct rvu_block *block;
696 struct admin_queue *aq;
697 struct rvu_pfvf *pfvf;
702 blkaddr = nix_hw->blkaddr;
703 block = &hw->block[blkaddr];
706 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
707 return NIX_AF_ERR_AQ_ENQUEUE;
710 pfvf = rvu_get_pfvf(rvu, pcifunc);
711 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
713 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
714 * operations done by AF itself.
716 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
717 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
718 if (!pfvf->nixlf || nixlf < 0)
719 return NIX_AF_ERR_AF_LF_INVALID;
722 switch (req->ctype) {
723 case NIX_AQ_CTYPE_RQ:
724 /* Check if index exceeds max no of queues */
725 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
726 rc = NIX_AF_ERR_AQ_ENQUEUE;
728 case NIX_AQ_CTYPE_SQ:
729 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
730 rc = NIX_AF_ERR_AQ_ENQUEUE;
732 case NIX_AQ_CTYPE_CQ:
733 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
734 rc = NIX_AF_ERR_AQ_ENQUEUE;
736 case NIX_AQ_CTYPE_RSS:
737 /* Check if RSS is enabled and qidx is within range */
738 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
739 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
740 (req->qidx >= (256UL << (cfg & 0xF))))
741 rc = NIX_AF_ERR_AQ_ENQUEUE;
743 case NIX_AQ_CTYPE_MCE:
744 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
746 /* Check if index exceeds MCE list length */
747 if (!nix_hw->mcast.mce_ctx ||
748 (req->qidx >= (256UL << (cfg & 0xF))))
749 rc = NIX_AF_ERR_AQ_ENQUEUE;
751 /* Adding multicast lists for requests from PF/VFs is not
752 * yet supported, so ignore this.
755 rc = NIX_AF_ERR_AQ_ENQUEUE;
757 case NIX_AQ_CTYPE_BANDPROF:
758 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
760 rc = NIX_AF_ERR_INVALID_BANDPROF;
763 rc = NIX_AF_ERR_AQ_ENQUEUE;
769 /* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
770 if (req->ctype == NIX_AQ_CTYPE_SQ &&
771 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
772 (req->op == NIX_AQ_INSTOP_WRITE &&
773 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
774 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
775 pcifunc, req->sq.smq))
776 return NIX_AF_ERR_AQ_ENQUEUE;
779 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
781 inst.cindex = req->qidx;
782 inst.ctype = req->ctype;
784 /* Currently we are not supporting enqueuing multiple instructions,
785 * so always choose first entry in result memory.
787 inst.res_addr = (u64)aq->res->iova;
789 /* Hardware uses the same aq->res->base for updating the result of
790 * the previous instruction, hence wait here till it is done.
792 spin_lock(&aq->lock);
794 /* Clean result + context memory */
795 memset(aq->res->base, 0, aq->res->entry_sz);
796 /* Context needs to be written at RES_ADDR + 128 */
797 ctx = aq->res->base + 128;
798 /* Mask needs to be written at RES_ADDR + 256 */
799 mask = aq->res->base + 256;
802 case NIX_AQ_INSTOP_WRITE:
803 if (req->ctype == NIX_AQ_CTYPE_RQ)
804 memcpy(mask, &req->rq_mask,
805 sizeof(struct nix_rq_ctx_s));
806 else if (req->ctype == NIX_AQ_CTYPE_SQ)
807 memcpy(mask, &req->sq_mask,
808 sizeof(struct nix_sq_ctx_s));
809 else if (req->ctype == NIX_AQ_CTYPE_CQ)
810 memcpy(mask, &req->cq_mask,
811 sizeof(struct nix_cq_ctx_s));
812 else if (req->ctype == NIX_AQ_CTYPE_RSS)
813 memcpy(mask, &req->rss_mask,
814 sizeof(struct nix_rsse_s));
815 else if (req->ctype == NIX_AQ_CTYPE_MCE)
816 memcpy(mask, &req->mce_mask,
817 sizeof(struct nix_rx_mce_s));
818 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
819 memcpy(mask, &req->prof_mask,
820 sizeof(struct nix_bandprof_s));
822 case NIX_AQ_INSTOP_INIT:
823 if (req->ctype == NIX_AQ_CTYPE_RQ)
824 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
825 else if (req->ctype == NIX_AQ_CTYPE_SQ)
826 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
827 else if (req->ctype == NIX_AQ_CTYPE_CQ)
828 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
829 else if (req->ctype == NIX_AQ_CTYPE_RSS)
830 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
831 else if (req->ctype == NIX_AQ_CTYPE_MCE)
832 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
833 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
834 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
836 case NIX_AQ_INSTOP_NOP:
837 case NIX_AQ_INSTOP_READ:
838 case NIX_AQ_INSTOP_LOCK:
839 case NIX_AQ_INSTOP_UNLOCK:
842 rc = NIX_AF_ERR_AQ_ENQUEUE;
843 spin_unlock(&aq->lock);
847 /* Submit the instruction to AQ */
848 rc = nix_aq_enqueue_wait(rvu, block, &inst);
850 spin_unlock(&aq->lock);
854 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
855 if (req->op == NIX_AQ_INSTOP_INIT) {
856 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
857 __set_bit(req->qidx, pfvf->rq_bmap);
858 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
859 __set_bit(req->qidx, pfvf->sq_bmap);
860 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
861 __set_bit(req->qidx, pfvf->cq_bmap);
864 if (req->op == NIX_AQ_INSTOP_WRITE) {
865 if (req->ctype == NIX_AQ_CTYPE_RQ) {
866 ena = (req->rq.ena & req->rq_mask.ena) |
867 (test_bit(req->qidx, pfvf->rq_bmap) &
870 __set_bit(req->qidx, pfvf->rq_bmap);
872 __clear_bit(req->qidx, pfvf->rq_bmap);
874 if (req->ctype == NIX_AQ_CTYPE_SQ) {
875 ena = (req->sq.ena & req->sq_mask.ena) |
876 (test_bit(req->qidx, pfvf->sq_bmap) &
879 __set_bit(req->qidx, pfvf->sq_bmap);
881 __clear_bit(req->qidx, pfvf->sq_bmap);
883 if (req->ctype == NIX_AQ_CTYPE_CQ) {
884 ena = (req->cq.ena & req->cq_mask.ena) |
885 (test_bit(req->qidx, pfvf->cq_bmap) &
888 __set_bit(req->qidx, pfvf->cq_bmap);
890 __clear_bit(req->qidx, pfvf->cq_bmap);
895 /* Copy read context into mailbox */
896 if (req->op == NIX_AQ_INSTOP_READ) {
897 if (req->ctype == NIX_AQ_CTYPE_RQ)
898 memcpy(&rsp->rq, ctx,
899 sizeof(struct nix_rq_ctx_s));
900 else if (req->ctype == NIX_AQ_CTYPE_SQ)
901 memcpy(&rsp->sq, ctx,
902 sizeof(struct nix_sq_ctx_s));
903 else if (req->ctype == NIX_AQ_CTYPE_CQ)
904 memcpy(&rsp->cq, ctx,
905 sizeof(struct nix_cq_ctx_s));
906 else if (req->ctype == NIX_AQ_CTYPE_RSS)
907 memcpy(&rsp->rss, ctx,
908 sizeof(struct nix_rsse_s));
909 else if (req->ctype == NIX_AQ_CTYPE_MCE)
910 memcpy(&rsp->mce, ctx,
911 sizeof(struct nix_rx_mce_s));
912 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
913 memcpy(&rsp->prof, ctx,
914 sizeof(struct nix_bandprof_s));
918 spin_unlock(&aq->lock);
922 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
923 struct nix_aq_enq_rsp *rsp)
925 struct nix_hw *nix_hw;
928 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
930 return NIX_AF_ERR_AF_LF_INVALID;
932 nix_hw = get_nix_hw(rvu->hw, blkaddr);
936 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
939 static const char *nix_get_ctx_name(int ctype)
942 case NIX_AQ_CTYPE_CQ:
944 case NIX_AQ_CTYPE_SQ:
946 case NIX_AQ_CTYPE_RQ:
948 case NIX_AQ_CTYPE_RSS:
954 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
956 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
957 struct nix_aq_enq_req aq_req;
962 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
963 return NIX_AF_ERR_AQ_ENQUEUE;
965 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
966 aq_req.hdr.pcifunc = req->hdr.pcifunc;
968 if (req->ctype == NIX_AQ_CTYPE_CQ) {
970 aq_req.cq_mask.ena = 1;
971 aq_req.cq.bp_ena = 0;
972 aq_req.cq_mask.bp_ena = 1;
973 q_cnt = pfvf->cq_ctx->qsize;
974 bmap = pfvf->cq_bmap;
976 if (req->ctype == NIX_AQ_CTYPE_SQ) {
978 aq_req.sq_mask.ena = 1;
979 q_cnt = pfvf->sq_ctx->qsize;
980 bmap = pfvf->sq_bmap;
982 if (req->ctype == NIX_AQ_CTYPE_RQ) {
984 aq_req.rq_mask.ena = 1;
985 q_cnt = pfvf->rq_ctx->qsize;
986 bmap = pfvf->rq_bmap;
989 aq_req.ctype = req->ctype;
990 aq_req.op = NIX_AQ_INSTOP_WRITE;
992 for (qidx = 0; qidx < q_cnt; qidx++) {
993 if (!test_bit(qidx, bmap))
996 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
999 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1000 nix_get_ctx_name(req->ctype), qidx);
1007 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1008 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1010 struct nix_aq_enq_req lock_ctx_req;
1013 if (req->op != NIX_AQ_INSTOP_INIT)
1016 if (req->ctype == NIX_AQ_CTYPE_MCE ||
1017 req->ctype == NIX_AQ_CTYPE_DYNO)
1020 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1021 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1022 lock_ctx_req.ctype = req->ctype;
1023 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1024 lock_ctx_req.qidx = req->qidx;
1025 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1028 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1030 nix_get_ctx_name(req->ctype), req->qidx);
1034 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1035 struct nix_aq_enq_req *req,
1036 struct nix_aq_enq_rsp *rsp)
1040 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1042 err = nix_lf_hwctx_lockdown(rvu, req);
1047 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1048 struct nix_aq_enq_req *req,
1049 struct nix_aq_enq_rsp *rsp)
1051 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1054 /* CN10K mbox handler */
1055 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1056 struct nix_cn10k_aq_enq_req *req,
1057 struct nix_cn10k_aq_enq_rsp *rsp)
1059 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1060 (struct nix_aq_enq_rsp *)rsp);
1063 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1064 struct hwctx_disable_req *req,
1065 struct msg_rsp *rsp)
1067 return nix_lf_hwctx_disable(rvu, req);
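/* Mailbox handler: allocate and initialize a NIX LF for the requesting
 * PF/VF: RQ/SQ/CQ/RSS contexts, interrupt contexts and default config.
 */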
1070 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1071 struct nix_lf_alloc_req *req,
1072 struct nix_lf_alloc_rsp *rsp)
1074 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1075 struct rvu_hwinfo *hw = rvu->hw;
1076 u16 pcifunc = req->hdr.pcifunc;
1077 struct rvu_block *block;
1078 struct rvu_pfvf *pfvf;
1082 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1083 return NIX_AF_ERR_PARAM;
1086 req->way_mask &= 0xFFFF;
1088 pfvf = rvu_get_pfvf(rvu, pcifunc);
1089 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1090 if (!pfvf->nixlf || blkaddr < 0)
1091 return NIX_AF_ERR_AF_LF_INVALID;
1093 block = &hw->block[blkaddr];
1094 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1096 return NIX_AF_ERR_AF_LF_INVALID;
1098 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1099 if (req->npa_func) {
1100 /* If default, use 'this' NIXLF's PFFUNC */
1101 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1102 req->npa_func = pcifunc;
1103 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1104 return NIX_AF_INVAL_NPA_PF_FUNC;
1107 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1108 if (req->sso_func) {
1109 /* If default, use 'this' NIXLF's PFFUNC */
1110 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1111 req->sso_func = pcifunc;
1112 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1113 return NIX_AF_INVAL_SSO_PF_FUNC;
1116 /* If RSS is being enabled, check if requested config is valid.
1117 * RSS table size should be a power of two, otherwise
1118 * RSS_GRP::OFFSET + adder might go beyond that group or
1119 * we won't be able to use the entire table.
1121 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1122 !is_power_of_2(req->rss_sz)))
1123 return NIX_AF_ERR_RSS_SIZE_INVALID;
1126 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1127 return NIX_AF_ERR_RSS_GRPS_INVALID;
1129 /* Reset this NIX LF */
1130 err = rvu_lf_reset(rvu, block, nixlf);
1132 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1133 block->addr - BLKADDR_NIX0, nixlf);
1134 return NIX_AF_ERR_LF_RESET;
1137 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1139 /* Alloc NIX RQ HW context memory and config the base */
1140 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1141 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1145 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1149 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1150 (u64)pfvf->rq_ctx->iova);
1152 /* Set caching and queue count in HW */
1153 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1154 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1156 /* Alloc NIX SQ HW context memory and config the base */
1157 hwctx_size = 1UL << (ctx_cfg & 0xF);
1158 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1162 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1167 (u64)pfvf->sq_ctx->iova);
1169 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1172 /* Alloc NIX CQ HW context memory and config the base */
1173 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1174 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1178 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1182 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1183 (u64)pfvf->cq_ctx->iova);
1185 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1186 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1188 /* Initialize receive side scaling (RSS) */
1189 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1190 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1191 req->rss_grps, hwctx_size, req->way_mask);
1195 /* Alloc memory for CQINT's HW contexts */
1196 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1197 qints = (cfg >> 24) & 0xFFF;
1198 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1199 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1203 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1204 (u64)pfvf->cq_ints_ctx->iova);
1206 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1207 BIT_ULL(36) | req->way_mask << 20);
1209 /* Alloc memory for QINT's HW contexts */
1210 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1211 qints = (cfg >> 12) & 0xFFF;
1212 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1213 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1217 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1218 (u64)pfvf->nix_qints_ctx->iova);
1219 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1220 BIT_ULL(36) | req->way_mask << 20);
1222 /* Setup VLANX TPID's.
1223 * Use VLAN1 for 802.1Q
1224 * and VLAN0 for 802.1AD.
1226 cfg = (0x8100ULL << 16) | 0x88A8ULL;
1227 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1229 /* Enable LMTST for this NIX LF */
1230 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1232 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1234 cfg = req->npa_func;
1236 cfg |= (u64)req->sso_func << 16;
1238 cfg |= (u64)req->xqe_sz << 33;
1239 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1241 /* Config Rx pkt length, csum checks and apad enable / disable */
1242 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1244 /* Configure pkind for TX parse config */
1245 cfg = NPC_TX_DEF_PKIND;
1246 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1248 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1249 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1253 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1254 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1256 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1257 rvu_write64(rvu, blkaddr,
1258 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1259 VTAGSIZE_T4 | VTAG_STRIP);
1264 nix_ctx_free(rvu, pfvf);
1268 /* Set macaddr of this PF/VF */
1269 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1271 /* set SQB size info */
1272 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1273 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1274 rsp->rx_chan_base = pfvf->rx_chan_base;
1275 rsp->tx_chan_base = pfvf->tx_chan_base;
1276 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1277 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1278 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1279 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1280 /* Get HW supported stat count */
1281 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1282 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1283 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1284 /* Get count of CQ IRQs and error IRQs supported per LF */
1285 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1286 rsp->qints = ((cfg >> 12) & 0xFFF);
1287 rsp->cints = ((cfg >> 24) & 0xFFF);
1288 rsp->cgx_links = hw->cgx_links;
1289 rsp->lbk_links = hw->lbk_links;
1290 rsp->sdp_links = hw->sdp_links;
1295 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1296 struct msg_rsp *rsp)
1298 struct rvu_hwinfo *hw = rvu->hw;
1299 u16 pcifunc = req->hdr.pcifunc;
1300 struct rvu_block *block;
1301 int blkaddr, nixlf, err;
1302 struct rvu_pfvf *pfvf;
1304 pfvf = rvu_get_pfvf(rvu, pcifunc);
1305 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1306 if (!pfvf->nixlf || blkaddr < 0)
1307 return NIX_AF_ERR_AF_LF_INVALID;
1309 block = &hw->block[blkaddr];
1310 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1312 return NIX_AF_ERR_AF_LF_INVALID;
1314 if (req->flags & NIX_LF_DISABLE_FLOWS)
1315 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1317 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1319 /* Free any tx vtag def entries used by this NIX LF */
1320 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1321 nix_free_tx_vtag_entries(rvu, pcifunc);
1323 nix_interface_deinit(rvu, pcifunc, nixlf);
1325 /* Reset this NIX LF */
1326 err = rvu_lf_reset(rvu, block, nixlf);
1328 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1329 block->addr - BLKADDR_NIX0, nixlf);
1330 return NIX_AF_ERR_LF_RESET;
1333 nix_ctx_free(rvu, pfvf);
1338 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1339 struct nix_mark_format_cfg *req,
1340 struct nix_mark_format_cfg_rsp *rsp)
1342 u16 pcifunc = req->hdr.pcifunc;
1343 struct nix_hw *nix_hw;
1344 struct rvu_pfvf *pfvf;
1348 pfvf = rvu_get_pfvf(rvu, pcifunc);
1349 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1350 if (!pfvf->nixlf || blkaddr < 0)
1351 return NIX_AF_ERR_AF_LF_INVALID;
1353 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1357 cfg = (((u32)req->offset & 0x7) << 16) |
1358 (((u32)req->y_mask & 0xF) << 12) |
1359 (((u32)req->y_val & 0xF) << 8) |
1360 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1362 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1364 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1365 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1366 return NIX_AF_ERR_MARK_CFG_FAIL;
1369 rsp->mark_format_idx = rc;
1373 /* Disable shaping of pkts by a scheduler queue
1374 * at a given scheduler level.
1376 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1379 u64 cir_reg = 0, pir_reg = 0;
1383 case NIX_TXSCH_LVL_TL1:
1384 cir_reg = NIX_AF_TL1X_CIR(schq);
1385 pir_reg = 0; /* PIR not available at TL1 */
1387 case NIX_TXSCH_LVL_TL2:
1388 cir_reg = NIX_AF_TL2X_CIR(schq);
1389 pir_reg = NIX_AF_TL2X_PIR(schq);
1391 case NIX_TXSCH_LVL_TL3:
1392 cir_reg = NIX_AF_TL3X_CIR(schq);
1393 pir_reg = NIX_AF_TL3X_PIR(schq);
1395 case NIX_TXSCH_LVL_TL4:
1396 cir_reg = NIX_AF_TL4X_CIR(schq);
1397 pir_reg = NIX_AF_TL4X_PIR(schq);
1403 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1404 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1408 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1409 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1412 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1415 struct rvu_hwinfo *hw = rvu->hw;
1418 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1421 /* Reset TL4's SDP link config */
1422 if (lvl == NIX_TXSCH_LVL_TL4)
1423 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1425 if (lvl != NIX_TXSCH_LVL_TL2)
1428 /* Reset TL2's CGX or LBK link config */
1429 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1430 rvu_write64(rvu, blkaddr,
1431 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
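/* Return the transmit link index for this PF_FUNC: CGX LMAC links come
 * first, LBK links follow them, and everything else uses the SDP link.
 */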
1434 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1436 struct rvu_hwinfo *hw = rvu->hw;
1437 int pf = rvu_get_pf(pcifunc);
1438 u8 cgx_id = 0, lmac_id = 0;
1440 if (is_afvf(pcifunc)) {/* LBK links */
1441 return hw->cgx_links;
1442 } else if (is_pf_cgxmapped(rvu, pf)) {
1443 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1444 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1448 return hw->cgx_links + hw->lbk_links;
1451 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1452 int link, int *start, int *end)
1454 struct rvu_hwinfo *hw = rvu->hw;
1455 int pf = rvu_get_pf(pcifunc);
1457 if (is_afvf(pcifunc)) { /* LBK links */
1458 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1459 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1460 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1461 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1462 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1463 } else { /* SDP link */
1464 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1465 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1466 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1470 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1471 struct nix_hw *nix_hw,
1472 struct nix_txsch_alloc_req *req)
1474 struct rvu_hwinfo *hw = rvu->hw;
1475 int schq, req_schq, free_cnt;
1476 struct nix_txsch *txsch;
1477 int link, start, end;
1479 txsch = &nix_hw->txsch[lvl];
1480 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1485 link = nix_get_tx_link(rvu, pcifunc);
1487 /* For traffic aggregating scheduler level, one queue is enough */
1488 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1490 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1494 /* Get free SCHQ count and check if request can be accommodated */
1495 if (hw->cap.nix_fixed_txschq_mapping) {
1496 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1497 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1498 if (end <= txsch->schq.max && schq < end &&
1499 !test_bit(schq, txsch->schq.bmap))
1504 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1507 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1508 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1510 /* If contiguous queues are needed, check for availability */
1511 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1512 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1513 return NIX_AF_ERR_TLX_ALLOC_FAIL;
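/* Reserve the requested scheduler queues (contiguous and scattered)
 * within [start, end) and record the allocated indices in the response.
 */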
1518 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1519 struct nix_txsch_alloc_rsp *rsp,
1520 int lvl, int start, int end)
1522 struct rvu_hwinfo *hw = rvu->hw;
1523 u16 pcifunc = rsp->hdr.pcifunc;
1526 /* For traffic aggregating levels, queue alloc is based
1527 * on the transmit link to which the PF_FUNC is mapped.
1529 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1530 /* A single TL queue is allocated */
1531 if (rsp->schq_contig[lvl]) {
1532 rsp->schq_contig[lvl] = 1;
1533 rsp->schq_contig_list[lvl][0] = start;
1536 /* Both contig and non-contig reqs don't make sense here */
1537 if (rsp->schq_contig[lvl])
1540 if (rsp->schq[lvl]) {
1542 rsp->schq_list[lvl][0] = start;
1547 /* Adjust the queue request count if HW supports
1548 * only one queue per level configuration.
1550 if (hw->cap.nix_fixed_txschq_mapping) {
1551 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1553 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1554 rsp->schq_contig[lvl] = 0;
1559 if (rsp->schq_contig[lvl]) {
1560 rsp->schq_contig[lvl] = 1;
1561 set_bit(schq, txsch->schq.bmap);
1562 rsp->schq_contig_list[lvl][0] = schq;
1564 } else if (rsp->schq[lvl]) {
1566 set_bit(schq, txsch->schq.bmap);
1567 rsp->schq_list[lvl][0] = schq;
1572 /* Allocate the requested contiguous queue indices first */
1573 if (rsp->schq_contig[lvl]) {
1574 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1575 txsch->schq.max, start,
1576 rsp->schq_contig[lvl], 0);
1578 rsp->schq_contig[lvl] = 0;
1579 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1580 set_bit(schq, txsch->schq.bmap);
1581 rsp->schq_contig_list[lvl][idx] = schq;
1586 /* Allocate non-contiguous queue indices */
1587 if (rsp->schq[lvl]) {
1589 for (schq = start; schq < end; schq++) {
1590 if (!test_bit(schq, txsch->schq.bmap)) {
1591 set_bit(schq, txsch->schq.bmap);
1592 rsp->schq_list[lvl][idx++] = schq;
1594 if (idx == rsp->schq[lvl])
1597 /* Update how many were allocated */
1598 rsp->schq[lvl] = idx;
1602 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1603 struct nix_txsch_alloc_req *req,
1604 struct nix_txsch_alloc_rsp *rsp)
1606 struct rvu_hwinfo *hw = rvu->hw;
1607 u16 pcifunc = req->hdr.pcifunc;
1608 int link, blkaddr, rc = 0;
1609 int lvl, idx, start, end;
1610 struct nix_txsch *txsch;
1611 struct rvu_pfvf *pfvf;
1612 struct nix_hw *nix_hw;
1616 pfvf = rvu_get_pfvf(rvu, pcifunc);
1617 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1618 if (!pfvf->nixlf || blkaddr < 0)
1619 return NIX_AF_ERR_AF_LF_INVALID;
1621 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1625 mutex_lock(&rvu->rsrc_lock);
1627 /* Check if request is valid as per HW capabilities
1628 * and can be accommodated.
1630 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1631 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1636 /* Allocate requested Tx scheduler queues */
1637 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1638 txsch = &nix_hw->txsch[lvl];
1639 pfvf_map = txsch->pfvf_map;
1641 if (!req->schq[lvl] && !req->schq_contig[lvl])
1644 rsp->schq[lvl] = req->schq[lvl];
1645 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1647 link = nix_get_tx_link(rvu, pcifunc);
1649 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1652 } else if (hw->cap.nix_fixed_txschq_mapping) {
1653 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1656 end = txsch->schq.max;
1659 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1661 /* Reset queue config */
1662 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1663 schq = rsp->schq_contig_list[lvl][idx];
1664 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1665 NIX_TXSCHQ_CFG_DONE))
1666 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1667 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1668 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1671 for (idx = 0; idx < req->schq[lvl]; idx++) {
1672 schq = rsp->schq_list[lvl][idx];
1673 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1674 NIX_TXSCHQ_CFG_DONE))
1675 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1676 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1677 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1681 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1682 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1683 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1684 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1685 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1688 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1690 mutex_unlock(&rvu->rsrc_lock);
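/* Flush an SMQ: enable CGX Tx if it was disabled and drop link
 * backpressure so pending packets can drain, then poll for completion.
 */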
1694 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1695 int smq, u16 pcifunc, int nixlf)
1697 int pf = rvu_get_pf(pcifunc);
1698 u8 cgx_id = 0, lmac_id = 0;
1699 int err, restore_tx_en = 0;
1702 /* enable cgx tx if disabled */
1703 if (is_pf_cgxmapped(rvu, pf)) {
1704 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1705 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1710 /* Do SMQ flush and set enqueue xoff */
1711 cfg |= BIT_ULL(50) | BIT_ULL(49);
1712 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1714 /* Disable backpressure from physical link,
1715 * otherwise SMQ flush may stall.
1717 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1719 /* Wait for flush to complete */
1720 err = rvu_poll_reg(rvu, blkaddr,
1721 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1724 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1726 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1727 /* restore cgx tx state */
1729 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1732 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1734 int blkaddr, nixlf, lvl, schq, err;
1735 struct rvu_hwinfo *hw = rvu->hw;
1736 struct nix_txsch *txsch;
1737 struct nix_hw *nix_hw;
1739 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1741 return NIX_AF_ERR_AF_LF_INVALID;
1743 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1747 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1749 return NIX_AF_ERR_AF_LF_INVALID;
1751 /* Disable TL2/3 queue links before SMQ flush */
1752 mutex_lock(&rvu->rsrc_lock);
1753 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1754 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1757 txsch = &nix_hw->txsch[lvl];
1758 for (schq = 0; schq < txsch->schq.max; schq++) {
1759 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1761 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1766 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1767 for (schq = 0; schq < txsch->schq.max; schq++) {
1768 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1770 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1773 /* Now free scheduler queues to free pool */
1774 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1775 /* TLs above aggregation level are shared across all PF
1776 * and its VFs, hence skip freeing them.
1778 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1781 txsch = &nix_hw->txsch[lvl];
1782 for (schq = 0; schq < txsch->schq.max; schq++) {
1783 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1785 rvu_free_rsrc(&txsch->schq, schq);
1786 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1789 mutex_unlock(&rvu->rsrc_lock);
1791 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1792 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1793 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1795 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1800 static int nix_txschq_free_one(struct rvu *rvu,
1801 struct nix_txsch_free_req *req)
1803 struct rvu_hwinfo *hw = rvu->hw;
1804 u16 pcifunc = req->hdr.pcifunc;
1805 int lvl, schq, nixlf, blkaddr;
1806 struct nix_txsch *txsch;
1807 struct nix_hw *nix_hw;
1810 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1812 return NIX_AF_ERR_AF_LF_INVALID;
1814 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1818 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1820 return NIX_AF_ERR_AF_LF_INVALID;
1822 lvl = req->schq_lvl;
1824 txsch = &nix_hw->txsch[lvl];
1826 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1829 pfvf_map = txsch->pfvf_map;
1830 mutex_lock(&rvu->rsrc_lock);
1832 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1833 mutex_unlock(&rvu->rsrc_lock);
1837 /* Flush if it is an SMQ. The onus of disabling
1838 * TL2/3 queue links before the SMQ flush is on the user
1840 if (lvl == NIX_TXSCH_LVL_SMQ)
1841 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1843 /* Free the resource */
1844 rvu_free_rsrc(&txsch->schq, schq);
1845 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1846 mutex_unlock(&rvu->rsrc_lock);
1849 return NIX_AF_ERR_TLX_INVALID;
1852 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1853 struct nix_txsch_free_req *req,
1854 struct msg_rsp *rsp)
1856 if (req->flags & TXSCHQ_FREE_ALL)
1857 return nix_txschq_free(rvu, req->hdr.pcifunc);
1859 return nix_txschq_free_one(rvu, req);
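/* Validate that the register offset targets a queue owned by this
 * PF/VF and that any parent queue written into it is owned too.
 */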
1862 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1863 int lvl, u64 reg, u64 regval)
1865 u64 regbase = reg & 0xFFFF;
1868 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1871 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1872 /* Check if this schq belongs to this PF/VF or not */
1873 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1876 parent = (regval >> 16) & 0x1FF;
1877 /* Validate MDQ's TL4 parent */
1878 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1879 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1882 /* Validate TL4's TL3 parent */
1883 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1884 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1887 /* Validate TL3's TL2 parent */
1888 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1889 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1892 /* Validate TL2's TL1 parent */
1893 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1894 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1900 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1904 if (hw->cap.nix_shaping)
1907 /* If shaping and coloring are not supported, then
1908 * *_CIR and *_PIR registers should not be configured.
1910 regbase = reg & 0xFFFF;
1913 case NIX_TXSCH_LVL_TL1:
1914 if (regbase == NIX_AF_TL1X_CIR(0))
1917 case NIX_TXSCH_LVL_TL2:
1918 if (regbase == NIX_AF_TL2X_CIR(0) ||
1919 regbase == NIX_AF_TL2X_PIR(0))
1922 case NIX_TXSCH_LVL_TL3:
1923 if (regbase == NIX_AF_TL3X_CIR(0) ||
1924 regbase == NIX_AF_TL3X_PIR(0))
1927 case NIX_TXSCH_LVL_TL4:
1928 if (regbase == NIX_AF_TL4X_CIR(0) ||
1929 regbase == NIX_AF_TL4X_PIR(0))
1936 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1937 u16 pcifunc, int blkaddr)
1942 schq = nix_get_tx_link(rvu, pcifunc);
1943 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1944 /* Skip if PF has already done the config */
1945 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1947 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1948 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1949 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1950 TXSCH_TL1_DFLT_RR_QTM);
1951 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1952 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1955 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1956 struct nix_txschq_config *req,
1957 struct msg_rsp *rsp)
1959 struct rvu_hwinfo *hw = rvu->hw;
1960 u16 pcifunc = req->hdr.pcifunc;
1961 u64 reg, regval, schq_regbase;
1962 struct nix_txsch *txsch;
1963 struct nix_hw *nix_hw;
1964 int blkaddr, idx, err;
1968 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1969 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1970 return NIX_AF_INVAL_TXSCHQ_CFG;
1972 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1976 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1980 txsch = &nix_hw->txsch[req->lvl];
1981 pfvf_map = txsch->pfvf_map;
1983 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1984 pcifunc & RVU_PFVF_FUNC_MASK) {
1985 mutex_lock(&rvu->rsrc_lock);
1986 if (req->lvl == NIX_TXSCH_LVL_TL1)
1987 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1988 mutex_unlock(&rvu->rsrc_lock);
1992 for (idx = 0; idx < req->num_regs; idx++) {
1993 reg = req->reg[idx];
1994 regval = req->regval[idx];
1995 schq_regbase = reg & 0xFFFF;
1997 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1998 txsch->lvl, reg, regval))
1999 return NIX_AF_INVAL_TXSCHQ_CFG;
2001 /* Check if shaping and coloring are supported */
2002 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2005 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2006 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2007 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2009 regval &= ~(0x7FULL << 24);
2010 regval |= ((u64)nixlf << 24);
2013 /* Clear 'BP_ENA' config, if it's not allowed */
2014 if (!hw->cap.nix_tx_link_bp) {
2015 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2016 (schq_regbase & 0xFF00) ==
2017 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2018 regval &= ~BIT_ULL(13);
2021 /* Mark config as done for TL1 by PF */
2022 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2023 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2024 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2025 mutex_lock(&rvu->rsrc_lock);
2026 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2027 NIX_TXSCHQ_CFG_DONE);
2028 mutex_unlock(&rvu->rsrc_lock);
2031 /* SMQ flush is special, hence split the register write such
2032 * that the flush happens first and the rest of the bits are written later.
2034 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2035 (regval & BIT_ULL(49))) {
2036 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2037 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2038 regval &= ~BIT_ULL(49);
2040 rvu_write64(rvu, blkaddr, reg, regval);
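/* Program a per-LF Rx VTAG type: capture/strip behaviour and tag size */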
2046 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2047 struct nix_vtag_config *req)
2049 u64 regval = req->vtag_size;
2051 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2052 req->vtag_size > VTAGSIZE_T8)
2055 /* RX VTAG Type 7 reserved for vf vlan */
2056 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2057 return NIX_AF_ERR_RX_VTAG_INUSE;
2059 if (req->rx.capture_vtag)
2060 regval |= BIT_ULL(5);
2061 if (req->rx.strip_vtag)
2062 regval |= BIT_ULL(4);
2064 rvu_write64(rvu, blkaddr,
2065 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2069 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2070 u16 pcifunc, int index)
2072 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2073 struct nix_txvlan *vlan = &nix_hw->txvlan;
2075 if (vlan->entry2pfvf_map[index] != pcifunc)
2076 return NIX_AF_ERR_PARAM;
2078 rvu_write64(rvu, blkaddr,
2079 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2080 rvu_write64(rvu, blkaddr,
2081 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2083 vlan->entry2pfvf_map[index] = 0;
2084 rvu_free_rsrc(&vlan->rsrc, index);
2089 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2091 struct nix_txvlan *vlan;
2092 struct nix_hw *nix_hw;
2095 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2099 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2100 vlan = &nix_hw->txvlan;
2102 mutex_lock(&vlan->rsrc_lock);
2103 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2104 for (index = 0; index < vlan->rsrc.max; index++) {
2105 if (vlan->entry2pfvf_map[index] == pcifunc)
2106 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2108 mutex_unlock(&vlan->rsrc_lock);
2111 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2114 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2115 struct nix_txvlan *vlan = &nix_hw->txvlan;
2119 mutex_lock(&vlan->rsrc_lock);
2121 index = rvu_alloc_rsrc(&vlan->rsrc);
2123 mutex_unlock(&vlan->rsrc_lock);
2127 mutex_unlock(&vlan->rsrc_lock);
2129 regval = size ? vtag : vtag << 32;
2131 rvu_write64(rvu, blkaddr,
2132 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2133 rvu_write64(rvu, blkaddr,
2134 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2139 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2140 struct nix_vtag_config *req)
2142 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2143 struct nix_txvlan *vlan = &nix_hw->txvlan;
2144 u16 pcifunc = req->hdr.pcifunc;
2145 int idx0 = req->tx.vtag0_idx;
2146 int idx1 = req->tx.vtag1_idx;
2149 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2150 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2151 vlan->entry2pfvf_map[idx1] != pcifunc)
2152 return NIX_AF_ERR_PARAM;
2154 mutex_lock(&vlan->rsrc_lock);
2156 if (req->tx.free_vtag0) {
2157 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2162 if (req->tx.free_vtag1)
2163 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2166 mutex_unlock(&vlan->rsrc_lock);
2170 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2171 struct nix_vtag_config *req,
2172 struct nix_vtag_config_rsp *rsp)
2174 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2175 struct nix_txvlan *vlan = &nix_hw->txvlan;
2176 u16 pcifunc = req->hdr.pcifunc;
2178 if (req->tx.cfg_vtag0) {
2180 nix_tx_vtag_alloc(rvu, blkaddr,
2181 req->tx.vtag0, req->vtag_size);
2183 if (rsp->vtag0_idx < 0)
2184 return NIX_AF_ERR_TX_VTAG_NOSPC;
2186 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2189 if (req->tx.cfg_vtag1) {
2191 nix_tx_vtag_alloc(rvu, blkaddr,
2192 req->tx.vtag1, req->vtag_size);
2194 if (rsp->vtag1_idx < 0)
2197 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2203 if (req->tx.cfg_vtag0)
2204 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2206 return NIX_AF_ERR_TX_VTAG_NOSPC;
2209 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2210 struct nix_vtag_config *req,
2211 struct nix_vtag_config_rsp *rsp)
2213 u16 pcifunc = req->hdr.pcifunc;
2214 int blkaddr, nixlf, err;
2216 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2220 if (req->cfg_type) {
2221 /* rx vtag configuration */
2222 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2224 return NIX_AF_ERR_PARAM;
2226 /* tx vtag configuration */
2227 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2228 (req->tx.free_vtag0 || req->tx.free_vtag1))
2229 return NIX_AF_ERR_PARAM;
2231 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2232 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2234 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2235 return nix_tx_vtag_decfg(rvu, blkaddr, req);
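/* Write one multicast/mirror entry (MCE) via the NIX admin queue */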
2241 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2242 int mce, u8 op, u16 pcifunc, int next, bool eol)
2244 struct nix_aq_enq_req aq_req;
2247 aq_req.hdr.pcifunc = 0;
2248 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2252 /* Use RSS with RSS index 0 */
2254 aq_req.mce.index = 0;
2255 aq_req.mce.eol = eol;
2256 aq_req.mce.pf_func = pcifunc;
2257 aq_req.mce.next = next;
2259 /* All fields valid */
2260 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2262 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2264 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2265 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2271 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2272 u16 pcifunc, bool add)
2274 struct mce *mce, *tail = NULL;
2275 bool delete = false;
2277 /* Scan through the current list */
2278 hlist_for_each_entry(mce, &mce_list->head, node) {
2279 /* If already exists, then delete */
2280 if (mce->pcifunc == pcifunc && !add) {
2283 } else if (mce->pcifunc == pcifunc && add) {
2284 /* entry already exists */
2291 hlist_del(&mce->node);
2300 /* Add a new one to the list, at the tail */
2301 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2304 mce->pcifunc = pcifunc;
2306 hlist_add_head(&mce->node, &mce_list->head);
2308 hlist_add_behind(&mce->node, &tail->node);
2313 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2314 struct nix_mce_list *mce_list,
2315 int mce_idx, int mcam_index, bool add)
2317 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2318 struct npc_mcam *mcam = &rvu->hw->mcam;
2319 struct nix_mcast *mcast;
2320 struct nix_hw *nix_hw;
2326 /* Get this PF/VF func's MCE index */
2327 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2329 if (idx > (mce_idx + mce_list->max)) {
2331 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2332 __func__, idx, mce_list->max,
2333 pcifunc >> RVU_PFVF_PF_SHIFT);
2337 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2341 mcast = &nix_hw->mcast;
2342 mutex_lock(&mcast->mce_lock);
2344 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2348 /* Disable MCAM entry in NPC */
2349 if (!mce_list->count) {
2350 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2351 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2355 /* Dump the updated list to HW */
2357 last_idx = idx + mce_list->count - 1;
2358 hlist_for_each_entry(mce, &mce_list->head, node) {
2363 /* EOL should be set in last MCE */
2364 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2365 mce->pcifunc, next_idx,
2366 (next_idx > last_idx) ? true : false);
2373 mutex_unlock(&mcast->mce_lock);
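/* Helper below: looks up the owning PF's pfvf struct (VF bits masked off)
 * and returns the bcast/allmulti/promisc MCE list and its base index for
 * the requested entry type; it returns early when the HW has no RX
 * multicast support or the PF is not CGX mapped.
 */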
2377 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2378 struct nix_mce_list **mce_list, int *mce_idx)
2380 struct rvu_hwinfo *hw = rvu->hw;
2381 struct rvu_pfvf *pfvf;
2383 if (!hw->cap.nix_rx_multicast ||
2384 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2390 /* Get this PF/VF func's MCE index */
2391 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2393 if (type == NIXLF_BCAST_ENTRY) {
2394 *mce_list = &pfvf->bcast_mce_list;
2395 *mce_idx = pfvf->bcast_mce_idx;
2396 } else if (type == NIXLF_ALLMULTI_ENTRY) {
2397 *mce_list = &pfvf->mcast_mce_list;
2398 *mce_idx = pfvf->mcast_mce_idx;
2399 } else if (type == NIXLF_PROMISC_ENTRY) {
2400 *mce_list = &pfvf->promisc_mce_list;
2401 *mce_idx = pfvf->promisc_mce_idx;
2408 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2411 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2412 struct npc_mcam *mcam = &rvu->hw->mcam;
2413 struct rvu_hwinfo *hw = rvu->hw;
2414 struct nix_mce_list *mce_list;
2416 /* skip multicast pkt replication for AF's VFs */
2417 if (is_afvf(pcifunc))
2420 if (!hw->cap.nix_rx_multicast)
2423 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2427 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2431 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2433 mcam_index = npc_get_nixlf_mcam_index(mcam,
2434 pcifunc & ~RVU_PFVF_FUNC_MASK,
2436 err = nix_update_mce_list(rvu, pcifunc, mce_list,
2437 mce_idx, mcam_index, add);
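/* Setup below walks all CGX mapped PFs (skipping PF0/AF), reserves
 * bcast, allmulti and promisc MCE list slots sized for the PF plus its
 * VFs, and pre-installs dummy INIT entries so later updates can always
 * use the WRITE op.
 */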
2441 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2443 struct nix_mcast *mcast = &nix_hw->mcast;
2444 int err, pf, numvfs, idx;
2445 struct rvu_pfvf *pfvf;
2449 /* Skip PF0 (i.e AF) */
2450 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2451 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2452 /* If PF is not enabled, nothing to do */
2453 if (!((cfg >> 20) & 0x01))
2455 /* Get numVFs attached to this PF */
2456 numvfs = (cfg >> 12) & 0xFF;
2458 pfvf = &rvu->pf[pf];
2460 /* Is this NIX0/1 block mapped to the PF? */
2461 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2464 /* save start idx of broadcast mce list */
2465 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2466 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2468 /* save start idx of multicast mce list */
2469 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2470 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2472 /* save the start idx of promisc mce list */
2473 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2474 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2476 for (idx = 0; idx < (numvfs + 1); idx++) {
2477 /* idx-0 is for PF, followed by VFs */
2478 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2480 /* Add dummy entries now, so that we don't have to check
2481 * whether AQ_OP should be INIT/WRITE later on.
2482 * Will be updated when a NIXLF is attached/detached to
2485 err = nix_blk_setup_mce(rvu, nix_hw,
2486 pfvf->bcast_mce_idx + idx,
2492 /* add dummy entries to multicast mce list */
2493 err = nix_blk_setup_mce(rvu, nix_hw,
2494 pfvf->mcast_mce_idx + idx,
2500 /* add dummy entries to promisc mce list */
2501 err = nix_blk_setup_mce(rvu, nix_hw,
2502 pfvf->promisc_mce_idx + idx,
2512 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2514 struct nix_mcast *mcast = &nix_hw->mcast;
2515 struct rvu_hwinfo *hw = rvu->hw;
2518 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2519 size = (1ULL << size);
2521 /* Alloc memory for multicast/mirror replication entries */
2522 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2523 (256UL << MC_TBL_SIZE), size);
2527 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2528 (u64)mcast->mce_ctx->iova);
2530 /* Set max list length equal to max no of VFs per PF + PF itself */
2531 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2532 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2534 /* Alloc memory for multicast replication buffers */
2535 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2536 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2537 (8UL << MC_BUF_CNT), size);
2541 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2542 (u64)mcast->mcast_buf->iova);
2544 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2545 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2547 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2548 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2549 BIT_ULL(20) | MC_BUF_CNT);
2551 mutex_init(&mcast->mce_lock);
2553 return nix_setup_mce_tables(rvu, nix_hw);
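/* Setup below allocates the TX VTAG definition bitmap
 * (NIX_TX_VTAG_DEF_MAX entries) plus the entry-to-PFFUNC map used to
 * track ownership, and initializes the rsrc_lock protecting them.
 */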
2556 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2558 struct nix_txvlan *vlan = &nix_hw->txvlan;
2561 /* Allocate resource bitmap for tx vtag def registers */
2562 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2563 err = rvu_alloc_bitmap(&vlan->rsrc);
2567 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2568 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2569 sizeof(u16), GFP_KERNEL);
2570 if (!vlan->entry2pfvf_map)
2573 mutex_init(&vlan->rsrc_lock);
2577 kfree(vlan->rsrc.bmap);
2581 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2583 struct nix_txsch *txsch;
2587 /* Get scheduler queue count of each type and alloc
2588 * a bitmap for each, for alloc/free/attach operations.
2590 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2591 txsch = &nix_hw->txsch[lvl];
2594 case NIX_TXSCH_LVL_SMQ:
2595 reg = NIX_AF_MDQ_CONST;
2597 case NIX_TXSCH_LVL_TL4:
2598 reg = NIX_AF_TL4_CONST;
2600 case NIX_TXSCH_LVL_TL3:
2601 reg = NIX_AF_TL3_CONST;
2603 case NIX_TXSCH_LVL_TL2:
2604 reg = NIX_AF_TL2_CONST;
2606 case NIX_TXSCH_LVL_TL1:
2607 reg = NIX_AF_TL1_CONST;
2610 cfg = rvu_read64(rvu, blkaddr, reg);
2611 txsch->schq.max = cfg & 0xFFFF;
2612 err = rvu_alloc_bitmap(&txsch->schq);
2616 /* Allocate memory for scheduler queues to
2617 * PF/VF pcifunc mapping info.
2619 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2620 sizeof(u32), GFP_KERNEL);
2621 if (!txsch->pfvf_map)
2623 for (schq = 0; schq < txsch->schq.max; schq++)
2624 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
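/* rvu_nix_reserve_mark_format() below first scans the mark format
 * configs already in use for a match; only when none matches and a free
 * slot remains does it program NIX_AF_MARK_FORMATX_CTL with the new cfg,
 * returning the chosen format index (or an error when the table is full).
 */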
2629 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2630 int blkaddr, u32 cfg)
2634 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2635 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2638 if (fmt_idx >= nix_hw->mark_format.total)
2641 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2642 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2643 nix_hw->mark_format.in_use++;
2647 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2651 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2652 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2653 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2654 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2655 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2656 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2657 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2658 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2659 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2664 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2665 nix_hw->mark_format.total = (u8)total;
2666 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2668 if (!nix_hw->mark_format.cfg)
2670 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2671 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2673 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2680 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2682 /* CN10K supports LBK FIFO size 72 KB */
2683 if (rvu->hw->lbk_bufsize == 0x12000)
2684 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2686 *max_mtu = NIC_HW_MAX_FRS;
2689 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2691 /* RPM supports FIFO len 128 KB */
2692 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2693 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2695 *max_mtu = NIC_HW_MAX_FRS;
2698 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2699 struct nix_hw_info *rsp)
2701 u16 pcifunc = req->hdr.pcifunc;
2704 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2706 return NIX_AF_ERR_AF_LF_INVALID;
2708 if (is_afvf(pcifunc))
2709 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2711 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2713 rsp->min_mtu = NIC_HW_MIN_FRS;
2717 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2718 struct msg_rsp *rsp)
2720 u16 pcifunc = req->hdr.pcifunc;
2721 int i, nixlf, blkaddr, err;
2724 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2728 /* Get stats count supported by HW */
2729 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2731 /* Reset tx stats */
2732 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2733 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2735 /* Reset rx stats */
2736 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2737 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2742 /* Returns the ALG index to be set into NPC_RX_ACTION */
2743 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2747 /* Scan over existing algo entries to find a match */
2748 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2749 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2755 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2757 int idx, nr_field, key_off, field_marker, keyoff_marker;
2758 int max_key_off, max_bit_pos, group_member;
2759 struct nix_rx_flowkey_alg *field;
2760 struct nix_rx_flowkey_alg tmp;
2761 u32 key_type, valid_key;
2762 int l4_key_offset = 0;
2767 #define FIELDS_PER_ALG 5
2768 #define MAX_KEY_OFF 40
2769 /* Clear all fields */
2770 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2772 /* Each of the 32 possible flow key algorithm definitions should
2773 * fall into above incremental config (except ALG0). Otherwise a
2774 * single NPC MCAM entry is not sufficient for supporting RSS.
2776 * If a different definition or combination is needed then NPC MCAM
2777 * has to be programmed to filter such pkts and its action should
2778 * point to this definition to calculate flowtag or hash.
2780 * The `for loop` goes over _all_ protocol fields and the following
2781 * variables depict the state machine's forward progress logic.
2783 * keyoff_marker - Enabled when hash byte length needs to be accounted
2784 * in field->key_offset update.
2785 * field_marker - Enabled when a new field needs to be selected.
2786 * group_member - Enabled when protocol is part of a group.
2789 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2790 nr_field = 0; key_off = 0; field_marker = 1;
2791 field = &tmp; max_bit_pos = fls(flow_cfg);
2793 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2794 key_off < MAX_KEY_OFF; idx++) {
2795 key_type = BIT(idx);
2796 valid_key = flow_cfg & key_type;
2797 /* Found a field marker, reset the field values */
2799 memset(&tmp, 0, sizeof(tmp));
2801 field_marker = true;
2802 keyoff_marker = true;
2804 case NIX_FLOW_KEY_TYPE_PORT:
2805 field->sel_chan = true;
2806 /* This should be set to 1, when SEL_CHAN is set */
2809 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2810 field->lid = NPC_LID_LC;
2811 field->hdr_offset = 9; /* offset */
2812 field->bytesm1 = 0; /* 1 byte */
2813 field->ltype_match = NPC_LT_LC_IP;
2814 field->ltype_mask = 0xF;
2816 case NIX_FLOW_KEY_TYPE_IPV4:
2817 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2818 field->lid = NPC_LID_LC;
2819 field->ltype_match = NPC_LT_LC_IP;
2820 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2821 field->lid = NPC_LID_LG;
2822 field->ltype_match = NPC_LT_LG_TU_IP;
2824 field->hdr_offset = 12; /* SIP offset */
2825 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2826 field->ltype_mask = 0xF; /* Match only IPv4 */
2827 keyoff_marker = false;
2829 case NIX_FLOW_KEY_TYPE_IPV6:
2830 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2831 field->lid = NPC_LID_LC;
2832 field->ltype_match = NPC_LT_LC_IP6;
2833 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2834 field->lid = NPC_LID_LG;
2835 field->ltype_match = NPC_LT_LG_TU_IP6;
2837 field->hdr_offset = 8; /* SIP offset */
2838 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2839 field->ltype_mask = 0xF; /* Match only IPv6 */
2841 case NIX_FLOW_KEY_TYPE_TCP:
2842 case NIX_FLOW_KEY_TYPE_UDP:
2843 case NIX_FLOW_KEY_TYPE_SCTP:
2844 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2845 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2846 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2847 field->lid = NPC_LID_LD;
2848 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2849 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2850 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2851 field->lid = NPC_LID_LH;
2852 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2854 /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2855 * so no need to change the ltype_match, just change
2856 * the lid for inner protocols
2858 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2859 (int)NPC_LT_LH_TU_TCP);
2860 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2861 (int)NPC_LT_LH_TU_UDP);
2862 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2863 (int)NPC_LT_LH_TU_SCTP);
2865 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2866 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2868 field->ltype_match |= NPC_LT_LD_TCP;
2869 group_member = true;
2870 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2871 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2873 field->ltype_match |= NPC_LT_LD_UDP;
2874 group_member = true;
2875 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2876 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2878 field->ltype_match |= NPC_LT_LD_SCTP;
2879 group_member = true;
2881 field->ltype_mask = ~field->ltype_match;
2882 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2883 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2884 /* Handle the case where one of the group items
2885 * is enabled in the group but not the final one
2889 group_member = false;
2892 field_marker = false;
2893 keyoff_marker = false;
2896 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2897 * remember the TCP key offset within the 40 byte hash key.
2899 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2900 l4_key_offset = key_off;
2902 case NIX_FLOW_KEY_TYPE_NVGRE:
2903 field->lid = NPC_LID_LD;
2904 field->hdr_offset = 4; /* VSID offset */
2906 field->ltype_match = NPC_LT_LD_NVGRE;
2907 field->ltype_mask = 0xF;
2909 case NIX_FLOW_KEY_TYPE_VXLAN:
2910 case NIX_FLOW_KEY_TYPE_GENEVE:
2911 field->lid = NPC_LID_LE;
2913 field->hdr_offset = 4;
2914 field->ltype_mask = 0xF;
2915 field_marker = false;
2916 keyoff_marker = false;
2918 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2919 field->ltype_match |= NPC_LT_LE_VXLAN;
2920 group_member = true;
2923 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2924 field->ltype_match |= NPC_LT_LE_GENEVE;
2925 group_member = true;
2928 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2930 field->ltype_mask = ~field->ltype_match;
2931 field_marker = true;
2932 keyoff_marker = true;
2934 group_member = false;
2938 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2939 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2940 field->lid = NPC_LID_LA;
2941 field->ltype_match = NPC_LT_LA_ETHER;
2942 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2943 field->lid = NPC_LID_LF;
2944 field->ltype_match = NPC_LT_LF_TU_ETHER;
2946 field->hdr_offset = 0;
2947 field->bytesm1 = 5; /* DMAC 6 Byte */
2948 field->ltype_mask = 0xF;
2950 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2951 field->lid = NPC_LID_LC;
2952 field->hdr_offset = 40; /* IPV6 hdr */
2953 field->bytesm1 = 0; /* 1 Byte ext hdr*/
2954 field->ltype_match = NPC_LT_LC_IP6_EXT;
2955 field->ltype_mask = 0xF;
2957 case NIX_FLOW_KEY_TYPE_GTPU:
2958 field->lid = NPC_LID_LE;
2959 field->hdr_offset = 4;
2960 field->bytesm1 = 3; /* 4 bytes TID*/
2961 field->ltype_match = NPC_LT_LE_GTPU;
2962 field->ltype_mask = 0xF;
2964 case NIX_FLOW_KEY_TYPE_VLAN:
2965 field->lid = NPC_LID_LB;
2966 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2967 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2968 field->ltype_match = NPC_LT_LB_CTAG;
2969 field->ltype_mask = 0xF;
2970 field->fn_mask = 1; /* Mask out the first nibble */
2972 case NIX_FLOW_KEY_TYPE_AH:
2973 case NIX_FLOW_KEY_TYPE_ESP:
2974 field->hdr_offset = 0;
2975 field->bytesm1 = 7; /* SPI + sequence number */
2976 field->ltype_mask = 0xF;
2977 field->lid = NPC_LID_LE;
2978 field->ltype_match = NPC_LT_LE_ESP;
2979 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2980 field->lid = NPC_LID_LD;
2981 field->ltype_match = NPC_LT_LD_AH;
2982 field->hdr_offset = 4;
2983 keyoff_marker = false;
2989 /* Found a valid flow key type */
2991 /* Use the key offset of TCP/UDP/SCTP fields
2992 * for ESP/AH fields.
2994 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2995 key_type == NIX_FLOW_KEY_TYPE_AH)
2996 key_off = l4_key_offset;
2997 field->key_offset = key_off;
2998 memcpy(&alg[nr_field], field, sizeof(*field));
2999 max_key_off = max(max_key_off, field->bytesm1 + 1);
3001 /* Found a field marker, get the next field */
3006 /* Found a keyoff marker, update the new key_off */
3007 if (keyoff_marker) {
3008 key_off += max_key_off;
3012 /* Processed all the flow key types */
3013 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3016 return NIX_AF_ERR_RSS_NOSPC_FIELD;
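/* Rough worked example (illustrative, not read from HW): with a
 * flow_cfg of IPV4 | IPV6 | TCP, IPv4 SIP+DIP (8 bytes) and IPv6
 * SIP+DIP (32 bytes) share key_off 0 (a packet is either v4 or v6),
 * key_off then advances by the larger of the two (32), and the TCP
 * Sport+Dport (4 bytes) land at offset 32 -- still inside the
 * MAX_KEY_OFF (40 byte) hash key. ESP/AH reuse l4_key_offset so they
 * hash at the same position as the L4 ports.
 */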
3019 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3021 u64 field[FIELDS_PER_ALG];
3025 hw = get_nix_hw(rvu->hw, blkaddr);
3029 /* No room to add a new flow hash algorithm */
3030 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3031 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3033 /* Generate algo fields for the given flow_cfg */
3034 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3038 /* Update ALGX_FIELDX register with generated fields */
3039 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3040 rvu_write64(rvu, blkaddr,
3041 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3044 /* Store the flow_cfg for further lookup */
3045 rc = hw->flowkey.in_use;
3046 hw->flowkey.flowkey[rc] = flow_cfg;
3047 hw->flowkey.in_use++;
3052 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3053 struct nix_rss_flowkey_cfg *req,
3054 struct nix_rss_flowkey_cfg_rsp *rsp)
3056 u16 pcifunc = req->hdr.pcifunc;
3057 int alg_idx, nixlf, blkaddr;
3058 struct nix_hw *nix_hw;
3061 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3065 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3069 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3070 /* Failed to get algo index from the existing list, reserve a new one */
3072 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3077 rsp->alg_idx = alg_idx;
3078 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3079 alg_idx, req->mcam_index);
3083 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3085 u32 flowkey_cfg, minkey_cfg;
3088 /* Disable all flow key algx fieldx */
3089 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3090 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3091 rvu_write64(rvu, blkaddr,
3092 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3096 /* IPv4/IPv6 SIP/DIPs */
3097 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3098 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3102 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3103 minkey_cfg = flowkey_cfg;
3104 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3105 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3109 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3110 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3111 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3115 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3116 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3117 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3121 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3122 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3123 NIX_FLOW_KEY_TYPE_UDP;
3124 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3128 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3129 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3130 NIX_FLOW_KEY_TYPE_SCTP;
3131 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3135 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3136 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3137 NIX_FLOW_KEY_TYPE_SCTP;
3138 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3142 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3143 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3144 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3145 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
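/* The handler below installs a new unicast MAC for the NIXLF: it rejects
 * the request when an untrusted VF tries to override a MAC pinned by the
 * PF (PF_SET_VF_MAC), updates pfvf->mac_addr, reinstalls the NPC ucast
 * entry on the RX channel, and records default_mac for trusted VFs.
 */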
3152 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3153 struct nix_set_mac_addr *req,
3154 struct msg_rsp *rsp)
3156 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3157 u16 pcifunc = req->hdr.pcifunc;
3158 int blkaddr, nixlf, err;
3159 struct rvu_pfvf *pfvf;
3161 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3165 pfvf = rvu_get_pfvf(rvu, pcifunc);
3167 /* untrusted VF can't overwrite admin(PF) changes */
3168 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3169 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3171 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3175 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3177 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3178 pfvf->rx_chan_base, req->mac_addr);
3180 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3181 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3186 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3187 struct msg_req *req,
3188 struct nix_get_mac_addr_rsp *rsp)
3190 u16 pcifunc = req->hdr.pcifunc;
3191 struct rvu_pfvf *pfvf;
3193 if (!is_nixlf_attached(rvu, pcifunc))
3194 return NIX_AF_ERR_AF_LF_INVALID;
3196 pfvf = rvu_get_pfvf(rvu, pcifunc);
3198 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3203 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3204 struct msg_rsp *rsp)
3206 bool allmulti, promisc, nix_rx_multicast;
3207 u16 pcifunc = req->hdr.pcifunc;
3208 struct rvu_pfvf *pfvf;
3211 pfvf = rvu_get_pfvf(rvu, pcifunc);
3212 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3213 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3214 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3216 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3218 if (is_vf(pcifunc) && !nix_rx_multicast &&
3219 (promisc || allmulti)) {
3220 dev_warn_ratelimited(rvu->dev,
3221 "VF promisc/multicast not supported\n");
3225 /* untrusted VF can't configure promisc/allmulti */
3226 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3227 (promisc || allmulti))
3230 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3234 if (nix_rx_multicast) {
3235 /* add/del this PF_FUNC to/from mcast pkt replication list */
3236 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3240 "Failed to update pcifunc 0x%x to multicast list\n",
3245 /* add/del this PF_FUNC to/from promisc pkt replication list */
3246 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3250 "Failed to update pcifunc 0x%x to promisc list\n",
3256 /* install/uninstall allmulti entry */
3258 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3259 pfvf->rx_chan_base);
3261 if (!nix_rx_multicast)
3262 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3265 /* install/uninstall promisc entry */
3267 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3271 if (!nix_rx_multicast)
3272 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
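/* nix_find_link_frs() below folds the requester's maxlen/minlen together
 * with the current values of the owning PF and all of its VFs, so the
 * shared RX link ends up programmed with the largest maxlen and the
 * smallest non-zero minlen among them.
 */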
3278 static void nix_find_link_frs(struct rvu *rvu,
3279 struct nix_frs_cfg *req, u16 pcifunc)
3281 int pf = rvu_get_pf(pcifunc);
3282 struct rvu_pfvf *pfvf;
3287 /* Update with requester's min/max lengths */
3288 pfvf = rvu_get_pfvf(rvu, pcifunc);
3289 pfvf->maxlen = req->maxlen;
3290 if (req->update_minlen)
3291 pfvf->minlen = req->minlen;
3293 maxlen = req->maxlen;
3294 minlen = req->update_minlen ? req->minlen : 0;
3296 /* Get this PF's numVFs and starting hwvf */
3297 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3299 /* For each VF, compare requested max/minlen */
3300 for (vf = 0; vf < numvfs; vf++) {
3301 pfvf = &rvu->hwvf[hwvf + vf];
3302 if (pfvf->maxlen > maxlen)
3303 maxlen = pfvf->maxlen;
3304 if (req->update_minlen &&
3305 pfvf->minlen && pfvf->minlen < minlen)
3306 minlen = pfvf->minlen;
3309 /* Compare requested max/minlen with PF's max/minlen */
3310 pfvf = &rvu->pf[pf];
3311 if (pfvf->maxlen > maxlen)
3312 maxlen = pfvf->maxlen;
3313 if (req->update_minlen &&
3314 pfvf->minlen && pfvf->minlen < minlen)
3315 minlen = pfvf->minlen;
3317 /* Update the request with the max/min of the PF and its VFs */
3318 req->maxlen = maxlen;
3319 if (req->update_minlen)
3320 req->minlen = minlen;
3323 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3324 struct msg_rsp *rsp)
3326 struct rvu_hwinfo *hw = rvu->hw;
3327 u16 pcifunc = req->hdr.pcifunc;
3328 int pf = rvu_get_pf(pcifunc);
3329 int blkaddr, schq, link = -1;
3330 struct nix_txsch *txsch;
3331 u64 cfg, lmac_fifo_len;
3332 struct nix_hw *nix_hw;
3333 u8 cgx = 0, lmac = 0;
3336 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3338 return NIX_AF_ERR_AF_LF_INVALID;
3340 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3344 if (is_afvf(pcifunc))
3345 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3347 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3349 if (!req->sdp_link && req->maxlen > max_mtu)
3350 return NIX_AF_ERR_FRS_INVALID;
3352 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3353 return NIX_AF_ERR_FRS_INVALID;
3355 /* Check if the requester wants to update SMQs */
3356 if (!req->update_smq)
3359 /* Update min/maxlen in each of the SMQs attached to this PF/VF */
3360 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3361 mutex_lock(&rvu->rsrc_lock);
3362 for (schq = 0; schq < txsch->schq.max; schq++) {
3363 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3365 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3366 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3367 if (req->update_minlen)
3368 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3369 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3371 mutex_unlock(&rvu->rsrc_lock);
3374 /* Check if config is for SDP link */
3375 if (req->sdp_link) {
3377 return NIX_AF_ERR_RX_LINK_INVALID;
3378 link = hw->cgx_links + hw->lbk_links;
3382 /* Check if the request is from CGX mapped RVU PF */
3383 if (is_pf_cgxmapped(rvu, pf)) {
3384 /* Get CGX and LMAC to which this PF is mapped and find link */
3385 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3386 link = (cgx * hw->lmac_per_cgx) + lmac;
3387 } else if (pf == 0) {
3388 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3389 link = hw->cgx_links;
3393 return NIX_AF_ERR_RX_LINK_INVALID;
3395 nix_find_link_frs(rvu, req, pcifunc);
3398 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3399 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3400 if (req->update_minlen)
3401 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3402 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3404 if (req->sdp_link || pf == 0)
3407 /* Update transmit credits for CGX links */
3409 rvu_cgx_get_fifolen(rvu) /
3410 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3411 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3412 cfg &= ~(0xFFFFFULL << 12);
3413 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3414 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
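/* Illustrative numbers only (not from HW): if the per-LMAC FIFO share is
 * 64 KB (0x10000) and req->maxlen is 1518, the credit field written above
 * becomes (65536 - 1518) / 16 = 4001 units of 16 bytes.
 */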
3418 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3419 struct msg_rsp *rsp)
3421 int nixlf, blkaddr, err;
3424 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3428 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3429 /* Set the interface configuration */
3430 if (req->len_verify & BIT(0))
3433 cfg &= ~BIT_ULL(41);
3435 if (req->len_verify & BIT(1))
3438 cfg &= ~BIT_ULL(40);
3440 if (req->csum_verify & BIT(0))
3443 cfg &= ~BIT_ULL(37);
3445 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3450 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3452 /* CN10k supports 72KB FIFO size and max packet size of 64k */
3453 if (rvu->hw->lbk_bufsize == 0x12000)
3454 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3456 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3459 static void nix_link_config(struct rvu *rvu, int blkaddr)
3461 struct rvu_hwinfo *hw = rvu->hw;
3462 int cgx, lmac_cnt, slink, link;
3463 u16 lbk_max_frs, lmac_max_frs;
3466 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3467 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3469 /* Set default min/max packet lengths allowed on NIX Rx links.
3471 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3472 * as undersized and report them to SW as error pkts, hence
3473 * setting it to 40 bytes.
3475 for (link = 0; link < hw->cgx_links; link++) {
3476 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3477 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3480 for (link = hw->cgx_links; link < (hw->cgx_links + hw->lbk_links); link++) {
3481 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3482 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3484 if (hw->sdp_links) {
3485 link = hw->cgx_links + hw->lbk_links;
3486 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3487 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3490 /* Set credits for Tx links assuming max packet length allowed.
3491 * This will be reconfigured based on MTU set for PF/VF.
3493 for (cgx = 0; cgx < hw->cgx; cgx++) {
3494 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3495 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3497 /* Enable credits and set credit pkt count to max allowed */
3498 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3499 slink = cgx * hw->lmac_per_cgx;
3500 for (link = slink; link < (slink + lmac_cnt); link++) {
3501 rvu_write64(rvu, blkaddr,
3502 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3507 /* Set Tx credits for LBK link */
3508 slink = hw->cgx_links;
3509 for (link = slink; link < (slink + hw->lbk_links); link++) {
3510 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3511 /* Enable credits and set credit pkt count to max allowed */
3512 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3513 rvu_write64(rvu, blkaddr,
3514 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
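/* Calibration below: set BIT(9) in NIX_AF_CFG to start X2P bus
 * calibration, poll NIX_AF_STATUS BIT(10) for completion, check the
 * per-CGX ready bits (16 + idx) and the LBK ready bit (19), then clear
 * the calibrate bit again and report any failing status.
 */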
3518 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3523 /* Start X2P bus calibration */
3524 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3525 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3526 /* Wait for calibration to complete */
3527 err = rvu_poll_reg(rvu, blkaddr,
3528 NIX_AF_STATUS, BIT_ULL(10), false);
3530 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3534 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3535 /* Check if CGX devices are ready */
3536 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3537 /* Skip when cgx port is not available */
3538 if (!rvu_cgx_pdata(idx, rvu) ||
3539 (status & (BIT_ULL(16 + idx))))
3542 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3546 /* Check if LBK is ready */
3547 if (!(status & BIT_ULL(19))) {
3549 "LBK didn't respond to NIX X2P calibration\n");
3553 /* Clear 'calibrate_x2p' bit */
3554 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3555 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3556 if (err || (status & 0x3FFULL))
3558 "NIX X2P calibration failed, status 0x%llx\n", status);
3564 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3569 /* Set admin queue endianness */
3570 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3573 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3576 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3579 /* Do not bypass NDC cache */
3580 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3582 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3583 /* Disable caching of SQB aka SQEs */
3586 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3588 /* Result structure can be followed by RQ/SQ/CQ context at
3589 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3590 * operation type. Alloc sufficient result memory for all operations.
3592 err = rvu_aq_alloc(rvu, &block->aq,
3593 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3594 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3598 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3599 rvu_write64(rvu, block->addr,
3600 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3604 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3606 const struct npc_lt_def_cfg *ltdefs;
3607 struct rvu_hwinfo *hw = rvu->hw;
3608 int blkaddr = nix_hw->blkaddr;
3609 struct rvu_block *block;
3613 block = &hw->block[blkaddr];
3615 if (is_rvu_96xx_B0(rvu)) {
3616 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3617 * internal state when conditional clocks are turned off.
3618 * Hence enable them.
3620 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3621 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3623 /* Set chan/link to backpressure TL3 instead of TL2 */
3624 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3626 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
3627 * This sticky mode is known to cause SQ stalls when multiple
3628 * SQs are mapped to the same SMQ and are transmitting pkts at the same time.
3630 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3631 cfg &= ~BIT_ULL(15);
3632 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3635 ltdefs = rvu->kpu.lt_def;
3636 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3637 err = nix_calibrate_x2p(rvu, blkaddr);
3641 /* Initialize admin queue */
3642 err = nix_aq_init(rvu, block);
3646 /* Restore CINT timer delay to HW reset values */
3647 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3649 if (is_block_implemented(hw, blkaddr)) {
3650 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3654 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3658 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3662 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3666 err = nix_setup_txvlan(rvu, nix_hw);
3670 /* Configure segmentation offload formats */
3671 nix_setup_lso(rvu, nix_hw, blkaddr);
3673 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3674 * This helps the HW protocol checker to identify headers
3675 * and validate lengths and checksums.
3677 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3678 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3679 ltdefs->rx_ol2.ltype_mask);
3680 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3681 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3682 ltdefs->rx_oip4.ltype_mask);
3683 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3684 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3685 ltdefs->rx_iip4.ltype_mask);
3686 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3687 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3688 ltdefs->rx_oip6.ltype_mask);
3689 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3690 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3691 ltdefs->rx_iip6.ltype_mask);
3692 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3693 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3694 ltdefs->rx_otcp.ltype_mask);
3695 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3696 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3697 ltdefs->rx_itcp.ltype_mask);
3698 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3699 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3700 ltdefs->rx_oudp.ltype_mask);
3701 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3702 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3703 ltdefs->rx_iudp.ltype_mask);
3704 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3705 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3706 ltdefs->rx_osctp.ltype_mask);
3707 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3708 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3709 ltdefs->rx_isctp.ltype_mask);
3711 if (!is_rvu_otx2(rvu)) {
3712 /* Enable APAD calculation for other protocols
3713 * matching APAD0 and APAD1 lt def registers.
3715 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3716 (ltdefs->rx_apad0.valid << 11) |
3717 (ltdefs->rx_apad0.lid << 8) |
3718 (ltdefs->rx_apad0.ltype_match << 4) |
3719 ltdefs->rx_apad0.ltype_mask);
3720 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3721 (ltdefs->rx_apad1.valid << 11) |
3722 (ltdefs->rx_apad1.lid << 8) |
3723 (ltdefs->rx_apad1.ltype_match << 4) |
3724 ltdefs->rx_apad1.ltype_mask);
3726 /* Receive ethertype definition register defines layer
3727 * information in NPC_RESULT_S to identify the Ethertype
3728 * location in L2 header. Used for Ethertype overwriting
3729 * in inline IPsec flow.
3731 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3732 (ltdefs->rx_et[0].offset << 12) |
3733 (ltdefs->rx_et[0].valid << 11) |
3734 (ltdefs->rx_et[0].lid << 8) |
3735 (ltdefs->rx_et[0].ltype_match << 4) |
3736 ltdefs->rx_et[0].ltype_mask);
3737 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3738 (ltdefs->rx_et[1].offset << 12) |
3739 (ltdefs->rx_et[1].valid << 11) |
3740 (ltdefs->rx_et[1].lid << 8) |
3741 (ltdefs->rx_et[1].ltype_match << 4) |
3742 ltdefs->rx_et[1].ltype_mask);
3745 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3749 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3750 nix_link_config(rvu, blkaddr);
3752 /* Enable Channel backpressure */
3753 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
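/* rvu_nix_init() below allocates the per-block nix_hw array and walks
 * the NIX block addresses with rvu_get_next_nix_blkaddr(), running
 * rvu_nix_block_init() for each implemented NIX block.
 */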
3758 int rvu_nix_init(struct rvu *rvu)
3760 struct rvu_hwinfo *hw = rvu->hw;
3761 struct nix_hw *nix_hw;
3762 int blkaddr = 0, err;
3765 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3770 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3772 nix_hw = &hw->nix[i];
3774 nix_hw->blkaddr = blkaddr;
3775 err = rvu_nix_block_init(rvu, nix_hw);
3778 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3785 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3786 struct rvu_block *block)
3788 struct nix_txsch *txsch;
3789 struct nix_mcast *mcast;
3790 struct nix_txvlan *vlan;
3791 struct nix_hw *nix_hw;
3794 rvu_aq_free(rvu, block->aq);
3796 if (is_block_implemented(rvu->hw, blkaddr)) {
3797 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3801 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3802 txsch = &nix_hw->txsch[lvl];
3803 kfree(txsch->schq.bmap);
3806 nix_ipolicer_freemem(nix_hw);
3808 vlan = &nix_hw->txvlan;
3809 kfree(vlan->rsrc.bmap);
3810 mutex_destroy(&vlan->rsrc_lock);
3811 devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3813 mcast = &nix_hw->mcast;
3814 qmem_free(rvu->dev, mcast->mce_ctx);
3815 qmem_free(rvu->dev, mcast->mcast_buf);
3816 mutex_destroy(&mcast->mce_lock);
3820 void rvu_nix_freemem(struct rvu *rvu)
3822 struct rvu_hwinfo *hw = rvu->hw;
3823 struct rvu_block *block;
3826 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3828 block = &hw->block[blkaddr];
3829 rvu_nix_block_freemem(rvu, blkaddr, block);
3830 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3834 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3835 struct msg_rsp *rsp)
3837 u16 pcifunc = req->hdr.pcifunc;
3838 struct rvu_pfvf *pfvf;
3841 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3845 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3847 npc_mcam_enable_flows(rvu, pcifunc);
3849 pfvf = rvu_get_pfvf(rvu, pcifunc);
3850 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3852 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3855 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3856 struct msg_rsp *rsp)
3858 u16 pcifunc = req->hdr.pcifunc;
3859 struct rvu_pfvf *pfvf;
3862 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3866 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3868 pfvf = rvu_get_pfvf(rvu, pcifunc);
3869 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3871 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3874 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3876 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3877 struct hwctx_disable_req ctx_req;
3880 ctx_req.hdr.pcifunc = pcifunc;
3882 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3883 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3884 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3885 nix_interface_deinit(rvu, pcifunc, nixlf);
3886 nix_rx_sync(rvu, blkaddr);
3887 nix_txschq_free(rvu, pcifunc);
3889 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3891 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3894 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3895 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3897 dev_err(rvu->dev, "SQ ctx disable failed\n");
3901 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3902 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3904 dev_err(rvu->dev, "RQ ctx disable failed\n");
3908 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3909 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3911 dev_err(rvu->dev, "CQ ctx disable failed\n");
3914 nix_ctx_free(rvu, pfvf);
3916 nix_free_all_bandprof(rvu, pcifunc);
3919 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3921 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3923 struct rvu_hwinfo *hw = rvu->hw;
3924 struct rvu_block *block;
3929 pf = rvu_get_pf(pcifunc);
3930 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3933 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3935 return NIX_AF_ERR_AF_LF_INVALID;
3937 block = &hw->block[blkaddr];
3938 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3940 return NIX_AF_ERR_AF_LF_INVALID;
3942 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3945 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3947 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3949 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3954 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3955 struct msg_rsp *rsp)
3957 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3960 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3961 struct msg_rsp *rsp)
3963 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3966 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3967 struct nix_lso_format_cfg *req,
3968 struct nix_lso_format_cfg_rsp *rsp)
3970 u16 pcifunc = req->hdr.pcifunc;
3971 struct nix_hw *nix_hw;
3972 struct rvu_pfvf *pfvf;
3973 int blkaddr, idx, f;
3976 pfvf = rvu_get_pfvf(rvu, pcifunc);
3977 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3978 if (!pfvf->nixlf || blkaddr < 0)
3979 return NIX_AF_ERR_AF_LF_INVALID;
3981 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3985 /* Find existing matching LSO format, if any */
3986 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3987 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3988 reg = rvu_read64(rvu, blkaddr,
3989 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3990 if (req->fields[f] != (reg & req->field_mask))
3994 if (f == NIX_LSO_FIELD_MAX)
3998 if (idx < nix_hw->lso.in_use) {
4000 rsp->lso_format_idx = idx;
4004 if (nix_hw->lso.in_use == nix_hw->lso.total)
4005 return NIX_AF_ERR_LSO_CFG_FAIL;
4007 rsp->lso_format_idx = nix_hw->lso.in_use++;
4009 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4010 rvu_write64(rvu, blkaddr,
4011 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4017 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4019 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4021 /* overwrite vf mac address with default_mac */
4023 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4026 /* NIX ingress policers or bandwidth profiles APIs */
4027 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4029 struct npc_lt_def_cfg defs, *ltdefs;
4032 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4034 /* Extract PCP and DEI fields from outer VLAN from byte offset
4035 * 2 from the start of LB_PTR (i.e. TAG).
4036 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4037 * fields are considered when 'Tunnel enable' is set in profile.
4039 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4040 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4041 (ltdefs->ovlan.ltype_match << 4) |
4042 ltdefs->ovlan.ltype_mask);
4043 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4044 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4045 (ltdefs->ivlan.ltype_match << 4) |
4046 ltdefs->ivlan.ltype_mask);
4048 /* DSCP field in outer and tunneled IPv4 packets */
4049 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4050 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4051 (ltdefs->rx_oip4.ltype_match << 4) |
4052 ltdefs->rx_oip4.ltype_mask);
4053 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4054 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4055 (ltdefs->rx_iip4.ltype_match << 4) |
4056 ltdefs->rx_iip4.ltype_mask);
4058 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4059 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4060 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4061 (ltdefs->rx_oip6.ltype_match << 4) |
4062 ltdefs->rx_oip6.ltype_mask);
4063 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4064 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4065 (ltdefs->rx_iip6.ltype_match << 4) |
4066 ltdefs->rx_iip6.ltype_mask);
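/* Helper below: INITs one zeroed bandwidth profile context through the
 * admin queue; qidx encodes the profile index in bits [13:0] and the
 * policer layer in bits [15:14].
 */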
4069 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4070 int layer, int prof_idx)
4072 struct nix_cn10k_aq_enq_req aq_req;
4075 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4077 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4078 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4079 aq_req.op = NIX_AQ_INSTOP_INIT;
4081 /* Context is all zeros, submit to AQ */
4082 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4083 (struct nix_aq_enq_req *)&aq_req, NULL);
4085 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4090 static int nix_setup_ipolicers(struct rvu *rvu,
4091 struct nix_hw *nix_hw, int blkaddr)
4093 struct rvu_hwinfo *hw = rvu->hw;
4094 struct nix_ipolicer *ipolicer;
4095 int err, layer, prof_idx;
4098 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4099 if (!(cfg & BIT_ULL(61))) {
4100 hw->cap.ipolicer = false;
4104 hw->cap.ipolicer = true;
4105 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4106 sizeof(*ipolicer), GFP_KERNEL);
4107 if (!nix_hw->ipolicer)
4110 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4112 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4113 ipolicer = &nix_hw->ipolicer[layer];
4115 case BAND_PROF_LEAF_LAYER:
4116 ipolicer->band_prof.max = cfg & 0XFFFF;
4118 case BAND_PROF_MID_LAYER:
4119 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4121 case BAND_PROF_TOP_LAYER:
4122 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4126 if (!ipolicer->band_prof.max)
4129 err = rvu_alloc_bitmap(&ipolicer->band_prof);
4133 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4134 ipolicer->band_prof.max,
4135 sizeof(u16), GFP_KERNEL);
4136 if (!ipolicer->pfvf_map)
4139 ipolicer->match_id = devm_kcalloc(rvu->dev,
4140 ipolicer->band_prof.max,
4141 sizeof(u16), GFP_KERNEL);
4142 if (!ipolicer->match_id)
4146 prof_idx < ipolicer->band_prof.max; prof_idx++) {
4147 /* Set AF as current owner for INIT ops to succeed */
4148 ipolicer->pfvf_map[prof_idx] = 0x00;
4150 /* There is no enable bit in the profile context,
4151 * so there is no context disable either. INIT the contexts here
4152 * so that a PF/VF later on only has to do a WRITE to
4153 * set up policer rates and config.
4155 err = nix_init_policer_context(rvu, nix_hw,
4161 /* Allocate memory for maintaining ref_counts for MID level
4162 * profiles; this will be needed for leaf layer profiles'
4165 if (layer != BAND_PROF_MID_LAYER)
4168 ipolicer->ref_count = devm_kcalloc(rvu->dev,
4169 ipolicer->band_prof.max,
4170 sizeof(u16), GFP_KERNEL);
4173 /* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
4174 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4176 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4181 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4183 struct nix_ipolicer *ipolicer;
4186 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4187 ipolicer = &nix_hw->ipolicer[layer];
4189 if (!ipolicer->band_prof.max)
4192 kfree(ipolicer->band_prof.bmap);
4196 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4197 struct nix_hw *nix_hw, u16 pcifunc)
4199 struct nix_ipolicer *ipolicer;
4200 int layer, hi_layer, prof_idx;
4202 /* Bits [15:14] in profile index represent layer */
4203 layer = (req->qidx >> 14) & 0x03;
4204 prof_idx = req->qidx & 0x3FFF;
4206 ipolicer = &nix_hw->ipolicer[layer];
4207 if (prof_idx >= ipolicer->band_prof.max)
4210 /* Check if the profile is allocated to the requesting PCIFUNC,
4211 * with the exception of AF. AF is allowed to read and update any context.
4213 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4216 /* If this profile is linked to a higher layer profile, then check
4217 * if that profile is also allocated to the requesting PCIFUNC
4220 if (!req->prof.hl_en)
4223 /* Leaf layer profile can link only to mid layer and
4224 * mid layer to top layer.
4226 if (layer == BAND_PROF_LEAF_LAYER)
4227 hi_layer = BAND_PROF_MID_LAYER;
4228 else if (layer == BAND_PROF_MID_LAYER)
4229 hi_layer = BAND_PROF_TOP_LAYER;
4233 ipolicer = &nix_hw->ipolicer[hi_layer];
4234 prof_idx = req->prof.band_prof_id;
4235 if (prof_idx >= ipolicer->band_prof.max ||
4236 ipolicer->pfvf_map[prof_idx] != pcifunc)
4242 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4243 struct nix_bandprof_alloc_req *req,
4244 struct nix_bandprof_alloc_rsp *rsp)
4246 int blkaddr, layer, prof, idx, err;
4247 u16 pcifunc = req->hdr.pcifunc;
4248 struct nix_ipolicer *ipolicer;
4249 struct nix_hw *nix_hw;
4251 if (!rvu->hw->cap.ipolicer)
4252 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4254 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4258 mutex_lock(&rvu->rsrc_lock);
4259 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4260 if (layer == BAND_PROF_INVAL_LAYER)
4262 if (!req->prof_count[layer])
4265 ipolicer = &nix_hw->ipolicer[layer];
4266 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4267 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4268 if (idx == MAX_BANDPROF_PER_PFFUNC)
4271 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4274 rsp->prof_count[layer]++;
4275 rsp->prof_idx[layer][idx] = prof;
4276 ipolicer->pfvf_map[prof] = pcifunc;
4279 mutex_unlock(&rvu->rsrc_lock);
4283 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4285 int blkaddr, layer, prof_idx, err;
4286 struct nix_ipolicer *ipolicer;
4287 struct nix_hw *nix_hw;
4289 if (!rvu->hw->cap.ipolicer)
4290 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4292 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4296 mutex_lock(&rvu->rsrc_lock);
4297 /* Free all the profiles allocated to the PCIFUNC */
4298 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4299 if (layer == BAND_PROF_INVAL_LAYER)
4301 ipolicer = &nix_hw->ipolicer[layer];
4303 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4304 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4307 /* Clear ratelimit aggregation, if any */
4308 if (layer == BAND_PROF_LEAF_LAYER &&
4309 ipolicer->match_id[prof_idx])
4310 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4312 ipolicer->pfvf_map[prof_idx] = 0x00;
4313 ipolicer->match_id[prof_idx] = 0;
4314 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4317 mutex_unlock(&rvu->rsrc_lock);
4321 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4322 struct nix_bandprof_free_req *req,
4323 struct msg_rsp *rsp)
4325 int blkaddr, layer, prof_idx, idx, err;
4326 u16 pcifunc = req->hdr.pcifunc;
4327 struct nix_ipolicer *ipolicer;
4328 struct nix_hw *nix_hw;
4331 return nix_free_all_bandprof(rvu, pcifunc);
4333 if (!rvu->hw->cap.ipolicer)
4334 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4336 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4340 mutex_lock(&rvu->rsrc_lock);
4341 /* Free the requested profile indices */
4342 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4343 if (layer == BAND_PROF_INVAL_LAYER)
4345 if (!req->prof_count[layer])
4348 ipolicer = &nix_hw->ipolicer[layer];
4349 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4350 prof_idx = req->prof_idx[layer][idx];
4351 if (prof_idx >= ipolicer->band_prof.max ||
4352 ipolicer->pfvf_map[prof_idx] != pcifunc)
4355 /* Clear ratelimit aggregation, if any */
4356 if (layer == BAND_PROF_LEAF_LAYER &&
4357 ipolicer->match_id[prof_idx])
4358 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4360 ipolicer->pfvf_map[prof_idx] = 0x00;
4361 ipolicer->match_id[prof_idx] = 0;
4362 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4363 if (idx == MAX_BANDPROF_PER_PFFUNC)
4367 mutex_unlock(&rvu->rsrc_lock);
4371 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4372 struct nix_cn10k_aq_enq_req *aq_req,
4373 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4374 u16 pcifunc, u8 ctype, u32 qidx)
4376 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4377 aq_req->hdr.pcifunc = pcifunc;
4378 aq_req->ctype = ctype;
4379 aq_req->op = NIX_AQ_INSTOP_READ;
4380 aq_req->qidx = qidx;
4382 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4383 (struct nix_aq_enq_req *)aq_req,
4384 (struct nix_aq_enq_rsp *)aq_rsp);
4387 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4388 struct nix_hw *nix_hw,
4389 struct nix_cn10k_aq_enq_req *aq_req,
4390 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4391 u32 leaf_prof, u16 mid_prof)
4393 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4394 aq_req->hdr.pcifunc = 0x00;
4395 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4396 aq_req->op = NIX_AQ_INSTOP_WRITE;
4397 aq_req->qidx = leaf_prof;
4399 aq_req->prof.band_prof_id = mid_prof;
4400 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4401 aq_req->prof.hl_en = 1;
4402 aq_req->prof_mask.hl_en = 1;
4404 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4405 (struct nix_aq_enq_req *)aq_req,
4406 (struct nix_aq_enq_rsp *)aq_rsp);
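/* Aggregation flow below: when a second leaf profile turns up with the
 * same match_id as an existing one, both leaves are linked (hl_en +
 * band_prof_id) to a common mid layer profile -- either the one the
 * matching leaf already points to, or a freshly allocated mid profile
 * initialized from the leaf's context -- and ref_count tracks how many
 * leaves share that mid profile.
 */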
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;
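
	/* Another leaf profile ('leaf_match') carries the same match_id, so
	 * both leaves must feed a common mid layer profile for the rate
	 * limit to apply to the aggregate of their flows.
	 */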
	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map 'leaf_prof' to it
		 * too, so that flows steered to different RQs but marked
		 * with the same match_id are rate limited in an aggregate
		 * fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);

		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

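	/* For bandwidth profile AQ operations the queue index packs the
	 * profile index into the low 14 bits and the layer into the bits
	 * above, hence the 0x3FFF mask and the shift by 14 below.
	 */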
	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

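/* Undo the leaf -> mid layer chaining set up by rvu_nix_setup_ratelimit_aggr():
 * drop the mid layer profile's reference and free it once the last user is
 * gone. Note that rsrc_lock is released around the AQ context read below and
 * re-acquired afterwards.
 */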
/* Called with mutex rsrc_lock */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);