1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
26 static int nix_setup_ipolicers(struct rvu *rvu,
27 struct nix_hw *nix_hw, int blkaddr);
28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
30 struct nix_hw *nix_hw, u16 pcifunc);
31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
59 enum nix_makr_fmt_indexes {
60 NIX_MARK_CFG_IP_DSCP_RED,
61 NIX_MARK_CFG_IP_DSCP_YELLOW,
62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 NIX_MARK_CFG_IP_ECN_RED,
64 NIX_MARK_CFG_IP_ECN_YELLOW,
65 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 NIX_MARK_CFG_VLAN_DEI_RED,
67 NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
72 /* For now considering MC resources needed for broadcast
73 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
75 #define MC_TBL_SIZE MC_TBL_SZ_512
76 #define MC_BUF_CNT MC_BUF_CNT_128
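/* Sizing note (illustrative, based on the counts in the comment above):
 * 12 PFs + 256 HWVFs = 268 potential bcast replication entries, so
 * MC_TBL_SZ_512 is the smallest enumerated table size that holds them all.
 */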
79 struct hlist_node node;
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
87 /* If blkaddr is 0, return the first NIX block address */
89 return rvu->nix_blkaddr[blkaddr];
91 while (i + 1 < MAX_NIX_BLKS) {
92 if (rvu->nix_blkaddr[i] == blkaddr)
93 return rvu->nix_blkaddr[i + 1];
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 if (!pfvf->nixlf || blkaddr < 0)
111 int rvu_get_nixlf_count(struct rvu *rvu)
113 int blkaddr = 0, max = 0;
114 struct rvu_block *block;
116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
118 block = &rvu->hw->block[blkaddr];
119 max += block->lf.max;
120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 struct rvu_hwinfo *hw = rvu->hw;
131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 if (!pfvf->nixlf || blkaddr < 0)
133 return NIX_AF_ERR_AF_LF_INVALID;
135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
137 return NIX_AF_ERR_AF_LF_INVALID;
140 *nix_blkaddr = blkaddr;
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 struct nix_hw **nix_hw, int *blkaddr)
148 struct rvu_pfvf *pfvf;
150 pfvf = rvu_get_pfvf(rvu, pcifunc);
151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 if (!pfvf->nixlf || *blkaddr < 0)
153 return NIX_AF_ERR_AF_LF_INVALID;
155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
157 return NIX_AF_ERR_INVALID_NIXBLK;
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
163 INIT_HLIST_HEAD(&list->head);
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
175 idx = mcast->next_free_mce;
176 mcast->next_free_mce += count;
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
182 int nix_blkaddr = 0, i = 0;
183 struct rvu *rvu = hw->rvu;
185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 while (nix_blkaddr) {
187 if (blkaddr == nix_blkaddr && hw->nix)
189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
195 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
199 /* Sync all in-flight RX packets to LLC/DRAM */
200 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
201 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
203 dev_err(rvu->dev, "NIX RX software sync failed\n");
206 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
207 int lvl, u16 pcifunc, u16 schq)
209 struct rvu_hwinfo *hw = rvu->hw;
210 struct nix_txsch *txsch;
211 struct nix_hw *nix_hw;
214 nix_hw = get_nix_hw(rvu->hw, blkaddr);
218 txsch = &nix_hw->txsch[lvl];
219 /* Check out of bounds */
220 if (schq >= txsch->schq.max)
223 mutex_lock(&rvu->rsrc_lock);
224 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
225 mutex_unlock(&rvu->rsrc_lock);
227 /* TLs aggregating traffic are shared across PF and VFs */
228 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
229 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
235 if (map_func != pcifunc)
241 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
243 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
244 struct mac_ops *mac_ops;
245 int pkind, pf, vf, lbkid;
249 pf = rvu_get_pf(pcifunc);
250 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
254 case NIX_INTF_TYPE_CGX:
255 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
256 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
258 pkind = rvu_npc_get_pkind(rvu, pf);
261 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
264 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
265 pfvf->tx_chan_base = pfvf->rx_chan_base;
266 pfvf->rx_chan_cnt = 1;
267 pfvf->tx_chan_cnt = 1;
268 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
269 rvu_npc_set_pkind(rvu, pkind, pfvf);
271 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
272 /* By default we enable pause frames */
273 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
274 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
276 lmac_id, true, true);
278 case NIX_INTF_TYPE_LBK:
279 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
281 /* If NIX1 block is present on the silicon then NIXes are
282 * assigned alternately for lbk interfaces. NIX0 should
283 * send packets on lbk link 1 channels and NIX1 should send
284 * on lbk link 0 channels for the communication between
288 if (rvu->hw->lbk_links > 1)
289 lbkid = vf & 0x1 ? 0 : 1;
291 /* Note that AF's VFs work in pairs and talk over consecutive
292 * loopback channels. Therefore, if an odd number of AF VFs is
293 * enabled, the last VF remains without a pair.
295 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
296 pfvf->tx_chan_base = vf & 0x1 ?
297 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
298 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
299 pfvf->rx_chan_cnt = 1;
300 pfvf->tx_chan_cnt = 1;
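/* Illustrative pairing (assuming a single LBK so lbkid stays 0):
 *   VF0 (vf = 0): rx_chan = lbk chan 0, tx_chan = lbk chan 1
 *   VF1 (vf = 1): rx_chan = lbk chan 1, tx_chan = lbk chan 0
 * i.e. each AF VF transmits on its partner VF's receive channel.
 */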
301 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
307 /* Add a UCAST forwarding rule in MCAM with the MAC address of the
308 * RVU PF/VF to which this NIXLF is attached.
310 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
311 pfvf->rx_chan_base, pfvf->mac_addr);
313 /* Add this PF_FUNC to bcast pkt replication list */
314 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
317 "Bcast list, failed to enable PF_FUNC 0x%x\n",
321 /* Install MCAM rule matching Ethernet broadcast mac address */
322 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
323 nixlf, pfvf->rx_chan_base);
325 pfvf->maxlen = NIC_HW_MIN_FRS;
326 pfvf->minlen = NIC_HW_MIN_FRS;
331 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
333 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
339 /* Remove this PF_FUNC from bcast pkt replication list */
340 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
343 "Bcast list, failed to disable PF_FUNC 0x%x\n",
347 /* Free and disable any MCAM entries used by this NIX LF */
348 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
350 /* Disable DMAC filters used */
351 rvu_cgx_disable_dmac_entries(rvu, pcifunc);
354 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
355 struct nix_bp_cfg_req *req,
358 u16 pcifunc = req->hdr.pcifunc;
359 struct rvu_pfvf *pfvf;
360 int blkaddr, pf, type;
364 pf = rvu_get_pf(pcifunc);
365 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
366 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
369 pfvf = rvu_get_pfvf(rvu, pcifunc);
370 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
372 chan_base = pfvf->rx_chan_base + req->chan_base;
373 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
374 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
375 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
381 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
382 int type, int chan_id)
384 int bpid, blkaddr, lmac_chan_cnt;
385 struct rvu_hwinfo *hw = rvu->hw;
386 u16 cgx_bpid_cnt, lbk_bpid_cnt;
387 struct rvu_pfvf *pfvf;
391 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
392 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
393 lmac_chan_cnt = cfg & 0xFF;
395 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
396 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
398 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
400 /* Backpressure IDs range division
401 * CGX channels are mapped to (0 - 191) BPIDs
402 * LBK channels are mapped to (192 - 255) BPIDs
403 * SDP channels are mapped to (256 - 511) BPIDs
405 * LMAC channels and BPIDs are mapped as follows
406 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
407 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
408 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
411 case NIX_INTF_TYPE_CGX:
412 if ((req->chan_base + req->chan_cnt) > 15)
414 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
415 /* Assign bpid based on cgx, lmac and chan id */
416 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
417 (lmac_id * lmac_chan_cnt) + req->chan_base;
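/* Worked example (illustrative values, not from the original source):
 * with lmac_chan_cnt = 16 and hw->lmac_per_cgx = 4, a request on
 * cgx_id 1, lmac_id 2, chan_base 3 resolves to
 *   bpid = (1 * 4 * 16) + (2 * 16) + 3 = 99
 * i.e. BPIDs are packed per LMAC in blocks of lmac_chan_cnt.
 */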
419 if (req->bpid_per_chan)
421 if (bpid > cgx_bpid_cnt)
425 case NIX_INTF_TYPE_LBK:
426 if ((req->chan_base + req->chan_cnt) > 63)
428 bpid = cgx_bpid_cnt + req->chan_base;
429 if (req->bpid_per_chan)
431 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
440 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
441 struct nix_bp_cfg_req *req,
442 struct nix_bp_cfg_rsp *rsp)
444 int blkaddr, pf, type, chan_id = 0;
445 u16 pcifunc = req->hdr.pcifunc;
446 struct rvu_pfvf *pfvf;
451 pf = rvu_get_pf(pcifunc);
452 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
454 /* Enable backpressure only for CGX mapped PFs and LBK interface */
455 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
458 pfvf = rvu_get_pfvf(rvu, pcifunc);
459 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
461 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
462 chan_base = pfvf->rx_chan_base + req->chan_base;
465 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
467 dev_warn(rvu->dev, "Failed to enable backpressure\n");
471 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
472 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
473 cfg | (bpid & 0xFF) | BIT_ULL(16));
475 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
478 for (chan = 0; chan < req->chan_cnt; chan++) {
479 /* Map the channel to the bpid assigned to it */
480 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
482 if (req->bpid_per_chan)
485 rsp->chan_cnt = req->chan_cnt;
490 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
491 u64 format, bool v4, u64 *fidx)
493 struct nix_lso_format field = {0};
495 /* IP's Length field */
496 field.layer = NIX_TXLAYER_OL3;
497 /* In IPv4, the length field is at byte offset 2; for IPv6 it's at 4 */
498 field.offset = v4 ? 2 : 4;
499 field.sizem1 = 1; /* i.e 2 bytes */
500 field.alg = NIX_LSOALG_ADD_PAYLEN;
501 rvu_write64(rvu, blkaddr,
502 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
505 /* No ID field in IPv6 header */
510 field.layer = NIX_TXLAYER_OL3;
512 field.sizem1 = 1; /* i.e 2 bytes */
513 field.alg = NIX_LSOALG_ADD_SEGNUM;
514 rvu_write64(rvu, blkaddr,
515 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
519 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
520 u64 format, u64 *fidx)
522 struct nix_lso_format field = {0};
524 /* TCP's sequence number field */
525 field.layer = NIX_TXLAYER_OL4;
527 field.sizem1 = 3; /* i.e 4 bytes */
528 field.alg = NIX_LSOALG_ADD_OFFSET;
529 rvu_write64(rvu, blkaddr,
530 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
533 /* TCP's flags field */
534 field.layer = NIX_TXLAYER_OL4;
536 field.sizem1 = 1; /* 2 bytes */
537 field.alg = NIX_LSOALG_TCP_FLAGS;
538 rvu_write64(rvu, blkaddr,
539 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
543 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
545 u64 cfg, idx, fidx = 0;
547 /* Get max HW supported format indices */
548 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
549 nix_hw->lso.total = cfg;
552 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
553 /* For TSO, set first and middle segment flags to
554 * mask out PSH, RST & FIN flags in TCP packet
556 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
557 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
558 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
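/* Illustrative decode of the mask above: 0xFFF2 clears bits 0, 2 and 3
 * of the TCP flags field, i.e. FIN (0x01), RST (0x04) and PSH (0x08)
 * are zeroed in first/middle segments while SYN, ACK, URG, ECE and CWR
 * pass through unchanged.
 */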
560 /* Setup default static LSO formats
562 * Configure format fields for TCPv4 segmentation offload
564 idx = NIX_LSO_FORMAT_IDX_TSOV4;
565 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
566 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
568 /* Set rest of the fields to NOP */
569 for (; fidx < 8; fidx++) {
570 rvu_write64(rvu, blkaddr,
571 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
573 nix_hw->lso.in_use++;
575 /* Configure format fields for TCPv6 segmentation offload */
576 idx = NIX_LSO_FORMAT_IDX_TSOV6;
578 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
579 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
581 /* Set rest of the fields to NOP */
582 for (; fidx < 8; fidx++) {
583 rvu_write64(rvu, blkaddr,
584 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
586 nix_hw->lso.in_use++;
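/* Summary of the LSO field algorithms used by the helpers above
 * (descriptive note; exact per-segment behaviour is defined by hardware):
 *   NIX_LSOALG_ADD_PAYLEN - patch the IP total/payload length per segment
 *   NIX_LSOALG_ADD_SEGNUM - bump the IPv4 ID for every segment
 *   NIX_LSOALG_ADD_OFFSET - advance the TCP sequence number by the running
 *                           payload offset
 *   NIX_LSOALG_TCP_FLAGS  - apply the first/middle/last flag masks from
 *                           NIX_AF_LSO_CFG
 */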
589 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
591 kfree(pfvf->rq_bmap);
592 kfree(pfvf->sq_bmap);
593 kfree(pfvf->cq_bmap);
595 qmem_free(rvu->dev, pfvf->rq_ctx);
597 qmem_free(rvu->dev, pfvf->sq_ctx);
599 qmem_free(rvu->dev, pfvf->cq_ctx);
601 qmem_free(rvu->dev, pfvf->rss_ctx);
602 if (pfvf->nix_qints_ctx)
603 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
604 if (pfvf->cq_ints_ctx)
605 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
607 pfvf->rq_bmap = NULL;
608 pfvf->cq_bmap = NULL;
609 pfvf->sq_bmap = NULL;
613 pfvf->rss_ctx = NULL;
614 pfvf->nix_qints_ctx = NULL;
615 pfvf->cq_ints_ctx = NULL;
618 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
619 struct rvu_pfvf *pfvf, int nixlf,
620 int rss_sz, int rss_grps, int hwctx_size,
623 int err, grp, num_indices;
625 /* RSS is not requested for this NIXLF */
628 num_indices = rss_sz * rss_grps;
630 /* Alloc NIX RSS HW context memory and config the base */
631 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
635 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
636 (u64)pfvf->rss_ctx->iova);
638 /* Config full RSS table size, enable RSS and caching */
639 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
640 BIT_ULL(36) | BIT_ULL(4) |
641 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
643 /* Config RSS group offset and sizes */
644 for (grp = 0; grp < rss_grps; grp++)
645 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
646 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
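/* Worked example (illustrative values): rss_sz = 256, grp = 1 gives
 *   ((ilog2(256) - 1) << 16) | (256 * 1) = (7 << 16) | 256
 * i.e. group 1 starts at indirection-table offset 256 with a size
 * encoding of log2(size) - 1.
 */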
650 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
651 struct nix_aq_inst_s *inst)
653 struct admin_queue *aq = block->aq;
654 struct nix_aq_res_s *result;
658 result = (struct nix_aq_res_s *)aq->res->base;
660 /* Get current head pointer where to append this instruction */
661 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
662 head = (reg >> 4) & AQ_PTR_MASK;
664 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
665 (void *)inst, aq->inst->entry_sz);
666 memset(result, 0, sizeof(*result));
667 /* sync into memory */
670 /* Ring the doorbell and wait for result */
671 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
672 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
680 if (result->compcode != NIX_AQ_COMP_GOOD)
681 /* TODO: Replace this with some error code */
687 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
688 struct nix_aq_enq_req *req,
689 struct nix_aq_enq_rsp *rsp)
691 struct rvu_hwinfo *hw = rvu->hw;
692 u16 pcifunc = req->hdr.pcifunc;
693 int nixlf, blkaddr, rc = 0;
694 struct nix_aq_inst_s inst;
695 struct rvu_block *block;
696 struct admin_queue *aq;
697 struct rvu_pfvf *pfvf;
702 blkaddr = nix_hw->blkaddr;
703 block = &hw->block[blkaddr];
706 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
707 return NIX_AF_ERR_AQ_ENQUEUE;
710 pfvf = rvu_get_pfvf(rvu, pcifunc);
711 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
713 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
714 * operations done by AF itself.
716 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
717 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
718 if (!pfvf->nixlf || nixlf < 0)
719 return NIX_AF_ERR_AF_LF_INVALID;
722 switch (req->ctype) {
723 case NIX_AQ_CTYPE_RQ:
724 /* Check if index exceeds max no of queues */
725 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
726 rc = NIX_AF_ERR_AQ_ENQUEUE;
728 case NIX_AQ_CTYPE_SQ:
729 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
730 rc = NIX_AF_ERR_AQ_ENQUEUE;
732 case NIX_AQ_CTYPE_CQ:
733 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
734 rc = NIX_AF_ERR_AQ_ENQUEUE;
736 case NIX_AQ_CTYPE_RSS:
737 /* Check if RSS is enabled and qidx is within range */
738 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
739 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
740 (req->qidx >= (256UL << (cfg & 0xF))))
741 rc = NIX_AF_ERR_AQ_ENQUEUE;
743 case NIX_AQ_CTYPE_MCE:
744 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
746 /* Check if index exceeds MCE list length */
747 if (!nix_hw->mcast.mce_ctx ||
748 (req->qidx >= (256UL << (cfg & 0xF))))
749 rc = NIX_AF_ERR_AQ_ENQUEUE;
751 /* Adding multicast lists for requests from PF/VFs is not
752 * yet supported, so ignore this.
755 rc = NIX_AF_ERR_AQ_ENQUEUE;
757 case NIX_AQ_CTYPE_BANDPROF:
758 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
760 rc = NIX_AF_ERR_INVALID_BANDPROF;
763 rc = NIX_AF_ERR_AQ_ENQUEUE;
769 /* Check if SQ pointed SMQ belongs to this PF/VF or not */
770 if (req->ctype == NIX_AQ_CTYPE_SQ &&
771 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
772 (req->op == NIX_AQ_INSTOP_WRITE &&
773 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
774 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
775 pcifunc, req->sq.smq))
776 return NIX_AF_ERR_AQ_ENQUEUE;
779 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
781 inst.cindex = req->qidx;
782 inst.ctype = req->ctype;
784 /* Enqueuing multiple instructions is not supported currently,
785 * so always choose the first entry in result memory.
787 inst.res_addr = (u64)aq->res->iova;
789 /* Hardware uses the same aq->res->base for updating the result of
790 * the previous instruction, hence wait here till it is done.
792 spin_lock(&aq->lock);
794 /* Clean result + context memory */
795 memset(aq->res->base, 0, aq->res->entry_sz);
796 /* Context needs to be written at RES_ADDR + 128 */
797 ctx = aq->res->base + 128;
798 /* Mask needs to be written at RES_ADDR + 256 */
799 mask = aq->res->base + 256;
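/* Layout of the single result entry used here (as set up above):
 * result at offset 0, context at +128, mask at +256. For WRITE ops,
 * presumably only the context bits that are set in the mask are
 * updated by hardware.
 */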
802 case NIX_AQ_INSTOP_WRITE:
803 if (req->ctype == NIX_AQ_CTYPE_RQ)
804 memcpy(mask, &req->rq_mask,
805 sizeof(struct nix_rq_ctx_s));
806 else if (req->ctype == NIX_AQ_CTYPE_SQ)
807 memcpy(mask, &req->sq_mask,
808 sizeof(struct nix_sq_ctx_s));
809 else if (req->ctype == NIX_AQ_CTYPE_CQ)
810 memcpy(mask, &req->cq_mask,
811 sizeof(struct nix_cq_ctx_s));
812 else if (req->ctype == NIX_AQ_CTYPE_RSS)
813 memcpy(mask, &req->rss_mask,
814 sizeof(struct nix_rsse_s));
815 else if (req->ctype == NIX_AQ_CTYPE_MCE)
816 memcpy(mask, &req->mce_mask,
817 sizeof(struct nix_rx_mce_s));
818 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
819 memcpy(mask, &req->prof_mask,
820 sizeof(struct nix_bandprof_s));
822 case NIX_AQ_INSTOP_INIT:
823 if (req->ctype == NIX_AQ_CTYPE_RQ)
824 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
825 else if (req->ctype == NIX_AQ_CTYPE_SQ)
826 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
827 else if (req->ctype == NIX_AQ_CTYPE_CQ)
828 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
829 else if (req->ctype == NIX_AQ_CTYPE_RSS)
830 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
831 else if (req->ctype == NIX_AQ_CTYPE_MCE)
832 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
833 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
834 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
836 case NIX_AQ_INSTOP_NOP:
837 case NIX_AQ_INSTOP_READ:
838 case NIX_AQ_INSTOP_LOCK:
839 case NIX_AQ_INSTOP_UNLOCK:
842 rc = NIX_AF_ERR_AQ_ENQUEUE;
843 spin_unlock(&aq->lock);
847 /* Submit the instruction to AQ */
848 rc = nix_aq_enqueue_wait(rvu, block, &inst);
850 spin_unlock(&aq->lock);
854 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
855 if (req->op == NIX_AQ_INSTOP_INIT) {
856 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
857 __set_bit(req->qidx, pfvf->rq_bmap);
858 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
859 __set_bit(req->qidx, pfvf->sq_bmap);
860 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
861 __set_bit(req->qidx, pfvf->cq_bmap);
864 if (req->op == NIX_AQ_INSTOP_WRITE) {
865 if (req->ctype == NIX_AQ_CTYPE_RQ) {
866 ena = (req->rq.ena & req->rq_mask.ena) |
867 (test_bit(req->qidx, pfvf->rq_bmap) &
870 __set_bit(req->qidx, pfvf->rq_bmap);
872 __clear_bit(req->qidx, pfvf->rq_bmap);
874 if (req->ctype == NIX_AQ_CTYPE_SQ) {
875 ena = (req->rq.ena & req->sq_mask.ena) |
876 (test_bit(req->qidx, pfvf->sq_bmap) &
879 __set_bit(req->qidx, pfvf->sq_bmap);
881 __clear_bit(req->qidx, pfvf->sq_bmap);
883 if (req->ctype == NIX_AQ_CTYPE_CQ) {
884 ena = (req->rq.ena & req->cq_mask.ena) |
885 (test_bit(req->qidx, pfvf->cq_bmap) &
888 __set_bit(req->qidx, pfvf->cq_bmap);
890 __clear_bit(req->qidx, pfvf->cq_bmap);
895 /* Copy read context into mailbox */
896 if (req->op == NIX_AQ_INSTOP_READ) {
897 if (req->ctype == NIX_AQ_CTYPE_RQ)
898 memcpy(&rsp->rq, ctx,
899 sizeof(struct nix_rq_ctx_s));
900 else if (req->ctype == NIX_AQ_CTYPE_SQ)
901 memcpy(&rsp->sq, ctx,
902 sizeof(struct nix_sq_ctx_s));
903 else if (req->ctype == NIX_AQ_CTYPE_CQ)
904 memcpy(&rsp->cq, ctx,
905 sizeof(struct nix_cq_ctx_s));
906 else if (req->ctype == NIX_AQ_CTYPE_RSS)
907 memcpy(&rsp->rss, ctx,
908 sizeof(struct nix_rsse_s));
909 else if (req->ctype == NIX_AQ_CTYPE_MCE)
910 memcpy(&rsp->mce, ctx,
911 sizeof(struct nix_rx_mce_s));
912 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
913 memcpy(&rsp->prof, ctx,
914 sizeof(struct nix_bandprof_s));
918 spin_unlock(&aq->lock);
922 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
923 struct nix_aq_enq_rsp *rsp)
925 struct nix_hw *nix_hw;
928 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
930 return NIX_AF_ERR_AF_LF_INVALID;
932 nix_hw = get_nix_hw(rvu->hw, blkaddr);
936 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
939 static const char *nix_get_ctx_name(int ctype)
942 case NIX_AQ_CTYPE_CQ:
944 case NIX_AQ_CTYPE_SQ:
946 case NIX_AQ_CTYPE_RQ:
948 case NIX_AQ_CTYPE_RSS:
954 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
956 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
957 struct nix_aq_enq_req aq_req;
962 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
963 return NIX_AF_ERR_AQ_ENQUEUE;
965 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
966 aq_req.hdr.pcifunc = req->hdr.pcifunc;
968 if (req->ctype == NIX_AQ_CTYPE_CQ) {
970 aq_req.cq_mask.ena = 1;
971 aq_req.cq.bp_ena = 0;
972 aq_req.cq_mask.bp_ena = 1;
973 q_cnt = pfvf->cq_ctx->qsize;
974 bmap = pfvf->cq_bmap;
976 if (req->ctype == NIX_AQ_CTYPE_SQ) {
978 aq_req.sq_mask.ena = 1;
979 q_cnt = pfvf->sq_ctx->qsize;
980 bmap = pfvf->sq_bmap;
982 if (req->ctype == NIX_AQ_CTYPE_RQ) {
984 aq_req.rq_mask.ena = 1;
985 q_cnt = pfvf->rq_ctx->qsize;
986 bmap = pfvf->rq_bmap;
989 aq_req.ctype = req->ctype;
990 aq_req.op = NIX_AQ_INSTOP_WRITE;
992 for (qidx = 0; qidx < q_cnt; qidx++) {
993 if (!test_bit(qidx, bmap))
996 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
999 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1000 nix_get_ctx_name(req->ctype), qidx);
1007 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1008 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1010 struct nix_aq_enq_req lock_ctx_req;
1013 if (req->op != NIX_AQ_INSTOP_INIT)
1016 if (req->ctype == NIX_AQ_CTYPE_MCE ||
1017 req->ctype == NIX_AQ_CTYPE_DYNO)
1020 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1021 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1022 lock_ctx_req.ctype = req->ctype;
1023 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1024 lock_ctx_req.qidx = req->qidx;
1025 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1028 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1030 nix_get_ctx_name(req->ctype), req->qidx);
1034 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1035 struct nix_aq_enq_req *req,
1036 struct nix_aq_enq_rsp *rsp)
1040 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1042 err = nix_lf_hwctx_lockdown(rvu, req);
1047 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1048 struct nix_aq_enq_req *req,
1049 struct nix_aq_enq_rsp *rsp)
1051 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1054 /* CN10K mbox handler */
1055 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1056 struct nix_cn10k_aq_enq_req *req,
1057 struct nix_cn10k_aq_enq_rsp *rsp)
1059 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1060 (struct nix_aq_enq_rsp *)rsp);
1063 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1064 struct hwctx_disable_req *req,
1065 struct msg_rsp *rsp)
1067 return nix_lf_hwctx_disable(rvu, req);
1070 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1071 struct nix_lf_alloc_req *req,
1072 struct nix_lf_alloc_rsp *rsp)
1074 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1075 struct rvu_hwinfo *hw = rvu->hw;
1076 u16 pcifunc = req->hdr.pcifunc;
1077 struct rvu_block *block;
1078 struct rvu_pfvf *pfvf;
1082 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1083 return NIX_AF_ERR_PARAM;
1086 req->way_mask &= 0xFFFF;
1088 pfvf = rvu_get_pfvf(rvu, pcifunc);
1089 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1090 if (!pfvf->nixlf || blkaddr < 0)
1091 return NIX_AF_ERR_AF_LF_INVALID;
1093 block = &hw->block[blkaddr];
1094 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1096 return NIX_AF_ERR_AF_LF_INVALID;
1098 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1099 if (req->npa_func) {
1100 /* If default, use 'this' NIXLF's PFFUNC */
1101 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1102 req->npa_func = pcifunc;
1103 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1104 return NIX_AF_INVAL_NPA_PF_FUNC;
1107 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1108 if (req->sso_func) {
1109 /* If default, use 'this' NIXLF's PFFUNC */
1110 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1111 req->sso_func = pcifunc;
1112 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1113 return NIX_AF_INVAL_SSO_PF_FUNC;
1116 /* If RSS is being enabled, check if requested config is valid.
1117 * RSS table size should be a power of two, otherwise
1118 * RSS_GRP::OFFSET + adder might go beyond that group or
1119 * the entire table can't be used.
1121 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1122 !is_power_of_2(req->rss_sz)))
1123 return NIX_AF_ERR_RSS_SIZE_INVALID;
1126 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1127 return NIX_AF_ERR_RSS_GRPS_INVALID;
1129 /* Reset this NIX LF */
1130 err = rvu_lf_reset(rvu, block, nixlf);
1132 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1133 block->addr - BLKADDR_NIX0, nixlf);
1134 return NIX_AF_ERR_LF_RESET;
1137 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1139 /* Alloc NIX RQ HW context memory and config the base */
1140 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1141 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1145 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1149 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1150 (u64)pfvf->rq_ctx->iova);
1152 /* Set caching and queue count in HW */
1153 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1154 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1156 /* Alloc NIX SQ HW context memory and config the base */
1157 hwctx_size = 1UL << (ctx_cfg & 0xF);
1158 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1162 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1167 (u64)pfvf->sq_ctx->iova);
1169 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1170 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1172 /* Alloc NIX CQ HW context memory and config the base */
1173 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1174 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1178 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1182 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1183 (u64)pfvf->cq_ctx->iova);
1185 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1186 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1188 /* Initialize receive side scaling (RSS) */
1189 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1190 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1191 req->rss_grps, hwctx_size, req->way_mask);
1195 /* Alloc memory for CQINT's HW contexts */
1196 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1197 qints = (cfg >> 24) & 0xFFF;
1198 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1199 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1203 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1204 (u64)pfvf->cq_ints_ctx->iova);
1206 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1207 BIT_ULL(36) | req->way_mask << 20);
1209 /* Alloc memory for QINT's HW contexts */
1210 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1211 qints = (cfg >> 12) & 0xFFF;
1212 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1213 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1217 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1218 (u64)pfvf->nix_qints_ctx->iova);
1219 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1220 BIT_ULL(36) | req->way_mask << 20);
1222 /* Set up VLANX TPIDs.
1223 * Use VLAN1 for 802.1Q
1224 * and VLAN0 for 802.1AD.
1226 cfg = (0x8100ULL << 16) | 0x88A8ULL;
1227 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
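/* With the value above, the low 16 bits carry 0x88A8 (802.1AD) for VLAN0
 * and bits 16-31 carry 0x8100 (802.1Q) for VLAN1, matching the comment;
 * the exact bit positions are assumed from this encoding.
 */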
1229 /* Enable LMTST for this NIX LF */
1230 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1232 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1234 cfg = req->npa_func;
1236 cfg |= (u64)req->sso_func << 16;
1238 cfg |= (u64)req->xqe_sz << 33;
1239 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1241 /* Config Rx pkt length, csum checks and apad enable / disable */
1242 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1244 /* Configure pkind for TX parse config */
1245 cfg = NPC_TX_DEF_PKIND;
1246 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1248 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1249 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1253 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1254 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1256 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1257 rvu_write64(rvu, blkaddr,
1258 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1259 VTAGSIZE_T4 | VTAG_STRIP);
1264 nix_ctx_free(rvu, pfvf);
1268 /* Set macaddr of this PF/VF */
1269 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1271 /* set SQB size info */
1272 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1273 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1274 rsp->rx_chan_base = pfvf->rx_chan_base;
1275 rsp->tx_chan_base = pfvf->tx_chan_base;
1276 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1277 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1278 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1279 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1280 /* Get HW supported stat count */
1281 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1282 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1283 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1284 /* Get count of CQ IRQs and error IRQs supported per LF */
1285 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1286 rsp->qints = ((cfg >> 12) & 0xFFF);
1287 rsp->cints = ((cfg >> 24) & 0xFFF);
1288 rsp->cgx_links = hw->cgx_links;
1289 rsp->lbk_links = hw->lbk_links;
1290 rsp->sdp_links = hw->sdp_links;
1295 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1296 struct msg_rsp *rsp)
1298 struct rvu_hwinfo *hw = rvu->hw;
1299 u16 pcifunc = req->hdr.pcifunc;
1300 struct rvu_block *block;
1301 int blkaddr, nixlf, err;
1302 struct rvu_pfvf *pfvf;
1304 pfvf = rvu_get_pfvf(rvu, pcifunc);
1305 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1306 if (!pfvf->nixlf || blkaddr < 0)
1307 return NIX_AF_ERR_AF_LF_INVALID;
1309 block = &hw->block[blkaddr];
1310 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1312 return NIX_AF_ERR_AF_LF_INVALID;
1314 if (req->flags & NIX_LF_DISABLE_FLOWS)
1315 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1317 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1319 /* Free any tx vtag def entries used by this NIX LF */
1320 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1321 nix_free_tx_vtag_entries(rvu, pcifunc);
1323 nix_interface_deinit(rvu, pcifunc, nixlf);
1325 /* Reset this NIX LF */
1326 err = rvu_lf_reset(rvu, block, nixlf);
1328 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1329 block->addr - BLKADDR_NIX0, nixlf);
1330 return NIX_AF_ERR_LF_RESET;
1333 nix_ctx_free(rvu, pfvf);
1338 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1339 struct nix_mark_format_cfg *req,
1340 struct nix_mark_format_cfg_rsp *rsp)
1342 u16 pcifunc = req->hdr.pcifunc;
1343 struct nix_hw *nix_hw;
1344 struct rvu_pfvf *pfvf;
1348 pfvf = rvu_get_pfvf(rvu, pcifunc);
1349 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1350 if (!pfvf->nixlf || blkaddr < 0)
1351 return NIX_AF_ERR_AF_LF_INVALID;
1353 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1357 cfg = (((u32)req->offset & 0x7) << 16) |
1358 (((u32)req->y_mask & 0xF) << 12) |
1359 (((u32)req->y_val & 0xF) << 8) |
1360 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1362 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1364 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1365 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1366 return NIX_AF_ERR_MARK_CFG_FAIL;
1369 rsp->mark_format_idx = rc;
1373 /* Disable shaping of pkts by a scheduler queue
1374 * at a given scheduler level.
1376 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1379 u64 cir_reg = 0, pir_reg = 0;
1383 case NIX_TXSCH_LVL_TL1:
1384 cir_reg = NIX_AF_TL1X_CIR(schq);
1385 pir_reg = 0; /* PIR not available at TL1 */
1387 case NIX_TXSCH_LVL_TL2:
1388 cir_reg = NIX_AF_TL2X_CIR(schq);
1389 pir_reg = NIX_AF_TL2X_PIR(schq);
1391 case NIX_TXSCH_LVL_TL3:
1392 cir_reg = NIX_AF_TL3X_CIR(schq);
1393 pir_reg = NIX_AF_TL3X_PIR(schq);
1395 case NIX_TXSCH_LVL_TL4:
1396 cir_reg = NIX_AF_TL4X_CIR(schq);
1397 pir_reg = NIX_AF_TL4X_PIR(schq);
1403 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1404 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1408 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1409 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1412 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1415 struct rvu_hwinfo *hw = rvu->hw;
1418 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1421 /* Reset TL4's SDP link config */
1422 if (lvl == NIX_TXSCH_LVL_TL4)
1423 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1425 if (lvl != NIX_TXSCH_LVL_TL2)
1428 /* Reset TL2's CGX or LBK link config */
1429 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1430 rvu_write64(rvu, blkaddr,
1431 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1434 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1436 struct rvu_hwinfo *hw = rvu->hw;
1437 int pf = rvu_get_pf(pcifunc);
1438 u8 cgx_id = 0, lmac_id = 0;
1440 if (is_afvf(pcifunc)) {/* LBK links */
1441 return hw->cgx_links;
1442 } else if (is_pf_cgxmapped(rvu, pf)) {
1443 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1444 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1448 return hw->cgx_links + hw->lbk_links;
1451 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1452 int link, int *start, int *end)
1454 struct rvu_hwinfo *hw = rvu->hw;
1455 int pf = rvu_get_pf(pcifunc);
1457 if (is_afvf(pcifunc)) { /* LBK links */
1458 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1459 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1460 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1461 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1462 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1463 } else { /* SDP link */
1464 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1465 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1466 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
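/* Illustrative range computation (made-up capability values): with
 * 3 CGX links of 16 TXSCHQs each and 1 LBK link of 8, the SDP range
 * would start at (16 * 3) + (8 * 1) = 56 and span
 * nix_txsch_per_sdp_lmac entries.
 */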
1470 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1471 struct nix_hw *nix_hw,
1472 struct nix_txsch_alloc_req *req)
1474 struct rvu_hwinfo *hw = rvu->hw;
1475 int schq, req_schq, free_cnt;
1476 struct nix_txsch *txsch;
1477 int link, start, end;
1479 txsch = &nix_hw->txsch[lvl];
1480 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1485 link = nix_get_tx_link(rvu, pcifunc);
1487 /* For traffic aggregating scheduler level, one queue is enough */
1488 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1490 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1494 /* Get free SCHQ count and check if request can be accommodated */
1495 if (hw->cap.nix_fixed_txschq_mapping) {
1496 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1497 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1498 if (end <= txsch->schq.max && schq < end &&
1499 !test_bit(schq, txsch->schq.bmap))
1504 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1507 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1508 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1510 /* If contiguous queues are needed, check for availability */
1511 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1512 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1513 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1518 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1519 struct nix_txsch_alloc_rsp *rsp,
1520 int lvl, int start, int end)
1522 struct rvu_hwinfo *hw = rvu->hw;
1523 u16 pcifunc = rsp->hdr.pcifunc;
1526 /* For traffic aggregating levels, queue alloc is based
1527 * on the transmit link to which the PF_FUNC is mapped.
1529 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1530 /* A single TL queue is allocated */
1531 if (rsp->schq_contig[lvl]) {
1532 rsp->schq_contig[lvl] = 1;
1533 rsp->schq_contig_list[lvl][0] = start;
1536 /* Both contig and non-contig reqs don't make sense here */
1537 if (rsp->schq_contig[lvl])
1540 if (rsp->schq[lvl]) {
1542 rsp->schq_list[lvl][0] = start;
1547 /* Adjust the queue request count if HW supports
1548 * only a fixed one-queue-per-level configuration.
1550 if (hw->cap.nix_fixed_txschq_mapping) {
1551 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1553 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1554 rsp->schq_contig[lvl] = 0;
1559 if (rsp->schq_contig[lvl]) {
1560 rsp->schq_contig[lvl] = 1;
1561 set_bit(schq, txsch->schq.bmap);
1562 rsp->schq_contig_list[lvl][0] = schq;
1564 } else if (rsp->schq[lvl]) {
1566 set_bit(schq, txsch->schq.bmap);
1567 rsp->schq_list[lvl][0] = schq;
1572 /* Allocate contiguous queue indices requested first */
1573 if (rsp->schq_contig[lvl]) {
1574 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1575 txsch->schq.max, start,
1576 rsp->schq_contig[lvl], 0);
1578 rsp->schq_contig[lvl] = 0;
1579 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1580 set_bit(schq, txsch->schq.bmap);
1581 rsp->schq_contig_list[lvl][idx] = schq;
1586 /* Allocate non-contiguous queue indices */
1587 if (rsp->schq[lvl]) {
1589 for (schq = start; schq < end; schq++) {
1590 if (!test_bit(schq, txsch->schq.bmap)) {
1591 set_bit(schq, txsch->schq.bmap);
1592 rsp->schq_list[lvl][idx++] = schq;
1594 if (idx == rsp->schq[lvl])
1597 /* Update how many were allocated */
1598 rsp->schq[lvl] = idx;
1602 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1603 struct nix_txsch_alloc_req *req,
1604 struct nix_txsch_alloc_rsp *rsp)
1606 struct rvu_hwinfo *hw = rvu->hw;
1607 u16 pcifunc = req->hdr.pcifunc;
1608 int link, blkaddr, rc = 0;
1609 int lvl, idx, start, end;
1610 struct nix_txsch *txsch;
1611 struct rvu_pfvf *pfvf;
1612 struct nix_hw *nix_hw;
1616 pfvf = rvu_get_pfvf(rvu, pcifunc);
1617 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1618 if (!pfvf->nixlf || blkaddr < 0)
1619 return NIX_AF_ERR_AF_LF_INVALID;
1621 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1625 mutex_lock(&rvu->rsrc_lock);
1627 /* Check if request is valid as per HW capabilities
1628 * and can be accommodated.
1630 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1631 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1636 /* Allocate requested Tx scheduler queues */
1637 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1638 txsch = &nix_hw->txsch[lvl];
1639 pfvf_map = txsch->pfvf_map;
1641 if (!req->schq[lvl] && !req->schq_contig[lvl])
1644 rsp->schq[lvl] = req->schq[lvl];
1645 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1647 link = nix_get_tx_link(rvu, pcifunc);
1649 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1652 } else if (hw->cap.nix_fixed_txschq_mapping) {
1653 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1656 end = txsch->schq.max;
1659 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1661 /* Reset queue config */
1662 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1663 schq = rsp->schq_contig_list[lvl][idx];
1664 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1665 NIX_TXSCHQ_CFG_DONE))
1666 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1667 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1668 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1671 for (idx = 0; idx < req->schq[lvl]; idx++) {
1672 schq = rsp->schq_list[lvl][idx];
1673 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1674 NIX_TXSCHQ_CFG_DONE))
1675 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1676 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1677 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1681 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1682 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1683 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1684 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1685 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1688 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1690 mutex_unlock(&rvu->rsrc_lock);
1694 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1695 int smq, u16 pcifunc, int nixlf)
1697 int pf = rvu_get_pf(pcifunc);
1698 u8 cgx_id = 0, lmac_id = 0;
1699 int err, restore_tx_en = 0;
1702 /* enable cgx tx if disabled */
1703 if (is_pf_cgxmapped(rvu, pf)) {
1704 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1705 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1710 /* Do SMQ flush and set enqueue xoff */
1711 cfg |= BIT_ULL(50) | BIT_ULL(49);
1712 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1714 /* Disable backpressure from physical link,
1715 * otherwise SMQ flush may stall.
1717 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1719 /* Wait for flush to complete */
1720 err = rvu_poll_reg(rvu, blkaddr,
1721 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1724 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1726 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1727 /* restore cgx tx state */
1729 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1732 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1734 int blkaddr, nixlf, lvl, schq, err;
1735 struct rvu_hwinfo *hw = rvu->hw;
1736 struct nix_txsch *txsch;
1737 struct nix_hw *nix_hw;
1739 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1741 return NIX_AF_ERR_AF_LF_INVALID;
1743 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1747 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1749 return NIX_AF_ERR_AF_LF_INVALID;
1751 /* Disable TL2/3 queue links before SMQ flush */
1752 mutex_lock(&rvu->rsrc_lock);
1753 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1754 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1757 txsch = &nix_hw->txsch[lvl];
1758 for (schq = 0; schq < txsch->schq.max; schq++) {
1759 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1761 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1766 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1767 for (schq = 0; schq < txsch->schq.max; schq++) {
1768 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1770 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1773 /* Now free scheduler queues to free pool */
1774 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1775 /* TLs above the aggregation level are shared across the PF
1776 * and its VFs, hence skip freeing them.
1778 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1781 txsch = &nix_hw->txsch[lvl];
1782 for (schq = 0; schq < txsch->schq.max; schq++) {
1783 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1785 rvu_free_rsrc(&txsch->schq, schq);
1786 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1789 mutex_unlock(&rvu->rsrc_lock);
1791 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1792 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1793 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1795 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1800 static int nix_txschq_free_one(struct rvu *rvu,
1801 struct nix_txsch_free_req *req)
1803 struct rvu_hwinfo *hw = rvu->hw;
1804 u16 pcifunc = req->hdr.pcifunc;
1805 int lvl, schq, nixlf, blkaddr;
1806 struct nix_txsch *txsch;
1807 struct nix_hw *nix_hw;
1810 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1812 return NIX_AF_ERR_AF_LF_INVALID;
1814 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1818 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1820 return NIX_AF_ERR_AF_LF_INVALID;
1822 lvl = req->schq_lvl;
1824 txsch = &nix_hw->txsch[lvl];
1826 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1829 pfvf_map = txsch->pfvf_map;
1830 mutex_lock(&rvu->rsrc_lock);
1832 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1833 mutex_unlock(&rvu->rsrc_lock);
1837 /* Flush if it is an SMQ. The onus of disabling
1838 * TL2/3 queue links before SMQ flush is on the user.
1840 if (lvl == NIX_TXSCH_LVL_SMQ)
1841 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1843 /* Free the resource */
1844 rvu_free_rsrc(&txsch->schq, schq);
1845 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1846 mutex_unlock(&rvu->rsrc_lock);
1849 return NIX_AF_ERR_TLX_INVALID;
1852 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1853 struct nix_txsch_free_req *req,
1854 struct msg_rsp *rsp)
1856 if (req->flags & TXSCHQ_FREE_ALL)
1857 return nix_txschq_free(rvu, req->hdr.pcifunc);
1859 return nix_txschq_free_one(rvu, req);
1862 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1863 int lvl, u64 reg, u64 regval)
1865 u64 regbase = reg & 0xFFFF;
1868 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1871 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1872 /* Check if this schq belongs to this PF/VF or not */
1873 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1876 parent = (regval >> 16) & 0x1FF;
1877 /* Validate MDQ's TL4 parent */
1878 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1879 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1882 /* Validate TL4's TL3 parent */
1883 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1884 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1887 /* Validate TL3's TL2 parent */
1888 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1889 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1892 /* Validate TL2's TL1 parent */
1893 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1894 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1900 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1904 if (hw->cap.nix_shaping)
1907 /* If shaping and coloring are not supported, then
1908 * *_CIR and *_PIR registers should not be configured.
1910 regbase = reg & 0xFFFF;
1913 case NIX_TXSCH_LVL_TL1:
1914 if (regbase == NIX_AF_TL1X_CIR(0))
1917 case NIX_TXSCH_LVL_TL2:
1918 if (regbase == NIX_AF_TL2X_CIR(0) ||
1919 regbase == NIX_AF_TL2X_PIR(0))
1922 case NIX_TXSCH_LVL_TL3:
1923 if (regbase == NIX_AF_TL3X_CIR(0) ||
1924 regbase == NIX_AF_TL3X_PIR(0))
1927 case NIX_TXSCH_LVL_TL4:
1928 if (regbase == NIX_AF_TL4X_CIR(0) ||
1929 regbase == NIX_AF_TL4X_PIR(0))
1936 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1937 u16 pcifunc, int blkaddr)
1942 schq = nix_get_tx_link(rvu, pcifunc);
1943 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1944 /* Skip if PF has already done the config */
1945 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1947 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1948 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1949 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1950 TXSCH_TL1_DFLT_RR_QTM);
1951 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1952 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1955 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
1956 u16 pcifunc, struct nix_txsch *txsch)
1958 struct rvu_hwinfo *hw = rvu->hw;
1959 int lbk_link_start, lbk_links;
1960 u8 pf = rvu_get_pf(pcifunc);
1963 if (!is_pf_cgxmapped(rvu, pf))
1966 lbk_link_start = hw->cgx_links;
1968 for (schq = 0; schq < txsch->schq.max; schq++) {
1969 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1971 /* Enable all LBK links with channel 63 by default so that
1972 * packets can be sent to LBK with an NPC TX MCAM rule
1974 lbk_links = hw->lbk_links;
1976 rvu_write64(rvu, blkaddr,
1977 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
1980 BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
1984 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1985 struct nix_txschq_config *req,
1986 struct msg_rsp *rsp)
1988 struct rvu_hwinfo *hw = rvu->hw;
1989 u16 pcifunc = req->hdr.pcifunc;
1990 u64 reg, regval, schq_regbase;
1991 struct nix_txsch *txsch;
1992 struct nix_hw *nix_hw;
1993 int blkaddr, idx, err;
1997 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1998 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1999 return NIX_AF_INVAL_TXSCHQ_CFG;
2001 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2005 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2009 txsch = &nix_hw->txsch[req->lvl];
2010 pfvf_map = txsch->pfvf_map;
2012 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2013 pcifunc & RVU_PFVF_FUNC_MASK) {
2014 mutex_lock(&rvu->rsrc_lock);
2015 if (req->lvl == NIX_TXSCH_LVL_TL1)
2016 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2017 mutex_unlock(&rvu->rsrc_lock);
2021 for (idx = 0; idx < req->num_regs; idx++) {
2022 reg = req->reg[idx];
2023 regval = req->regval[idx];
2024 schq_regbase = reg & 0xFFFF;
2026 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2027 txsch->lvl, reg, regval))
2028 return NIX_AF_INVAL_TXSCHQ_CFG;
2030 /* Check if shaping and coloring is supported */
2031 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2034 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2035 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2036 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2038 regval &= ~(0x7FULL << 24);
2039 regval |= ((u64)nixlf << 24);
2042 /* Clear 'BP_ENA' config, if it's not allowed */
2043 if (!hw->cap.nix_tx_link_bp) {
2044 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2045 (schq_regbase & 0xFF00) ==
2046 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2047 regval &= ~BIT_ULL(13);
2050 /* Mark config as done for TL1 by PF */
2051 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2052 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2053 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2054 mutex_lock(&rvu->rsrc_lock);
2055 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2056 NIX_TXSCHQ_CFG_DONE);
2057 mutex_unlock(&rvu->rsrc_lock);
2060 /* SMQ flush is special, hence split the register write such
2061 * that the flush is done first and the rest of the bits are written later.
2063 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2064 (regval & BIT_ULL(49))) {
2065 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2066 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2067 regval &= ~BIT_ULL(49);
2069 rvu_write64(rvu, blkaddr, reg, regval);
2072 rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
2073 &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
2078 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2079 struct nix_vtag_config *req)
2081 u64 regval = req->vtag_size;
2083 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2084 req->vtag_size > VTAGSIZE_T8)
2087 /* RX VTAG Type 7 reserved for vf vlan */
2088 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2089 return NIX_AF_ERR_RX_VTAG_INUSE;
2091 if (req->rx.capture_vtag)
2092 regval |= BIT_ULL(5);
2093 if (req->rx.strip_vtag)
2094 regval |= BIT_ULL(4);
2096 rvu_write64(rvu, blkaddr,
2097 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2101 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2102 u16 pcifunc, int index)
2104 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2105 struct nix_txvlan *vlan = &nix_hw->txvlan;
2107 if (vlan->entry2pfvf_map[index] != pcifunc)
2108 return NIX_AF_ERR_PARAM;
2110 rvu_write64(rvu, blkaddr,
2111 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2112 rvu_write64(rvu, blkaddr,
2113 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2115 vlan->entry2pfvf_map[index] = 0;
2116 rvu_free_rsrc(&vlan->rsrc, index);
2121 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2123 struct nix_txvlan *vlan;
2124 struct nix_hw *nix_hw;
2127 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2131 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2132 vlan = &nix_hw->txvlan;
2134 mutex_lock(&vlan->rsrc_lock);
2135 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2136 for (index = 0; index < vlan->rsrc.max; index++) {
2137 if (vlan->entry2pfvf_map[index] == pcifunc)
2138 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2140 mutex_unlock(&vlan->rsrc_lock);
2143 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2146 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2147 struct nix_txvlan *vlan = &nix_hw->txvlan;
2151 mutex_lock(&vlan->rsrc_lock);
2153 index = rvu_alloc_rsrc(&vlan->rsrc);
2155 mutex_unlock(&vlan->rsrc_lock);
2159 mutex_unlock(&vlan->rsrc_lock);
2161 regval = size ? vtag : vtag << 32;
2163 rvu_write64(rvu, blkaddr,
2164 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2165 rvu_write64(rvu, blkaddr,
2166 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
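/* Per the regval computed above (assuming VTAGSIZE_T4 encodes as 0):
 * an 8-byte vtag occupies the whole of NIX_AF_TX_VTAG_DEFX_DATA, while a
 * 4-byte vtag is placed in the upper 32 bits, with the size written to
 * the CTL register.
 */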
2171 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2172 struct nix_vtag_config *req)
2174 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2175 struct nix_txvlan *vlan = &nix_hw->txvlan;
2176 u16 pcifunc = req->hdr.pcifunc;
2177 int idx0 = req->tx.vtag0_idx;
2178 int idx1 = req->tx.vtag1_idx;
2181 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2182 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2183 vlan->entry2pfvf_map[idx1] != pcifunc)
2184 return NIX_AF_ERR_PARAM;
2186 mutex_lock(&vlan->rsrc_lock);
2188 if (req->tx.free_vtag0) {
2189 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2194 if (req->tx.free_vtag1)
2195 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2198 mutex_unlock(&vlan->rsrc_lock);
2202 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2203 struct nix_vtag_config *req,
2204 struct nix_vtag_config_rsp *rsp)
2206 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2207 struct nix_txvlan *vlan = &nix_hw->txvlan;
2208 u16 pcifunc = req->hdr.pcifunc;
2210 if (req->tx.cfg_vtag0) {
2212 nix_tx_vtag_alloc(rvu, blkaddr,
2213 req->tx.vtag0, req->vtag_size);
2215 if (rsp->vtag0_idx < 0)
2216 return NIX_AF_ERR_TX_VTAG_NOSPC;
2218 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2221 if (req->tx.cfg_vtag1) {
2223 nix_tx_vtag_alloc(rvu, blkaddr,
2224 req->tx.vtag1, req->vtag_size);
2226 if (rsp->vtag1_idx < 0)
2229 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2235 if (req->tx.cfg_vtag0)
2236 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2238 return NIX_AF_ERR_TX_VTAG_NOSPC;
2241 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2242 struct nix_vtag_config *req,
2243 struct nix_vtag_config_rsp *rsp)
2245 u16 pcifunc = req->hdr.pcifunc;
2246 int blkaddr, nixlf, err;
2248 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2252 if (req->cfg_type) {
2253 /* rx vtag configuration */
2254 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2256 return NIX_AF_ERR_PARAM;
2258 /* tx vtag configuration */
2259 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2260 (req->tx.free_vtag0 || req->tx.free_vtag1))
2261 return NIX_AF_ERR_PARAM;
2263 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2264 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2266 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2267 return nix_tx_vtag_decfg(rvu, blkaddr, req);
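/* A multicast/mirror (MCE) list is a chain of MCE contexts maintained by
 * NIX; nix_blk_setup_mce() writes a single entry through the admin queue,
 * pointing it at the given pcifunc and the next entry in the chain, with
 * 'eol' marking the end of the list.
 */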
2273 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2274 int mce, u8 op, u16 pcifunc, int next, bool eol)
2276 struct nix_aq_enq_req aq_req;
2279 aq_req.hdr.pcifunc = 0;
2280 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2284 /* Use RSS with RSS index 0 */
2286 aq_req.mce.index = 0;
2287 aq_req.mce.eol = eol;
2288 aq_req.mce.pf_func = pcifunc;
2289 aq_req.mce.next = next;
2291 /* All fields valid */
2292 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2294 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2296 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2297 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2303 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2304 u16 pcifunc, bool add)
2306 struct mce *mce, *tail = NULL;
2307 bool delete = false;
2309 /* Scan through the current list */
2310 hlist_for_each_entry(mce, &mce_list->head, node) {
2311 /* If already exists, then delete */
2312 if (mce->pcifunc == pcifunc && !add) {
2315 } else if (mce->pcifunc == pcifunc && add) {
2316 /* entry already exists */
2323 hlist_del(&mce->node);
2332 /* Add a new one to the list, at the tail */
2333 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2336 mce->pcifunc = pcifunc;
2338 hlist_add_head(&mce->node, &mce_list->head);
2340 hlist_add_behind(&mce->node, &tail->node);
2345 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2346 struct nix_mce_list *mce_list,
2347 int mce_idx, int mcam_index, bool add)
2349 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2350 struct npc_mcam *mcam = &rvu->hw->mcam;
2351 struct nix_mcast *mcast;
2352 struct nix_hw *nix_hw;
2358 /* Get this PF/VF func's MCE index */
2359 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2361 if (idx > (mce_idx + mce_list->max)) {
2363 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2364 __func__, idx, mce_list->max,
2365 pcifunc >> RVU_PFVF_PF_SHIFT);
2369 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2373 mcast = &nix_hw->mcast;
2374 mutex_lock(&mcast->mce_lock);
2376 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2380 /* Disable MCAM entry in NPC */
2381 if (!mce_list->count) {
2382 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2383 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2387 /* Dump the updated list to HW */
2389 last_idx = idx + mce_list->count - 1;
2390 hlist_for_each_entry(mce, &mce_list->head, node) {
2395 /* EOL should be set in last MCE */
2396 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2397 mce->pcifunc, next_idx,
2398 (next_idx > last_idx) ? true : false);
2405 mutex_unlock(&mcast->mce_lock);
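/* Return the PF level MCE list (bcast/allmulti/promisc) and its base index
 * for the given pcifunc; VFs share their parent PF's lists, hence the
 * function bits of pcifunc are masked off before the lookup.
 */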
2409 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2410 struct nix_mce_list **mce_list, int *mce_idx)
2412 struct rvu_hwinfo *hw = rvu->hw;
2413 struct rvu_pfvf *pfvf;
2415 if (!hw->cap.nix_rx_multicast ||
2416 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2422 /* Get this PF/VF func's MCE index */
2423 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2425 if (type == NIXLF_BCAST_ENTRY) {
2426 *mce_list = &pfvf->bcast_mce_list;
2427 *mce_idx = pfvf->bcast_mce_idx;
2428 } else if (type == NIXLF_ALLMULTI_ENTRY) {
2429 *mce_list = &pfvf->mcast_mce_list;
2430 *mce_idx = pfvf->mcast_mce_idx;
2431 } else if (type == NIXLF_PROMISC_ENTRY) {
2432 *mce_list = &pfvf->promisc_mce_list;
2433 *mce_idx = pfvf->promisc_mce_idx;
2440 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2443 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2444 struct npc_mcam *mcam = &rvu->hw->mcam;
2445 struct rvu_hwinfo *hw = rvu->hw;
2446 struct nix_mce_list *mce_list;
2448 /* skip multicast pkt replication for AF's VFs */
2449 if (is_afvf(pcifunc))
2452 if (!hw->cap.nix_rx_multicast)
2455 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2459 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2463 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2465 mcam_index = npc_get_nixlf_mcam_index(mcam,
2466 pcifunc & ~RVU_PFVF_FUNC_MASK,
2468 err = nix_update_mce_list(rvu, pcifunc, mce_list,
2469 mce_idx, mcam_index, add);
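/* At probe time, pre-allocate bcast/mcast/promisc MCE lists for every
 * CGX mapped PF, each sized for the PF plus all of its VFs, and seed them
 * with placeholder entries so that later updates only need AQ WRITE ops.
 */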
2473 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2475 struct nix_mcast *mcast = &nix_hw->mcast;
2476 int err, pf, numvfs, idx;
2477 struct rvu_pfvf *pfvf;
2481 /* Skip PF0 (i.e. AF) */
2482 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2483 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2484 /* If PF is not enabled, nothing to do */
2485 if (!((cfg >> 20) & 0x01))
2487 /* Get numVFs attached to this PF */
2488 numvfs = (cfg >> 12) & 0xFF;
2490 pfvf = &rvu->pf[pf];
2492 /* Is this NIX0/1 block mapped to this PF? */
2493 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2496 /* save start idx of broadcast mce list */
2497 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2498 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2500 /* save start idx of multicast mce list */
2501 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2502 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2504 /* save the start idx of promisc mce list */
2505 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2506 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2508 for (idx = 0; idx < (numvfs + 1); idx++) {
2509 /* idx-0 is for PF, followed by VFs */
2510 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2512 /* Add dummy entries now, so that we don't have to check
2513 * whether AQ_OP should be INIT/WRITE later on.
2514 * Will be updated when a NIXLF is attached/detached to
2517 err = nix_blk_setup_mce(rvu, nix_hw,
2518 pfvf->bcast_mce_idx + idx,
2524 /* add dummy entries to multicast mce list */
2525 err = nix_blk_setup_mce(rvu, nix_hw,
2526 pfvf->mcast_mce_idx + idx,
2532 /* add dummy entries to promisc mce list */
2533 err = nix_blk_setup_mce(rvu, nix_hw,
2534 pfvf->promisc_mce_idx + idx,
2544 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2546 struct nix_mcast *mcast = &nix_hw->mcast;
2547 struct rvu_hwinfo *hw = rvu->hw;
2550 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2551 size = (1ULL << size);
2553 /* Alloc memory for multicast/mirror replication entries */
2554 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2555 (256UL << MC_TBL_SIZE), size);
2559 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2560 (u64)mcast->mce_ctx->iova);
2562 /* Set max list length equal to max no of VFs per PF + PF itself */
2563 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2564 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2566 /* Alloc memory for multicast replication buffers */
2567 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2568 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2569 (8UL << MC_BUF_CNT), size);
2573 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2574 (u64)mcast->mcast_buf->iova);
2576 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2577 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2579 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2580 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2581 BIT_ULL(20) | MC_BUF_CNT);
2583 mutex_init(&mcast->mce_lock);
2585 return nix_setup_mce_tables(rvu, nix_hw);
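/* Per NIX block TX VLAN state: a bitmap of NIX_TX_VTAG_DEF_MAX vtag
 * definition entries plus an entry-to-pcifunc ownership map, both guarded
 * by vlan->rsrc_lock.
 */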
2588 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2590 struct nix_txvlan *vlan = &nix_hw->txvlan;
2593 /* Allocate resource bitmap for tx vtag def registers */
2594 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2595 err = rvu_alloc_bitmap(&vlan->rsrc);
2599 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2600 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2601 sizeof(u16), GFP_KERNEL);
2602 if (!vlan->entry2pfvf_map)
2605 mutex_init(&vlan->rsrc_lock);
2609 kfree(vlan->rsrc.bmap);
2613 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2615 struct nix_txsch *txsch;
2619 /* Get scheduler queue count of each type and alloc
2620 * a bitmap for each, for alloc/free/attach operations.
2622 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2623 txsch = &nix_hw->txsch[lvl];
2626 case NIX_TXSCH_LVL_SMQ:
2627 reg = NIX_AF_MDQ_CONST;
2629 case NIX_TXSCH_LVL_TL4:
2630 reg = NIX_AF_TL4_CONST;
2632 case NIX_TXSCH_LVL_TL3:
2633 reg = NIX_AF_TL3_CONST;
2635 case NIX_TXSCH_LVL_TL2:
2636 reg = NIX_AF_TL2_CONST;
2638 case NIX_TXSCH_LVL_TL1:
2639 reg = NIX_AF_TL1_CONST;
2642 cfg = rvu_read64(rvu, blkaddr, reg);
2643 txsch->schq.max = cfg & 0xFFFF;
2644 err = rvu_alloc_bitmap(&txsch->schq);
2648 /* Allocate memory for scheduler queues to
2649 * PF/VF pcifunc mapping info.
2651 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2652 sizeof(u32), GFP_KERNEL);
2653 if (!txsch->pfvf_map)
2655 for (schq = 0; schq < txsch->schq.max; schq++)
2656 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2661 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2662 int blkaddr, u32 cfg)
2666 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2667 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2670 if (fmt_idx >= nix_hw->mark_format.total)
2673 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2674 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2675 nix_hw->mark_format.in_use++;
2679 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2683 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2684 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2685 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2686 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2687 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2688 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2689 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2690 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2691 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2696 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2697 nix_hw->mark_format.total = (u8)total;
2698 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2700 if (!nix_hw->mark_format.cfg)
2702 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2703 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2705 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2712 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2714 /* CN10K supports LBK FIFO size 72 KB */
2715 if (rvu->hw->lbk_bufsize == 0x12000)
2716 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2718 *max_mtu = NIC_HW_MAX_FRS;
2721 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2723 /* RPM supports FIFO len 128 KB */
2724 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2725 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2727 *max_mtu = NIC_HW_MAX_FRS;
2730 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2731 struct nix_hw_info *rsp)
2733 u16 pcifunc = req->hdr.pcifunc;
2736 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2738 return NIX_AF_ERR_AF_LF_INVALID;
2740 if (is_afvf(pcifunc))
2741 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2743 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2745 rsp->min_mtu = NIC_HW_MIN_FRS;
2749 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2750 struct msg_rsp *rsp)
2752 u16 pcifunc = req->hdr.pcifunc;
2753 int i, nixlf, blkaddr, err;
2756 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2760 /* Get stats count supported by HW */
2761 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2763 /* Reset tx stats */
2764 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2765 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2767 /* Reset rx stats */
2768 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2769 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2774 /* Returns the ALG index to be set into NPC_RX_ACTION */
2775 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2779 /* Scan over existing algo entries to find a match */
2780 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2781 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
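/* Translate the NIX_FLOW_KEY_TYPE_* bits in flow_cfg into up to
 * FIELDS_PER_ALG extraction fields, each selecting a protocol layer
 * (lid/ltype) plus a header offset and byte count, while tracking the
 * running offset into the MAX_KEY_OFF byte hash key.
 */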
2787 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2789 int idx, nr_field, key_off, field_marker, keyoff_marker;
2790 int max_key_off, max_bit_pos, group_member;
2791 struct nix_rx_flowkey_alg *field;
2792 struct nix_rx_flowkey_alg tmp;
2793 u32 key_type, valid_key;
2794 int l4_key_offset = 0;
2799 #define FIELDS_PER_ALG 5
2800 #define MAX_KEY_OFF 40
2801 /* Clear all fields */
2802 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2804 /* Each of the 32 possible flow key algorithm definitions should
2805 * fall into the above incremental config (except ALG0). Otherwise a
2806 * single NPC MCAM entry is not sufficient for supporting RSS.
2808 * If a different definition or combination is needed, then the NPC MCAM
2809 * has to be programmed to filter such pkts and its action should
2810 * point to this definition to calculate flowtag or hash.
2812 * The `for loop` goes over _all_ protocol fields and the following
2813 * variables depict the state machine's forward progress logic.
2815 * keyoff_marker - Enabled when hash byte length needs to be accounted
2816 * in field->key_offset update.
2817 * field_marker - Enabled when a new field needs to be selected.
2818 * group_member - Enabled when protocol is part of a group.
2821 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2822 nr_field = 0; key_off = 0; field_marker = 1;
2823 field = &tmp; max_bit_pos = fls(flow_cfg);
2825 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2826 key_off < MAX_KEY_OFF; idx++) {
2827 key_type = BIT(idx);
2828 valid_key = flow_cfg & key_type;
2829 /* Found a field marker, reset the field values */
2831 memset(&tmp, 0, sizeof(tmp));
2833 field_marker = true;
2834 keyoff_marker = true;
2836 case NIX_FLOW_KEY_TYPE_PORT:
2837 field->sel_chan = true;
2838 /* This should be set to 1, when SEL_CHAN is set */
2841 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2842 field->lid = NPC_LID_LC;
2843 field->hdr_offset = 9; /* offset */
2844 field->bytesm1 = 0; /* 1 byte */
2845 field->ltype_match = NPC_LT_LC_IP;
2846 field->ltype_mask = 0xF;
2848 case NIX_FLOW_KEY_TYPE_IPV4:
2849 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2850 field->lid = NPC_LID_LC;
2851 field->ltype_match = NPC_LT_LC_IP;
2852 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2853 field->lid = NPC_LID_LG;
2854 field->ltype_match = NPC_LT_LG_TU_IP;
2856 field->hdr_offset = 12; /* SIP offset */
2857 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2858 field->ltype_mask = 0xF; /* Match only IPv4 */
2859 keyoff_marker = false;
2861 case NIX_FLOW_KEY_TYPE_IPV6:
2862 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2863 field->lid = NPC_LID_LC;
2864 field->ltype_match = NPC_LT_LC_IP6;
2865 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2866 field->lid = NPC_LID_LG;
2867 field->ltype_match = NPC_LT_LG_TU_IP6;
2869 field->hdr_offset = 8; /* SIP offset */
2870 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2871 field->ltype_mask = 0xF; /* Match only IPv6 */
2873 case NIX_FLOW_KEY_TYPE_TCP:
2874 case NIX_FLOW_KEY_TYPE_UDP:
2875 case NIX_FLOW_KEY_TYPE_SCTP:
2876 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2877 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2878 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2879 field->lid = NPC_LID_LD;
2880 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2881 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2882 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2883 field->lid = NPC_LID_LH;
2884 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2886 /* Enum values for NPC_LID_LD and NPC_LID_LG are the same,
2887 * so no need to change the ltype_match, just change
2888 * the lid for inner protocols
2890 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2891 (int)NPC_LT_LH_TU_TCP);
2892 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2893 (int)NPC_LT_LH_TU_UDP);
2894 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2895 (int)NPC_LT_LH_TU_SCTP);
2897 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2898 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2900 field->ltype_match |= NPC_LT_LD_TCP;
2901 group_member = true;
2902 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2903 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2905 field->ltype_match |= NPC_LT_LD_UDP;
2906 group_member = true;
2907 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2908 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2910 field->ltype_match |= NPC_LT_LD_SCTP;
2911 group_member = true;
2913 field->ltype_mask = ~field->ltype_match;
2914 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2915 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2916 /* Handle the case where any of the group items
2917 * is enabled in the group but not the final one
2921 group_member = false;
2924 field_marker = false;
2925 keyoff_marker = false;
2928 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2929 * remember the TCP key offset within the 40 byte hash key.
2931 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2932 l4_key_offset = key_off;
2934 case NIX_FLOW_KEY_TYPE_NVGRE:
2935 field->lid = NPC_LID_LD;
2936 field->hdr_offset = 4; /* VSID offset */
2938 field->ltype_match = NPC_LT_LD_NVGRE;
2939 field->ltype_mask = 0xF;
2941 case NIX_FLOW_KEY_TYPE_VXLAN:
2942 case NIX_FLOW_KEY_TYPE_GENEVE:
2943 field->lid = NPC_LID_LE;
2945 field->hdr_offset = 4;
2946 field->ltype_mask = 0xF;
2947 field_marker = false;
2948 keyoff_marker = false;
2950 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2951 field->ltype_match |= NPC_LT_LE_VXLAN;
2952 group_member = true;
2955 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2956 field->ltype_match |= NPC_LT_LE_GENEVE;
2957 group_member = true;
2960 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2962 field->ltype_mask = ~field->ltype_match;
2963 field_marker = true;
2964 keyoff_marker = true;
2966 group_member = false;
2970 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2971 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2972 field->lid = NPC_LID_LA;
2973 field->ltype_match = NPC_LT_LA_ETHER;
2974 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2975 field->lid = NPC_LID_LF;
2976 field->ltype_match = NPC_LT_LF_TU_ETHER;
2978 field->hdr_offset = 0;
2979 field->bytesm1 = 5; /* DMAC 6 Byte */
2980 field->ltype_mask = 0xF;
2982 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2983 field->lid = NPC_LID_LC;
2984 field->hdr_offset = 40; /* IPV6 hdr */
2985 field->bytesm1 = 0; /* 1 Byte ext hdr */
2986 field->ltype_match = NPC_LT_LC_IP6_EXT;
2987 field->ltype_mask = 0xF;
2989 case NIX_FLOW_KEY_TYPE_GTPU:
2990 field->lid = NPC_LID_LE;
2991 field->hdr_offset = 4;
2992 field->bytesm1 = 3; /* 4 bytes TID */
2993 field->ltype_match = NPC_LT_LE_GTPU;
2994 field->ltype_mask = 0xF;
2996 case NIX_FLOW_KEY_TYPE_VLAN:
2997 field->lid = NPC_LID_LB;
2998 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2999 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3000 field->ltype_match = NPC_LT_LB_CTAG;
3001 field->ltype_mask = 0xF;
3002 field->fn_mask = 1; /* Mask out the first nibble */
3004 case NIX_FLOW_KEY_TYPE_AH:
3005 case NIX_FLOW_KEY_TYPE_ESP:
3006 field->hdr_offset = 0;
3007 field->bytesm1 = 7; /* SPI + sequence number */
3008 field->ltype_mask = 0xF;
3009 field->lid = NPC_LID_LE;
3010 field->ltype_match = NPC_LT_LE_ESP;
3011 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3012 field->lid = NPC_LID_LD;
3013 field->ltype_match = NPC_LT_LD_AH;
3014 field->hdr_offset = 4;
3015 keyoff_marker = false;
3021 /* Found a valid flow key type */
3023 /* Use the key offset of TCP/UDP/SCTP fields
3024 * for ESP/AH fields.
3026 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3027 key_type == NIX_FLOW_KEY_TYPE_AH)
3028 key_off = l4_key_offset;
3029 field->key_offset = key_off;
3030 memcpy(&alg[nr_field], field, sizeof(*field));
3031 max_key_off = max(max_key_off, field->bytesm1 + 1);
3033 /* Found a field marker, get the next field */
3038 /* Found a keyoff marker, update the new key_off */
3039 if (keyoff_marker) {
3040 key_off += max_key_off;
3044 /* Processed all the flow key types */
3045 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3048 return NIX_AF_ERR_RSS_NOSPC_FIELD;
3051 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3053 u64 field[FIELDS_PER_ALG];
3057 hw = get_nix_hw(rvu->hw, blkaddr);
3061 /* No room to add a new flow hash algorithm */
3062 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3063 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3065 /* Generate algo fields for the given flow_cfg */
3066 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3070 /* Update ALGX_FIELDX register with generated fields */
3071 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3072 rvu_write64(rvu, blkaddr,
3073 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3076 /* Store the flow_cfg for further lookup */
3077 rc = hw->flowkey.in_use;
3078 hw->flowkey.flowkey[rc] = flow_cfg;
3079 hw->flowkey.in_use++;
3084 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3085 struct nix_rss_flowkey_cfg *req,
3086 struct nix_rss_flowkey_cfg_rsp *rsp)
3088 u16 pcifunc = req->hdr.pcifunc;
3089 int alg_idx, nixlf, blkaddr;
3090 struct nix_hw *nix_hw;
3093 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3097 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3101 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3102 /* Failed to get an algo index from the existing list, reserve a new one */
3104 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3109 rsp->alg_idx = alg_idx;
3110 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3111 alg_idx, req->mcam_index);
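/* Clear all flow key algorithm field registers and pre-reserve a set of
 * default algorithms (IP 2-tuple and the TCP/UDP/SCTP 4-tuple combinations)
 * so that common RSS configurations map to fixed algorithm indices.
 */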
3115 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3117 u32 flowkey_cfg, minkey_cfg;
3120 /* Disable all flow key algx fieldx */
3121 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3122 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3123 rvu_write64(rvu, blkaddr,
3124 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3128 /* IPv4/IPv6 SIP/DIPs */
3129 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3130 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3134 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3135 minkey_cfg = flowkey_cfg;
3136 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3137 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3141 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3142 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3143 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3147 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3148 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3149 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3153 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3154 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3155 NIX_FLOW_KEY_TYPE_UDP;
3156 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3160 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3161 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3162 NIX_FLOW_KEY_TYPE_SCTP;
3163 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3167 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3168 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3169 NIX_FLOW_KEY_TYPE_SCTP;
3170 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3174 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3175 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3176 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3177 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3184 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3185 struct nix_set_mac_addr *req,
3186 struct msg_rsp *rsp)
3188 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3189 u16 pcifunc = req->hdr.pcifunc;
3190 int blkaddr, nixlf, err;
3191 struct rvu_pfvf *pfvf;
3193 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3197 pfvf = rvu_get_pfvf(rvu, pcifunc);
3199 /* untrusted VF can't overwrite admin(PF) changes */
3200 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3201 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3203 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3207 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3209 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3210 pfvf->rx_chan_base, req->mac_addr);
3212 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3213 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3215 rvu_switch_update_rules(rvu, pcifunc);
3220 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3221 struct msg_req *req,
3222 struct nix_get_mac_addr_rsp *rsp)
3224 u16 pcifunc = req->hdr.pcifunc;
3225 struct rvu_pfvf *pfvf;
3227 if (!is_nixlf_attached(rvu, pcifunc))
3228 return NIX_AF_ERR_AF_LF_INVALID;
3230 pfvf = rvu_get_pfvf(rvu, pcifunc);
3232 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3237 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3238 struct msg_rsp *rsp)
3240 bool allmulti, promisc, nix_rx_multicast;
3241 u16 pcifunc = req->hdr.pcifunc;
3242 struct rvu_pfvf *pfvf;
3245 pfvf = rvu_get_pfvf(rvu, pcifunc);
3246 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3247 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3248 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3250 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3252 if (is_vf(pcifunc) && !nix_rx_multicast &&
3253 (promisc || allmulti)) {
3254 dev_warn_ratelimited(rvu->dev,
3255 "VF promisc/multicast not supported\n");
3259 /* untrusted VF can't configure promisc/allmulti */
3260 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3261 (promisc || allmulti))
3264 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3268 if (nix_rx_multicast) {
3269 /* add/del this PF_FUNC to/from mcast pkt replication list */
3270 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3274 "Failed to update pcifunc 0x%x to multicast list\n",
3279 /* add/del this PF_FUNC to/from promisc pkt replication list */
3280 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3284 "Failed to update pcifunc 0x%x to promisc list\n",
3290 /* install/uninstall allmulti entry */
3292 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3293 pfvf->rx_chan_base);
3295 if (!nix_rx_multicast)
3296 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3299 /* install/uninstall promisc entry */
3301 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3305 if (!nix_rx_multicast)
3306 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3312 static void nix_find_link_frs(struct rvu *rvu,
3313 struct nix_frs_cfg *req, u16 pcifunc)
3315 int pf = rvu_get_pf(pcifunc);
3316 struct rvu_pfvf *pfvf;
3321 /* Update with requester's min/max lengths */
3322 pfvf = rvu_get_pfvf(rvu, pcifunc);
3323 pfvf->maxlen = req->maxlen;
3324 if (req->update_minlen)
3325 pfvf->minlen = req->minlen;
3327 maxlen = req->maxlen;
3328 minlen = req->update_minlen ? req->minlen : 0;
3330 /* Get this PF's numVFs and starting hwvf */
3331 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3333 /* For each VF, compare requested max/minlen */
3334 for (vf = 0; vf < numvfs; vf++) {
3335 pfvf = &rvu->hwvf[hwvf + vf];
3336 if (pfvf->maxlen > maxlen)
3337 maxlen = pfvf->maxlen;
3338 if (req->update_minlen &&
3339 pfvf->minlen && pfvf->minlen < minlen)
3340 minlen = pfvf->minlen;
3343 /* Compare requested max/minlen with PF's max/minlen */
3344 pfvf = &rvu->pf[pf];
3345 if (pfvf->maxlen > maxlen)
3346 maxlen = pfvf->maxlen;
3347 if (req->update_minlen &&
3348 pfvf->minlen && pfvf->minlen < minlen)
3349 minlen = pfvf->minlen;
3351 /* Update the request with the max/min of the PF and its VFs */
3352 req->maxlen = maxlen;
3353 if (req->update_minlen)
3354 req->minlen = minlen;
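/* Validate the requested frame size against the link's MTU limit,
 * optionally update min/max lengths in the requester's SMQs, then program
 * NIX_AF_RX_LINKX_CFG for the CGX/LBK/SDP link this PF maps to and, for
 * CGX links, re-derive the TX credits from the LMAC FIFO length.
 */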
3357 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3358 struct msg_rsp *rsp)
3360 struct rvu_hwinfo *hw = rvu->hw;
3361 u16 pcifunc = req->hdr.pcifunc;
3362 int pf = rvu_get_pf(pcifunc);
3363 int blkaddr, schq, link = -1;
3364 struct nix_txsch *txsch;
3365 u64 cfg, lmac_fifo_len;
3366 struct nix_hw *nix_hw;
3367 u8 cgx = 0, lmac = 0;
3370 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3372 return NIX_AF_ERR_AF_LF_INVALID;
3374 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3378 if (is_afvf(pcifunc))
3379 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3381 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3383 if (!req->sdp_link && req->maxlen > max_mtu)
3384 return NIX_AF_ERR_FRS_INVALID;
3386 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3387 return NIX_AF_ERR_FRS_INVALID;
3389 /* Check if the requester wants to update SMQs */
3390 if (!req->update_smq)
3393 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3394 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3395 mutex_lock(&rvu->rsrc_lock);
3396 for (schq = 0; schq < txsch->schq.max; schq++) {
3397 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3399 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3400 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3401 if (req->update_minlen)
3402 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3403 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3405 mutex_unlock(&rvu->rsrc_lock);
3408 /* Check if config is for SDP link */
3409 if (req->sdp_link) {
3411 return NIX_AF_ERR_RX_LINK_INVALID;
3412 link = hw->cgx_links + hw->lbk_links;
3416 /* Check if the request is from CGX mapped RVU PF */
3417 if (is_pf_cgxmapped(rvu, pf)) {
3418 /* Get CGX and LMAC to which this PF is mapped and find link */
3419 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3420 link = (cgx * hw->lmac_per_cgx) + lmac;
3421 } else if (pf == 0) {
3422 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3423 link = hw->cgx_links;
3427 return NIX_AF_ERR_RX_LINK_INVALID;
3429 nix_find_link_frs(rvu, req, pcifunc);
3432 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3433 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3434 if (req->update_minlen)
3435 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3436 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3438 if (req->sdp_link || pf == 0)
3441 /* Update transmit credits for CGX links */
3443 rvu_cgx_get_fifolen(rvu) /
3444 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3445 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3446 cfg &= ~(0xFFFFFULL << 12);
3447 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3448 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3452 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3453 struct msg_rsp *rsp)
3455 int nixlf, blkaddr, err;
3458 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3462 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3463 /* Set the interface configuration */
3464 if (req->len_verify & BIT(0))
3467 cfg &= ~BIT_ULL(41);
3469 if (req->len_verify & BIT(1))
3472 cfg &= ~BIT_ULL(40);
3474 if (req->csum_verify & BIT(0))
3477 cfg &= ~BIT_ULL(37);
3479 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3484 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3486 /* CN10k supports 72KB FIFO size and max packet size of 64k */
3487 if (rvu->hw->lbk_bufsize == 0x12000)
3488 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3490 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
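/* Program the default min/max packet lengths for every NIX RX link and the
 * initial TX credits per link; credits are counted in 16-byte units of the
 * FIFO space left after reserving one maximum sized frame.
 */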
3493 static void nix_link_config(struct rvu *rvu, int blkaddr)
3495 struct rvu_hwinfo *hw = rvu->hw;
3496 int cgx, lmac_cnt, slink, link;
3497 u16 lbk_max_frs, lmac_max_frs;
3500 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3501 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3503 /* Set default min/max packet lengths allowed on NIX Rx links.
3505 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3506 * as undersize and report them to SW as error pkts, hence
3507 * setting it to 40 bytes.
3509 for (link = 0; link < hw->cgx_links; link++) {
3510 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3511 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3514 for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
3515 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3516 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3518 if (hw->sdp_links) {
3519 link = hw->cgx_links + hw->lbk_links;
3520 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3521 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3524 /* Set credits for Tx links assuming max packet length allowed.
3525 * This will be reconfigured based on MTU set for PF/VF.
3527 for (cgx = 0; cgx < hw->cgx; cgx++) {
3528 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3529 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3531 /* Enable credits and set credit pkt count to max allowed */
3532 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3533 slink = cgx * hw->lmac_per_cgx;
3534 for (link = slink; link < (slink + lmac_cnt); link++) {
3535 rvu_write64(rvu, blkaddr,
3536 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3541 /* Set Tx credits for LBK link */
3542 slink = hw->cgx_links;
3543 for (link = slink; link < (slink + hw->lbk_links); link++) {
3544 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3545 /* Enable credits and set credit pkt count to max allowed */
3546 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3547 rvu_write64(rvu, blkaddr,
3548 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3552 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3557 /* Start X2P bus calibration */
3558 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3559 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3560 /* Wait for calibration to complete */
3561 err = rvu_poll_reg(rvu, blkaddr,
3562 NIX_AF_STATUS, BIT_ULL(10), false);
3564 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3568 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3569 /* Check if CGX devices are ready */
3570 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3571 /* Skip when cgx port is not available */
3572 if (!rvu_cgx_pdata(idx, rvu) ||
3573 (status & (BIT_ULL(16 + idx))))
3576 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3580 /* Check if LBK is ready */
3581 if (!(status & BIT_ULL(19))) {
3583 "LBK didn't respond to NIX X2P calibration\n");
3587 /* Clear 'calibrate_x2p' bit */
3588 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3589 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3590 if (err || (status & 0x3FFULL))
3592 "NIX X2P calibration failed, status 0x%llx\n", status);
3598 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3603 /* Set admin queue endianness */
3604 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3607 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3610 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3613 /* Do not bypass NDC cache */
3614 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3616 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3617 /* Disable caching of SQB aka SQEs */
3620 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3622 /* Result structure can be followed by RQ/SQ/CQ context at
3623 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3624 * operation type. Alloc sufficient result memory for all operations.
3626 err = rvu_aq_alloc(rvu, &block->aq,
3627 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3628 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3632 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3633 rvu_write64(rvu, block->addr,
3634 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3638 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3640 const struct npc_lt_def_cfg *ltdefs;
3641 struct rvu_hwinfo *hw = rvu->hw;
3642 int blkaddr = nix_hw->blkaddr;
3643 struct rvu_block *block;
3647 block = &hw->block[blkaddr];
3649 if (is_rvu_96xx_B0(rvu)) {
3650 /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3651 * internal state when conditional clocks are turned off.
3652 * Hence enable them.
3654 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3655 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3657 /* Set chan/link to backpressure TL3 instead of TL2 */
3658 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3660 /* Disable SQ manager's sticky mode operation (set TM6 = 0).
3661 * This sticky mode is known to cause SQ stalls when multiple
3662 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3664 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3665 cfg &= ~BIT_ULL(15);
3666 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3669 ltdefs = rvu->kpu.lt_def;
3670 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3671 err = nix_calibrate_x2p(rvu, blkaddr);
3675 /* Initialize admin queue */
3676 err = nix_aq_init(rvu, block);
3680 /* Restore CINT timer delay to HW reset values */
3681 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3683 if (is_block_implemented(hw, blkaddr)) {
3684 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3688 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3692 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3696 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3700 err = nix_setup_txvlan(rvu, nix_hw);
3704 /* Configure segmentation offload formats */
3705 nix_setup_lso(rvu, nix_hw, blkaddr);
3707 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3708 * This helps HW protocol checker to identify headers
3709 * and validate length and checksums.
3711 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3712 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3713 ltdefs->rx_ol2.ltype_mask);
3714 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3715 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3716 ltdefs->rx_oip4.ltype_mask);
3717 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3718 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3719 ltdefs->rx_iip4.ltype_mask);
3720 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3721 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3722 ltdefs->rx_oip6.ltype_mask);
3723 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3724 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3725 ltdefs->rx_iip6.ltype_mask);
3726 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3727 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3728 ltdefs->rx_otcp.ltype_mask);
3729 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3730 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3731 ltdefs->rx_itcp.ltype_mask);
3732 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3733 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3734 ltdefs->rx_oudp.ltype_mask);
3735 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3736 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3737 ltdefs->rx_iudp.ltype_mask);
3738 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3739 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3740 ltdefs->rx_osctp.ltype_mask);
3741 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3742 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3743 ltdefs->rx_isctp.ltype_mask);
3745 if (!is_rvu_otx2(rvu)) {
3746 /* Enable APAD calculation for other protocols
3747 * matching APAD0 and APAD1 lt def registers.
3749 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3750 (ltdefs->rx_apad0.valid << 11) |
3751 (ltdefs->rx_apad0.lid << 8) |
3752 (ltdefs->rx_apad0.ltype_match << 4) |
3753 ltdefs->rx_apad0.ltype_mask);
3754 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3755 (ltdefs->rx_apad1.valid << 11) |
3756 (ltdefs->rx_apad1.lid << 8) |
3757 (ltdefs->rx_apad1.ltype_match << 4) |
3758 ltdefs->rx_apad1.ltype_mask);
3760 /* The receive ethertype definition register defines layer
3761 * information in NPC_RESULT_S to identify the Ethertype
3762 * location in L2 header. Used for Ethertype overwriting
3763 * in inline IPsec flow.
3765 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3766 (ltdefs->rx_et[0].offset << 12) |
3767 (ltdefs->rx_et[0].valid << 11) |
3768 (ltdefs->rx_et[0].lid << 8) |
3769 (ltdefs->rx_et[0].ltype_match << 4) |
3770 ltdefs->rx_et[0].ltype_mask);
3771 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3772 (ltdefs->rx_et[1].offset << 12) |
3773 (ltdefs->rx_et[1].valid << 11) |
3774 (ltdefs->rx_et[1].lid << 8) |
3775 (ltdefs->rx_et[1].ltype_match << 4) |
3776 ltdefs->rx_et[1].ltype_mask);
3779 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3783 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3784 nix_link_config(rvu, blkaddr);
3786 /* Enable Channel backpressure */
3787 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3792 int rvu_nix_init(struct rvu *rvu)
3794 struct rvu_hwinfo *hw = rvu->hw;
3795 struct nix_hw *nix_hw;
3796 int blkaddr = 0, err;
3799 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3804 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3806 nix_hw = &hw->nix[i];
3808 nix_hw->blkaddr = blkaddr;
3809 err = rvu_nix_block_init(rvu, nix_hw);
3812 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3819 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3820 struct rvu_block *block)
3822 struct nix_txsch *txsch;
3823 struct nix_mcast *mcast;
3824 struct nix_txvlan *vlan;
3825 struct nix_hw *nix_hw;
3828 rvu_aq_free(rvu, block->aq);
3830 if (is_block_implemented(rvu->hw, blkaddr)) {
3831 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3835 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3836 txsch = &nix_hw->txsch[lvl];
3837 kfree(txsch->schq.bmap);
3840 nix_ipolicer_freemem(nix_hw);
3842 vlan = &nix_hw->txvlan;
3843 kfree(vlan->rsrc.bmap);
3844 mutex_destroy(&vlan->rsrc_lock);
3845 devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3847 mcast = &nix_hw->mcast;
3848 qmem_free(rvu->dev, mcast->mce_ctx);
3849 qmem_free(rvu->dev, mcast->mcast_buf);
3850 mutex_destroy(&mcast->mce_lock);
3854 void rvu_nix_freemem(struct rvu *rvu)
3856 struct rvu_hwinfo *hw = rvu->hw;
3857 struct rvu_block *block;
3860 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3862 block = &hw->block[blkaddr];
3863 rvu_nix_block_freemem(rvu, blkaddr, block);
3864 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3868 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3869 struct msg_rsp *rsp)
3871 u16 pcifunc = req->hdr.pcifunc;
3872 struct rvu_pfvf *pfvf;
3875 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3879 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3881 npc_mcam_enable_flows(rvu, pcifunc);
3883 pfvf = rvu_get_pfvf(rvu, pcifunc);
3884 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3886 rvu_switch_update_rules(rvu, pcifunc);
3888 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3891 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3892 struct msg_rsp *rsp)
3894 u16 pcifunc = req->hdr.pcifunc;
3895 struct rvu_pfvf *pfvf;
3898 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3902 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3904 pfvf = rvu_get_pfvf(rvu, pcifunc);
3905 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3907 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3910 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3912 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3913 struct hwctx_disable_req ctx_req;
3916 ctx_req.hdr.pcifunc = pcifunc;
3918 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3919 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3920 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3921 nix_interface_deinit(rvu, pcifunc, nixlf);
3922 nix_rx_sync(rvu, blkaddr);
3923 nix_txschq_free(rvu, pcifunc);
3925 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3927 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3930 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3931 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3933 dev_err(rvu->dev, "SQ ctx disable failed\n");
3937 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3938 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3940 dev_err(rvu->dev, "RQ ctx disable failed\n");
3944 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3945 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3947 dev_err(rvu->dev, "CQ ctx disable failed\n");
3950 nix_ctx_free(rvu, pfvf);
3952 nix_free_all_bandprof(rvu, pcifunc);
3955 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3957 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3959 struct rvu_hwinfo *hw = rvu->hw;
3960 struct rvu_block *block;
3965 pf = rvu_get_pf(pcifunc);
3966 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3969 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3971 return NIX_AF_ERR_AF_LF_INVALID;
3973 block = &hw->block[blkaddr];
3974 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3976 return NIX_AF_ERR_AF_LF_INVALID;
3978 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3981 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3983 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3985 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3990 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3991 struct msg_rsp *rsp)
3993 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3996 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3997 struct msg_rsp *rsp)
3999 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4002 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4003 struct nix_lso_format_cfg *req,
4004 struct nix_lso_format_cfg_rsp *rsp)
4006 u16 pcifunc = req->hdr.pcifunc;
4007 struct nix_hw *nix_hw;
4008 struct rvu_pfvf *pfvf;
4009 int blkaddr, idx, f;
4012 pfvf = rvu_get_pfvf(rvu, pcifunc);
4013 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4014 if (!pfvf->nixlf || blkaddr < 0)
4015 return NIX_AF_ERR_AF_LF_INVALID;
4017 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4021 /* Find existing matching LSO format, if any */
4022 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4023 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4024 reg = rvu_read64(rvu, blkaddr,
4025 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4026 if (req->fields[f] != (reg & req->field_mask))
4030 if (f == NIX_LSO_FIELD_MAX)
4034 if (idx < nix_hw->lso.in_use) {
4036 rsp->lso_format_idx = idx;
4040 if (nix_hw->lso.in_use == nix_hw->lso.total)
4041 return NIX_AF_ERR_LSO_CFG_FAIL;
4043 rsp->lso_format_idx = nix_hw->lso.in_use++;
4045 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4046 rvu_write64(rvu, blkaddr,
4047 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4053 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4055 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4057 /* overwrite vf mac address with default_mac */
4059 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4062 /* NIX ingress policers or bandwidth profiles APIs */
4063 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4065 struct npc_lt_def_cfg defs, *ltdefs;
4068 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4070 /* Extract PCP and DEI fields from the outer VLAN at byte offset
4071 * 2 from the start of LB_PTR (i.e. TAG).
4072 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4073 * fields are considered when 'Tunnel enable' is set in profile.
4075 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4076 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4077 (ltdefs->ovlan.ltype_match << 4) |
4078 ltdefs->ovlan.ltype_mask);
4079 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4080 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4081 (ltdefs->ivlan.ltype_match << 4) |
4082 ltdefs->ivlan.ltype_mask);
4084 /* DSCP field in outer and tunneled IPv4 packets */
4085 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4086 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4087 (ltdefs->rx_oip4.ltype_match << 4) |
4088 ltdefs->rx_oip4.ltype_mask);
4089 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4090 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4091 (ltdefs->rx_iip4.ltype_match << 4) |
4092 ltdefs->rx_iip4.ltype_mask);
4094 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4095 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4096 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4097 (ltdefs->rx_oip6.ltype_match << 4) |
4098 ltdefs->rx_oip6.ltype_mask);
4099 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4100 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4101 (ltdefs->rx_iip6.ltype_match << 4) |
4102 ltdefs->rx_iip6.ltype_mask);
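/* Bandwidth profile contexts are addressed through the AQ with the profile
 * index in qidx bits [13:0] and the layer (leaf/mid/top) in bits [15:14];
 * leaf profiles may chain to mid layer profiles and mid to top for
 * hierarchical policing.
 */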
4105 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4106 int layer, int prof_idx)
4108 struct nix_cn10k_aq_enq_req aq_req;
4111 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4113 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4114 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4115 aq_req.op = NIX_AQ_INSTOP_INIT;
4117 /* Context is all zeros, submit to AQ */
4118 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4119 (struct nix_aq_enq_req *)&aq_req, NULL);
4121 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4126 static int nix_setup_ipolicers(struct rvu *rvu,
4127 struct nix_hw *nix_hw, int blkaddr)
4129 struct rvu_hwinfo *hw = rvu->hw;
4130 struct nix_ipolicer *ipolicer;
4131 int err, layer, prof_idx;
4134 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4135 if (!(cfg & BIT_ULL(61))) {
4136 hw->cap.ipolicer = false;
4140 hw->cap.ipolicer = true;
4141 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4142 sizeof(*ipolicer), GFP_KERNEL);
4143 if (!nix_hw->ipolicer)
4146 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4148 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4149 ipolicer = &nix_hw->ipolicer[layer];
4151 case BAND_PROF_LEAF_LAYER:
4152 ipolicer->band_prof.max = cfg & 0XFFFF;
4154 case BAND_PROF_MID_LAYER:
4155 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4157 case BAND_PROF_TOP_LAYER:
4158 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4162 if (!ipolicer->band_prof.max)
4165 err = rvu_alloc_bitmap(&ipolicer->band_prof);
4169 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4170 ipolicer->band_prof.max,
4171 sizeof(u16), GFP_KERNEL);
4172 if (!ipolicer->pfvf_map)
4175 ipolicer->match_id = devm_kcalloc(rvu->dev,
4176 ipolicer->band_prof.max,
4177 sizeof(u16), GFP_KERNEL);
4178 if (!ipolicer->match_id)
4182 prof_idx < ipolicer->band_prof.max; prof_idx++) {
4183 /* Set AF as current owner for INIT ops to succeed */
4184 ipolicer->pfvf_map[prof_idx] = 0x00;
4186 /* There is no enable bit in the profile context, so there is
4187 * no context disable operation either. INIT the contexts here
4188 * so that later on a PF/VF only has to do a WRITE to set up
4189 * policer rates and config.
4191 err = nix_init_policer_context(rvu, nix_hw,
4197 /* Allocate memory for maintaining ref_counts for MID level
4198 * profiles; this will be needed for leaf layer profiles'
4201 if (layer != BAND_PROF_MID_LAYER)
4204 ipolicer->ref_count = devm_kcalloc(rvu->dev,
4205 ipolicer->band_prof.max,
4206 sizeof(u16), GFP_KERNEL);
4209 /* Set policer timeunit to 2us i.e. (19 + 1) * 100 nsec = 2us */
4210 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4212 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4217 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4219 struct nix_ipolicer *ipolicer;
4222 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4223 ipolicer = &nix_hw->ipolicer[layer];
4225 if (!ipolicer->band_prof.max)
4228 kfree(ipolicer->band_prof.bmap);
4232 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4233 struct nix_hw *nix_hw, u16 pcifunc)
4235 struct nix_ipolicer *ipolicer;
4236 int layer, hi_layer, prof_idx;
4238 /* Bits [15:14] in profile index represent layer */
4239 layer = (req->qidx >> 14) & 0x03;
4240 prof_idx = req->qidx & 0x3FFF;
4242 ipolicer = &nix_hw->ipolicer[layer];
4243 if (prof_idx >= ipolicer->band_prof.max)
4246 /* Check if the profile is allocated to the requesting PCIFUNC, with
4247 * the exception of AF. AF is allowed to read and update contexts.
4249 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4252 /* If this profile is linked to a higher layer profile, then check
4253 * if that profile is also allocated to the requesting PCIFUNC
4256 if (!req->prof.hl_en)
4259 /* Leaf layer profile can link only to mid layer and
4260 * mid layer to top layer.
4262 if (layer == BAND_PROF_LEAF_LAYER)
4263 hi_layer = BAND_PROF_MID_LAYER;
4264 else if (layer == BAND_PROF_MID_LAYER)
4265 hi_layer = BAND_PROF_TOP_LAYER;
4269 ipolicer = &nix_hw->ipolicer[hi_layer];
4270 prof_idx = req->prof.band_prof_id;
4271 if (prof_idx >= ipolicer->band_prof.max ||
4272 ipolicer->pfvf_map[prof_idx] != pcifunc)
4278 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4279 struct nix_bandprof_alloc_req *req,
4280 struct nix_bandprof_alloc_rsp *rsp)
4282 int blkaddr, layer, prof, idx, err;
4283 u16 pcifunc = req->hdr.pcifunc;
4284 struct nix_ipolicer *ipolicer;
4285 struct nix_hw *nix_hw;
4287 if (!rvu->hw->cap.ipolicer)
4288 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4290 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4294 mutex_lock(&rvu->rsrc_lock);
4295 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4296 if (layer == BAND_PROF_INVAL_LAYER)
4298 if (!req->prof_count[layer])
4301 ipolicer = &nix_hw->ipolicer[layer];
4302 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4303 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4304 if (idx == MAX_BANDPROF_PER_PFFUNC)
4307 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4310 rsp->prof_count[layer]++;
4311 rsp->prof_idx[layer][idx] = prof;
4312 ipolicer->pfvf_map[prof] = pcifunc;
4315 mutex_unlock(&rvu->rsrc_lock);
4319 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4321 int blkaddr, layer, prof_idx, err;
4322 struct nix_ipolicer *ipolicer;
4323 struct nix_hw *nix_hw;
4325 if (!rvu->hw->cap.ipolicer)
4326 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4328 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4332 mutex_lock(&rvu->rsrc_lock);
4333 /* Free all the profiles allocated to the PCIFUNC */
4334 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4335 if (layer == BAND_PROF_INVAL_LAYER)
4337 ipolicer = &nix_hw->ipolicer[layer];
4339 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4340 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4343 /* Clear ratelimit aggregation, if any */
4344 if (layer == BAND_PROF_LEAF_LAYER &&
4345 ipolicer->match_id[prof_idx])
4346 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4348 ipolicer->pfvf_map[prof_idx] = 0x00;
4349 ipolicer->match_id[prof_idx] = 0;
4350 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4353 mutex_unlock(&rvu->rsrc_lock);
4357 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4358 struct nix_bandprof_free_req *req,
4359 struct msg_rsp *rsp)
4361 int blkaddr, layer, prof_idx, idx, err;
4362 u16 pcifunc = req->hdr.pcifunc;
4363 struct nix_ipolicer *ipolicer;
4364 struct nix_hw *nix_hw;
4367 return nix_free_all_bandprof(rvu, pcifunc);
4369 if (!rvu->hw->cap.ipolicer)
4370 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4372 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4376 mutex_lock(&rvu->rsrc_lock);
4377 /* Free the requested profile indices */
4378 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4379 if (layer == BAND_PROF_INVAL_LAYER)
4381 if (!req->prof_count[layer])
4384 ipolicer = &nix_hw->ipolicer[layer];
4385 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4386 prof_idx = req->prof_idx[layer][idx];
4387 if (prof_idx >= ipolicer->band_prof.max ||
4388 ipolicer->pfvf_map[prof_idx] != pcifunc)
4391 /* Clear ratelimit aggregation, if any */
4392 if (layer == BAND_PROF_LEAF_LAYER &&
4393 ipolicer->match_id[prof_idx])
4394 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4396 ipolicer->pfvf_map[prof_idx] = 0x00;
4397 ipolicer->match_id[prof_idx] = 0;
4398 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4399 if (idx == MAX_BANDPROF_PER_PFFUNC)
4403 mutex_unlock(&rvu->rsrc_lock);
4407 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4408 struct nix_cn10k_aq_enq_req *aq_req,
4409 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4410 u16 pcifunc, u8 ctype, u32 qidx)
4412 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4413 aq_req->hdr.pcifunc = pcifunc;
4414 aq_req->ctype = ctype;
4415 aq_req->op = NIX_AQ_INSTOP_READ;
4416 aq_req->qidx = qidx;
4418 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4419 (struct nix_aq_enq_req *)aq_req,
4420 (struct nix_aq_enq_rsp *)aq_rsp);
4423 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4424 struct nix_hw *nix_hw,
4425 struct nix_cn10k_aq_enq_req *aq_req,
4426 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4427 u32 leaf_prof, u16 mid_prof)
4429 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4430 aq_req->hdr.pcifunc = 0x00;
4431 aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4432 aq_req->op = NIX_AQ_INSTOP_WRITE;
4433 aq_req->qidx = leaf_prof;
4435 aq_req->prof.band_prof_id = mid_prof;
4436 aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4437 aq_req->prof.hl_en = 1;
4438 aq_req->prof_mask.hl_en = 1;
4440 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4441 (struct nix_aq_enq_req *)aq_req,
4442 (struct nix_aq_enq_rsp *)aq_rsp);
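/* When two or more RQs have leaf bandwidth profiles tagged with the same
 * match_id, link them to a common mid layer profile so the flows are rate
 * limited as one aggregate; the mid profile is allocated on demand and
 * reference counted.
 */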
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with the same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map leaf_prof to it
		 * as well, so that flows that are steered to different RQs
		 * but marked with the same match_id are rate limited in an
		 * aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);

		goto exit;
	}
	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

/* Called with mutex rsrc_lock */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	/* Drop rsrc_lock across the AQ context read and re-acquire it below */
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);