1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
26 static int nix_setup_ipolicers(struct rvu *rvu,
27 struct nix_hw *nix_hw, int blkaddr);
28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
30 struct nix_hw *nix_hw, u16 pcifunc);
31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
59 enum nix_makr_fmt_indexes {
60 NIX_MARK_CFG_IP_DSCP_RED,
61 NIX_MARK_CFG_IP_DSCP_YELLOW,
62 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 NIX_MARK_CFG_IP_ECN_RED,
64 NIX_MARK_CFG_IP_ECN_YELLOW,
65 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 NIX_MARK_CFG_VLAN_DEI_RED,
67 NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
72 /* For now, consider MC resources needed for broadcast
73 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
75 #define MC_TBL_SIZE MC_TBL_SZ_512
76 #define MC_BUF_CNT MC_BUF_CNT_128
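/* Sizing note (not from the original comment): a 512-entry table leaves
 * headroom over the 268 PF_FUNCs (256 HWVFs + 12 PFs) mentioned above,
 * and MC_BUF_CNT_128 sizes the replication buffer pool.
 */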
79 struct hlist_node node;
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
87 /* If blkaddr is 0, return the first NIX block address */
89 return rvu->nix_blkaddr[blkaddr];
91 while (i + 1 < MAX_NIX_BLKS) {
92 if (rvu->nix_blkaddr[i] == blkaddr)
93 return rvu->nix_blkaddr[i + 1];
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
102 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
105 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 if (!pfvf->nixlf || blkaddr < 0)
111 int rvu_get_nixlf_count(struct rvu *rvu)
113 int blkaddr = 0, max = 0;
114 struct rvu_block *block;
116 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
118 block = &rvu->hw->block[blkaddr];
119 max += block->lf.max;
120 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
127 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 struct rvu_hwinfo *hw = rvu->hw;
131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 if (!pfvf->nixlf || blkaddr < 0)
133 return NIX_AF_ERR_AF_LF_INVALID;
135 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
137 return NIX_AF_ERR_AF_LF_INVALID;
140 *nix_blkaddr = blkaddr;
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 struct nix_hw **nix_hw, int *blkaddr)
148 struct rvu_pfvf *pfvf;
150 pfvf = rvu_get_pfvf(rvu, pcifunc);
151 *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 if (!pfvf->nixlf || *blkaddr < 0)
153 return NIX_AF_ERR_AF_LF_INVALID;
155 *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
157 return NIX_AF_ERR_INVALID_NIXBLK;
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
163 INIT_HLIST_HEAD(&list->head);
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
175 idx = mcast->next_free_mce;
176 mcast->next_free_mce += count;
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
182 int nix_blkaddr = 0, i = 0;
183 struct rvu *rvu = hw->rvu;
185 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 while (nix_blkaddr) {
187 if (blkaddr == nix_blkaddr && hw->nix)
189 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
195 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
199 /* Sync all in flight RX packets to LLC/DRAM */
200 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
201 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
203 dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
205 /* SW_SYNC ensures all existing transactions are finished and pkts
206 * are written to LLC/DRAM; queues should be torn down only after a
207 * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
208 * an existing transaction might end after the SW_SYNC operation. To
209 * ensure the operation is fully done, do the SW_SYNC twice.
211 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
212 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
214 dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
217 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
218 int lvl, u16 pcifunc, u16 schq)
220 struct rvu_hwinfo *hw = rvu->hw;
221 struct nix_txsch *txsch;
222 struct nix_hw *nix_hw;
225 nix_hw = get_nix_hw(rvu->hw, blkaddr);
229 txsch = &nix_hw->txsch[lvl];
230 /* Check out of bounds */
231 if (schq >= txsch->schq.max)
234 mutex_lock(&rvu->rsrc_lock);
235 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
236 mutex_unlock(&rvu->rsrc_lock);
238 /* TLs aggregating traffic are shared across PF and VFs */
239 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
240 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
246 if (map_func != pcifunc)
252 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
254 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
255 struct mac_ops *mac_ops;
256 int pkind, pf, vf, lbkid;
260 pf = rvu_get_pf(pcifunc);
261 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
265 case NIX_INTF_TYPE_CGX:
266 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
267 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
269 pkind = rvu_npc_get_pkind(rvu, pf);
272 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
275 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
276 pfvf->tx_chan_base = pfvf->rx_chan_base;
277 pfvf->rx_chan_cnt = 1;
278 pfvf->tx_chan_cnt = 1;
279 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
280 rvu_npc_set_pkind(rvu, pkind, pfvf);
282 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
283 /* By default we enable pause frames */
284 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
285 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
287 lmac_id, true, true);
289 case NIX_INTF_TYPE_LBK:
290 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
292 /* If the NIX1 block is present on the silicon then NIXes are
293 * assigned alternately for lbk interfaces. NIX0 should
294 * send packets on lbk link 1 channels and NIX1 should send
295 * on lbk link 0 channels for the communication between
299 if (rvu->hw->lbk_links > 1)
300 lbkid = vf & 0x1 ? 0 : 1;
302 /* Note that AF's VFs work in pairs and talk over consecutive
303 * loopback channels. Therefore if an odd number of AF VFs is
304 * enabled then the last VF remains with no pair.
306 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
307 pfvf->tx_chan_base = vf & 0x1 ?
308 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
309 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
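/* Illustration (derived from the pairing logic above): AF VF0 (vf = 0)
 * receives on LBK channel 0 and transmits on channel 1, while AF VF1
 * (vf = 1) receives on channel 1 and transmits on channel 0, so the two
 * VFs form a back-to-back pair (channel numbers are relative to the
 * lbk channel base returned by rvu_nix_chan_lbk()).
 */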
310 pfvf->rx_chan_cnt = 1;
311 pfvf->tx_chan_cnt = 1;
312 rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
313 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
319 /* Add a UCAST forwarding rule in MCAM matching the MAC address
320 * of the RVU PF/VF this NIXLF is attached to.
322 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
323 pfvf->rx_chan_base, pfvf->mac_addr);
325 /* Add this PF_FUNC to bcast pkt replication list */
326 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
329 "Bcast list, failed to enable PF_FUNC 0x%x\n",
333 /* Install MCAM rule matching Ethernet broadcast mac address */
334 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
335 nixlf, pfvf->rx_chan_base);
337 pfvf->maxlen = NIC_HW_MIN_FRS;
338 pfvf->minlen = NIC_HW_MIN_FRS;
343 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
345 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
351 /* Remove this PF_FUNC from bcast pkt replication list */
352 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
355 "Bcast list, failed to disable PF_FUNC 0x%x\n",
359 /* Free and disable any MCAM entries used by this NIX LF */
360 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
362 /* Disable DMAC filters used */
363 rvu_cgx_disable_dmac_entries(rvu, pcifunc);
366 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
367 struct nix_bp_cfg_req *req,
370 u16 pcifunc = req->hdr.pcifunc;
371 struct rvu_pfvf *pfvf;
372 int blkaddr, pf, type;
376 pf = rvu_get_pf(pcifunc);
377 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
378 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
381 pfvf = rvu_get_pfvf(rvu, pcifunc);
382 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
384 chan_base = pfvf->rx_chan_base + req->chan_base;
385 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
386 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
387 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
393 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
394 int type, int chan_id)
396 int bpid, blkaddr, lmac_chan_cnt;
397 struct rvu_hwinfo *hw = rvu->hw;
398 u16 cgx_bpid_cnt, lbk_bpid_cnt;
399 struct rvu_pfvf *pfvf;
403 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
404 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
405 lmac_chan_cnt = cfg & 0xFF;
407 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
408 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
410 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
412 /* Backpressure IDs range division
413 * CGX channels are mapped to (0 - 191) BPIDs
414 * LBK channels are mapped to (192 - 255) BPIDs
415 * SDP channels are mapped to (256 - 511) BPIDs
417 * LMAC channels and BPIDs are mapped as follows
418 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
419 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
420 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
423 case NIX_INTF_TYPE_CGX:
424 if ((req->chan_base + req->chan_cnt) > 15)
426 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
427 /* Assign bpid based on cgx, lmac and chan id */
428 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
429 (lmac_id * lmac_chan_cnt) + req->chan_base;
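/* Worked example (assuming 4 LMACs per CGX and lmac_chan_cnt = 16):
 * cgx_id = 1, lmac_id = 0, chan_base = 0 gives
 * bpid = (1 * 4 * 16) + (0 * 16) + 0 = 64, matching the
 * cgx(1)_lmac(0) -> bpid(64 - 79) mapping described above.
 */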
431 if (req->bpid_per_chan)
433 if (bpid > cgx_bpid_cnt)
437 case NIX_INTF_TYPE_LBK:
438 if ((req->chan_base + req->chan_cnt) > 63)
440 bpid = cgx_bpid_cnt + req->chan_base;
441 if (req->bpid_per_chan)
443 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
452 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
453 struct nix_bp_cfg_req *req,
454 struct nix_bp_cfg_rsp *rsp)
456 int blkaddr, pf, type, chan_id = 0;
457 u16 pcifunc = req->hdr.pcifunc;
458 struct rvu_pfvf *pfvf;
463 pf = rvu_get_pf(pcifunc);
464 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
466 /* Enable backpressure only for CGX mapped PFs and LBK interface */
467 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
470 pfvf = rvu_get_pfvf(rvu, pcifunc);
471 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
473 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
474 chan_base = pfvf->rx_chan_base + req->chan_base;
477 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
479 dev_warn(rvu->dev, "Fail to enable backpressure\n");
483 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
484 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
485 cfg | (bpid & 0xFF) | BIT_ULL(16));
487 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
490 for (chan = 0; chan < req->chan_cnt; chan++) {
491 /* Map each channel to the bpid assigned to it */
492 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
494 if (req->bpid_per_chan)
497 rsp->chan_cnt = req->chan_cnt;
502 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
503 u64 format, bool v4, u64 *fidx)
505 struct nix_lso_format field = {0};
507 /* IP's Length field */
508 field.layer = NIX_TXLAYER_OL3;
509 /* In IPv4, the length field is at byte offset 2; for IPv6 it's at 4 */
510 field.offset = v4 ? 2 : 4;
511 field.sizem1 = 1; /* i.e. 2 bytes */
512 field.alg = NIX_LSOALG_ADD_PAYLEN;
513 rvu_write64(rvu, blkaddr,
514 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
517 /* No ID field in IPv6 header */
522 field.layer = NIX_TXLAYER_OL3;
524 field.sizem1 = 1; /* i.e. 2 bytes */
525 field.alg = NIX_LSOALG_ADD_SEGNUM;
526 rvu_write64(rvu, blkaddr,
527 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
531 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
532 u64 format, u64 *fidx)
534 struct nix_lso_format field = {0};
536 /* TCP's sequence number field */
537 field.layer = NIX_TXLAYER_OL4;
539 field.sizem1 = 3; /* i.e. 4 bytes */
540 field.alg = NIX_LSOALG_ADD_OFFSET;
541 rvu_write64(rvu, blkaddr,
542 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
545 /* TCP's flags field */
546 field.layer = NIX_TXLAYER_OL4;
548 field.sizem1 = 1; /* 2 bytes */
549 field.alg = NIX_LSOALG_TCP_FLAGS;
550 rvu_write64(rvu, blkaddr,
551 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
555 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
557 u64 cfg, idx, fidx = 0;
559 /* Get max HW supported format indices */
560 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
561 nix_hw->lso.total = cfg;
564 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
565 /* For TSO, set first and middle segment flags to
566 * mask out PSH, RST & FIN flags in TCP packet
568 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
569 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
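/* 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) while leaving
 * the remaining TCP flag bits unmasked; the same mask is applied to both
 * 16-bit flag-mask fields (bits 16-31 and 32-47) written above.
 */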
570 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
572 /* Setup default static LSO formats
574 * Configure format fields for TCPv4 segmentation offload
576 idx = NIX_LSO_FORMAT_IDX_TSOV4;
577 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
578 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
580 /* Set rest of the fields to NOP */
581 for (; fidx < 8; fidx++) {
582 rvu_write64(rvu, blkaddr,
583 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
585 nix_hw->lso.in_use++;
587 /* Configure format fields for TCPv6 segmentation offload */
588 idx = NIX_LSO_FORMAT_IDX_TSOV6;
590 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
591 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
593 /* Set rest of the fields to NOP */
594 for (; fidx < 8; fidx++) {
595 rvu_write64(rvu, blkaddr,
596 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
598 nix_hw->lso.in_use++;
601 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
603 kfree(pfvf->rq_bmap);
604 kfree(pfvf->sq_bmap);
605 kfree(pfvf->cq_bmap);
607 qmem_free(rvu->dev, pfvf->rq_ctx);
609 qmem_free(rvu->dev, pfvf->sq_ctx);
611 qmem_free(rvu->dev, pfvf->cq_ctx);
613 qmem_free(rvu->dev, pfvf->rss_ctx);
614 if (pfvf->nix_qints_ctx)
615 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
616 if (pfvf->cq_ints_ctx)
617 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
619 pfvf->rq_bmap = NULL;
620 pfvf->cq_bmap = NULL;
621 pfvf->sq_bmap = NULL;
625 pfvf->rss_ctx = NULL;
626 pfvf->nix_qints_ctx = NULL;
627 pfvf->cq_ints_ctx = NULL;
630 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
631 struct rvu_pfvf *pfvf, int nixlf,
632 int rss_sz, int rss_grps, int hwctx_size,
635 int err, grp, num_indices;
637 /* RSS is not requested for this NIXLF */
640 num_indices = rss_sz * rss_grps;
642 /* Alloc NIX RSS HW context memory and config the base */
643 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
647 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
648 (u64)pfvf->rss_ctx->iova);
650 /* Config full RSS table size, enable RSS and caching */
651 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
652 BIT_ULL(36) | BIT_ULL(4) |
653 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
655 /* Config RSS group offset and sizes */
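/* Each group register is programmed below with the group size encoded as
 * ilog2(rss_sz) - 1 in the upper field (bit 16 onward) and the group's
 * starting index (rss_sz * grp) in the low bits, so group N spans
 * indices [N * rss_sz, (N + 1) * rss_sz) of the indirection table
 * (field widths as implied by the shifts used here).
 */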
656 for (grp = 0; grp < rss_grps; grp++)
657 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
658 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
662 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
663 struct nix_aq_inst_s *inst)
665 struct admin_queue *aq = block->aq;
666 struct nix_aq_res_s *result;
670 result = (struct nix_aq_res_s *)aq->res->base;
672 /* Get the current head pointer, i.e. where to append this instruction */
673 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
674 head = (reg >> 4) & AQ_PTR_MASK;
676 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
677 (void *)inst, aq->inst->entry_sz);
678 memset(result, 0, sizeof(*result));
679 /* sync into memory */
682 /* Ring the doorbell and wait for result */
683 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
684 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
692 if (result->compcode != NIX_AQ_COMP_GOOD)
693 /* TODO: Replace this with some error code */
699 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
700 struct nix_aq_enq_req *req,
701 struct nix_aq_enq_rsp *rsp)
703 struct rvu_hwinfo *hw = rvu->hw;
704 u16 pcifunc = req->hdr.pcifunc;
705 int nixlf, blkaddr, rc = 0;
706 struct nix_aq_inst_s inst;
707 struct rvu_block *block;
708 struct admin_queue *aq;
709 struct rvu_pfvf *pfvf;
714 blkaddr = nix_hw->blkaddr;
715 block = &hw->block[blkaddr];
718 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
719 return NIX_AF_ERR_AQ_ENQUEUE;
722 pfvf = rvu_get_pfvf(rvu, pcifunc);
723 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
725 /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
726 * operations done by AF itself.
728 if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
729 (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
730 if (!pfvf->nixlf || nixlf < 0)
731 return NIX_AF_ERR_AF_LF_INVALID;
734 switch (req->ctype) {
735 case NIX_AQ_CTYPE_RQ:
736 /* Check if index exceeds max no of queues */
737 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
738 rc = NIX_AF_ERR_AQ_ENQUEUE;
740 case NIX_AQ_CTYPE_SQ:
741 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
742 rc = NIX_AF_ERR_AQ_ENQUEUE;
744 case NIX_AQ_CTYPE_CQ:
745 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
746 rc = NIX_AF_ERR_AQ_ENQUEUE;
748 case NIX_AQ_CTYPE_RSS:
749 /* Check if RSS is enabled and qidx is within range */
750 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
751 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
752 (req->qidx >= (256UL << (cfg & 0xF))))
753 rc = NIX_AF_ERR_AQ_ENQUEUE;
755 case NIX_AQ_CTYPE_MCE:
756 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
758 /* Check if index exceeds MCE list length */
759 if (!nix_hw->mcast.mce_ctx ||
760 (req->qidx >= (256UL << (cfg & 0xF))))
761 rc = NIX_AF_ERR_AQ_ENQUEUE;
763 /* Adding multicast lists for requests from PF/VFs is not
764 * yet supported, so ignore this.
767 rc = NIX_AF_ERR_AQ_ENQUEUE;
769 case NIX_AQ_CTYPE_BANDPROF:
770 if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
772 rc = NIX_AF_ERR_INVALID_BANDPROF;
775 rc = NIX_AF_ERR_AQ_ENQUEUE;
781 /* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
782 if (req->ctype == NIX_AQ_CTYPE_SQ &&
783 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
784 (req->op == NIX_AQ_INSTOP_WRITE &&
785 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
786 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
787 pcifunc, req->sq.smq))
788 return NIX_AF_ERR_AQ_ENQUEUE;
791 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
793 inst.cindex = req->qidx;
794 inst.ctype = req->ctype;
796 /* Currently we do not support enqueuing multiple instructions,
797 * so always choose the first entry in result memory.
799 inst.res_addr = (u64)aq->res->iova;
801 /* Hardware uses the same aq->res->base for updating the result of the
802 * previous instruction, hence wait here till it is done.
804 spin_lock(&aq->lock);
806 /* Clean result + context memory */
807 memset(aq->res->base, 0, aq->res->entry_sz);
808 /* Context needs to be written at RES_ADDR + 128 */
809 ctx = aq->res->base + 128;
810 /* Mask needs to be written at RES_ADDR + 256 */
811 mask = aq->res->base + 256;
814 case NIX_AQ_INSTOP_WRITE:
815 if (req->ctype == NIX_AQ_CTYPE_RQ)
816 memcpy(mask, &req->rq_mask,
817 sizeof(struct nix_rq_ctx_s));
818 else if (req->ctype == NIX_AQ_CTYPE_SQ)
819 memcpy(mask, &req->sq_mask,
820 sizeof(struct nix_sq_ctx_s));
821 else if (req->ctype == NIX_AQ_CTYPE_CQ)
822 memcpy(mask, &req->cq_mask,
823 sizeof(struct nix_cq_ctx_s));
824 else if (req->ctype == NIX_AQ_CTYPE_RSS)
825 memcpy(mask, &req->rss_mask,
826 sizeof(struct nix_rsse_s));
827 else if (req->ctype == NIX_AQ_CTYPE_MCE)
828 memcpy(mask, &req->mce_mask,
829 sizeof(struct nix_rx_mce_s));
830 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
831 memcpy(mask, &req->prof_mask,
832 sizeof(struct nix_bandprof_s));
834 case NIX_AQ_INSTOP_INIT:
835 if (req->ctype == NIX_AQ_CTYPE_RQ)
836 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
837 else if (req->ctype == NIX_AQ_CTYPE_SQ)
838 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
839 else if (req->ctype == NIX_AQ_CTYPE_CQ)
840 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
841 else if (req->ctype == NIX_AQ_CTYPE_RSS)
842 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
843 else if (req->ctype == NIX_AQ_CTYPE_MCE)
844 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
845 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
846 memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
848 case NIX_AQ_INSTOP_NOP:
849 case NIX_AQ_INSTOP_READ:
850 case NIX_AQ_INSTOP_LOCK:
851 case NIX_AQ_INSTOP_UNLOCK:
854 rc = NIX_AF_ERR_AQ_ENQUEUE;
855 spin_unlock(&aq->lock);
859 /* Submit the instruction to AQ */
860 rc = nix_aq_enqueue_wait(rvu, block, &inst);
862 spin_unlock(&aq->lock);
866 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
867 if (req->op == NIX_AQ_INSTOP_INIT) {
868 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
869 __set_bit(req->qidx, pfvf->rq_bmap);
870 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
871 __set_bit(req->qidx, pfvf->sq_bmap);
872 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
873 __set_bit(req->qidx, pfvf->cq_bmap);
876 if (req->op == NIX_AQ_INSTOP_WRITE) {
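/* For WRITE ops, refresh the cached enable bitmaps: the new 'ena' state
 * is taken from the request wherever the corresponding mask bit selects
 * it, otherwise the previously cached bit is retained. req->rq.ena is
 * used in all three cases below, which assumes the rq/sq/cq contexts
 * overlay each other in the request and share the enable bit position.
 */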
877 if (req->ctype == NIX_AQ_CTYPE_RQ) {
878 ena = (req->rq.ena & req->rq_mask.ena) |
879 (test_bit(req->qidx, pfvf->rq_bmap) &
882 __set_bit(req->qidx, pfvf->rq_bmap);
884 __clear_bit(req->qidx, pfvf->rq_bmap);
886 if (req->ctype == NIX_AQ_CTYPE_SQ) {
887 ena = (req->rq.ena & req->sq_mask.ena) |
888 (test_bit(req->qidx, pfvf->sq_bmap) &
891 __set_bit(req->qidx, pfvf->sq_bmap);
893 __clear_bit(req->qidx, pfvf->sq_bmap);
895 if (req->ctype == NIX_AQ_CTYPE_CQ) {
896 ena = (req->rq.ena & req->cq_mask.ena) |
897 (test_bit(req->qidx, pfvf->cq_bmap) &
900 __set_bit(req->qidx, pfvf->cq_bmap);
902 __clear_bit(req->qidx, pfvf->cq_bmap);
907 /* Copy read context into mailbox */
908 if (req->op == NIX_AQ_INSTOP_READ) {
909 if (req->ctype == NIX_AQ_CTYPE_RQ)
910 memcpy(&rsp->rq, ctx,
911 sizeof(struct nix_rq_ctx_s));
912 else if (req->ctype == NIX_AQ_CTYPE_SQ)
913 memcpy(&rsp->sq, ctx,
914 sizeof(struct nix_sq_ctx_s));
915 else if (req->ctype == NIX_AQ_CTYPE_CQ)
916 memcpy(&rsp->cq, ctx,
917 sizeof(struct nix_cq_ctx_s));
918 else if (req->ctype == NIX_AQ_CTYPE_RSS)
919 memcpy(&rsp->rss, ctx,
920 sizeof(struct nix_rsse_s));
921 else if (req->ctype == NIX_AQ_CTYPE_MCE)
922 memcpy(&rsp->mce, ctx,
923 sizeof(struct nix_rx_mce_s));
924 else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
925 memcpy(&rsp->prof, ctx,
926 sizeof(struct nix_bandprof_s));
930 spin_unlock(&aq->lock);
934 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
935 struct nix_aq_enq_rsp *rsp)
937 struct nix_hw *nix_hw;
940 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
942 return NIX_AF_ERR_AF_LF_INVALID;
944 nix_hw = get_nix_hw(rvu->hw, blkaddr);
948 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
951 static const char *nix_get_ctx_name(int ctype)
954 case NIX_AQ_CTYPE_CQ:
956 case NIX_AQ_CTYPE_SQ:
958 case NIX_AQ_CTYPE_RQ:
960 case NIX_AQ_CTYPE_RSS:
966 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
968 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
969 struct nix_aq_enq_req aq_req;
974 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
975 return NIX_AF_ERR_AQ_ENQUEUE;
977 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
978 aq_req.hdr.pcifunc = req->hdr.pcifunc;
980 if (req->ctype == NIX_AQ_CTYPE_CQ) {
982 aq_req.cq_mask.ena = 1;
983 aq_req.cq.bp_ena = 0;
984 aq_req.cq_mask.bp_ena = 1;
985 q_cnt = pfvf->cq_ctx->qsize;
986 bmap = pfvf->cq_bmap;
988 if (req->ctype == NIX_AQ_CTYPE_SQ) {
990 aq_req.sq_mask.ena = 1;
991 q_cnt = pfvf->sq_ctx->qsize;
992 bmap = pfvf->sq_bmap;
994 if (req->ctype == NIX_AQ_CTYPE_RQ) {
996 aq_req.rq_mask.ena = 1;
997 q_cnt = pfvf->rq_ctx->qsize;
998 bmap = pfvf->rq_bmap;
1001 aq_req.ctype = req->ctype;
1002 aq_req.op = NIX_AQ_INSTOP_WRITE;
1004 for (qidx = 0; qidx < q_cnt; qidx++) {
1005 if (!test_bit(qidx, bmap))
1008 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1011 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1012 nix_get_ctx_name(req->ctype), qidx);
1019 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1020 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1022 struct nix_aq_enq_req lock_ctx_req;
1025 if (req->op != NIX_AQ_INSTOP_INIT)
1028 if (req->ctype == NIX_AQ_CTYPE_MCE ||
1029 req->ctype == NIX_AQ_CTYPE_DYNO)
1032 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1033 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1034 lock_ctx_req.ctype = req->ctype;
1035 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1036 lock_ctx_req.qidx = req->qidx;
1037 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1040 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1042 nix_get_ctx_name(req->ctype), req->qidx);
1046 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1047 struct nix_aq_enq_req *req,
1048 struct nix_aq_enq_rsp *rsp)
1052 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1054 err = nix_lf_hwctx_lockdown(rvu, req);
1059 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1060 struct nix_aq_enq_req *req,
1061 struct nix_aq_enq_rsp *rsp)
1063 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1066 /* CN10K mbox handler */
1067 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1068 struct nix_cn10k_aq_enq_req *req,
1069 struct nix_cn10k_aq_enq_rsp *rsp)
1071 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1072 (struct nix_aq_enq_rsp *)rsp);
1075 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1076 struct hwctx_disable_req *req,
1077 struct msg_rsp *rsp)
1079 return nix_lf_hwctx_disable(rvu, req);
1082 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1083 struct nix_lf_alloc_req *req,
1084 struct nix_lf_alloc_rsp *rsp)
1086 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1087 struct rvu_hwinfo *hw = rvu->hw;
1088 u16 pcifunc = req->hdr.pcifunc;
1089 struct rvu_block *block;
1090 struct rvu_pfvf *pfvf;
1094 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1095 return NIX_AF_ERR_PARAM;
1098 req->way_mask &= 0xFFFF;
1100 pfvf = rvu_get_pfvf(rvu, pcifunc);
1101 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1102 if (!pfvf->nixlf || blkaddr < 0)
1103 return NIX_AF_ERR_AF_LF_INVALID;
1105 block = &hw->block[blkaddr];
1106 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1108 return NIX_AF_ERR_AF_LF_INVALID;
1110 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1111 if (req->npa_func) {
1112 /* If default, use 'this' NIXLF's PFFUNC */
1113 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1114 req->npa_func = pcifunc;
1115 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1116 return NIX_AF_INVAL_NPA_PF_FUNC;
1119 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1120 if (req->sso_func) {
1121 /* If default, use 'this' NIXLF's PFFUNC */
1122 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1123 req->sso_func = pcifunc;
1124 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1125 return NIX_AF_INVAL_SSO_PF_FUNC;
1128 /* If RSS is being enabled, check if the requested config is valid.
1129 * RSS table size should be a power of two, otherwise
1130 * RSS_GRP::OFFSET + adder might go beyond that group or
1131 * the entire table can't be used.
1133 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1134 !is_power_of_2(req->rss_sz)))
1135 return NIX_AF_ERR_RSS_SIZE_INVALID;
1138 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1139 return NIX_AF_ERR_RSS_GRPS_INVALID;
1141 /* Reset this NIX LF */
1142 err = rvu_lf_reset(rvu, block, nixlf);
1144 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1145 block->addr - BLKADDR_NIX0, nixlf);
1146 return NIX_AF_ERR_LF_RESET;
1149 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1151 /* Alloc NIX RQ HW context memory and config the base */
1152 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1153 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1157 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1161 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1162 (u64)pfvf->rq_ctx->iova);
1164 /* Set caching and queue count in HW */
1165 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1166 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1168 /* Alloc NIX SQ HW context memory and config the base */
1169 hwctx_size = 1UL << (ctx_cfg & 0xF);
1170 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1174 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1178 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1179 (u64)pfvf->sq_ctx->iova);
1181 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1182 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1184 /* Alloc NIX CQ HW context memory and config the base */
1185 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1186 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1190 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1194 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1195 (u64)pfvf->cq_ctx->iova);
1197 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1198 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1200 /* Initialize receive side scaling (RSS) */
1201 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1202 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1203 req->rss_grps, hwctx_size, req->way_mask);
1207 /* Alloc memory for CQINT's HW contexts */
1208 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1209 qints = (cfg >> 24) & 0xFFF;
1210 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1211 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1215 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1216 (u64)pfvf->cq_ints_ctx->iova);
1218 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1219 BIT_ULL(36) | req->way_mask << 20);
1221 /* Alloc memory for QINT's HW contexts */
1222 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1223 qints = (cfg >> 12) & 0xFFF;
1224 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1225 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1229 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1230 (u64)pfvf->nix_qints_ctx->iova);
1231 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1232 BIT_ULL(36) | req->way_mask << 20);
1234 /* Setup VLANX TPIDs.
1235 * Use VLAN1 for 802.1Q
1236 * and VLAN0 for 802.1AD.
1238 cfg = (0x8100ULL << 16) | 0x88A8ULL;
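/* As written above: VLAN1's TPID (0x8100, 802.1Q) goes in bits 16-31 and
 * VLAN0's TPID (0x88A8, 802.1AD) in bits 0-15 of NIX_AF_LFX_TX_CFG.
 */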
1239 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1241 /* Enable LMTST for this NIX LF */
1242 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1244 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1246 cfg = req->npa_func;
1248 cfg |= (u64)req->sso_func << 16;
1250 cfg |= (u64)req->xqe_sz << 33;
1251 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1253 /* Config Rx pkt length, csum checks and apad enable / disable */
1254 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1256 /* Configure pkind for TX parse config */
1257 cfg = NPC_TX_DEF_PKIND;
1258 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1260 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1261 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1265 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1266 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1268 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1269 rvu_write64(rvu, blkaddr,
1270 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1271 VTAGSIZE_T4 | VTAG_STRIP);
1276 nix_ctx_free(rvu, pfvf);
1280 /* Set macaddr of this PF/VF */
1281 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1283 /* set SQB size info */
1284 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1285 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1286 rsp->rx_chan_base = pfvf->rx_chan_base;
1287 rsp->tx_chan_base = pfvf->tx_chan_base;
1288 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1289 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1290 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1291 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1292 /* Get HW supported stat count */
1293 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1294 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1295 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1296 /* Get count of CQ IRQs and error IRQs supported per LF */
1297 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1298 rsp->qints = ((cfg >> 12) & 0xFFF);
1299 rsp->cints = ((cfg >> 24) & 0xFFF);
1300 rsp->cgx_links = hw->cgx_links;
1301 rsp->lbk_links = hw->lbk_links;
1302 rsp->sdp_links = hw->sdp_links;
1307 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1308 struct msg_rsp *rsp)
1310 struct rvu_hwinfo *hw = rvu->hw;
1311 u16 pcifunc = req->hdr.pcifunc;
1312 struct rvu_block *block;
1313 int blkaddr, nixlf, err;
1314 struct rvu_pfvf *pfvf;
1316 pfvf = rvu_get_pfvf(rvu, pcifunc);
1317 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1318 if (!pfvf->nixlf || blkaddr < 0)
1319 return NIX_AF_ERR_AF_LF_INVALID;
1321 block = &hw->block[blkaddr];
1322 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1324 return NIX_AF_ERR_AF_LF_INVALID;
1326 if (req->flags & NIX_LF_DISABLE_FLOWS)
1327 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1329 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1331 /* Free any tx vtag def entries used by this NIX LF */
1332 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1333 nix_free_tx_vtag_entries(rvu, pcifunc);
1335 nix_interface_deinit(rvu, pcifunc, nixlf);
1337 /* Reset this NIX LF */
1338 err = rvu_lf_reset(rvu, block, nixlf);
1340 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1341 block->addr - BLKADDR_NIX0, nixlf);
1342 return NIX_AF_ERR_LF_RESET;
1345 nix_ctx_free(rvu, pfvf);
1350 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1351 struct nix_mark_format_cfg *req,
1352 struct nix_mark_format_cfg_rsp *rsp)
1354 u16 pcifunc = req->hdr.pcifunc;
1355 struct nix_hw *nix_hw;
1356 struct rvu_pfvf *pfvf;
1360 pfvf = rvu_get_pfvf(rvu, pcifunc);
1361 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1362 if (!pfvf->nixlf || blkaddr < 0)
1363 return NIX_AF_ERR_AF_LF_INVALID;
1365 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1369 cfg = (((u32)req->offset & 0x7) << 16) |
1370 (((u32)req->y_mask & 0xF) << 12) |
1371 (((u32)req->y_val & 0xF) << 8) |
1372 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1374 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1376 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1377 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1378 return NIX_AF_ERR_MARK_CFG_FAIL;
1381 rsp->mark_format_idx = rc;
1385 /* Disable shaping of pkts by a scheduler queue
1386 * at a given scheduler level.
1388 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1391 u64 cir_reg = 0, pir_reg = 0;
1395 case NIX_TXSCH_LVL_TL1:
1396 cir_reg = NIX_AF_TL1X_CIR(schq);
1397 pir_reg = 0; /* PIR not available at TL1 */
1399 case NIX_TXSCH_LVL_TL2:
1400 cir_reg = NIX_AF_TL2X_CIR(schq);
1401 pir_reg = NIX_AF_TL2X_PIR(schq);
1403 case NIX_TXSCH_LVL_TL3:
1404 cir_reg = NIX_AF_TL3X_CIR(schq);
1405 pir_reg = NIX_AF_TL3X_PIR(schq);
1407 case NIX_TXSCH_LVL_TL4:
1408 cir_reg = NIX_AF_TL4X_CIR(schq);
1409 pir_reg = NIX_AF_TL4X_PIR(schq);
1415 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1416 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1420 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1421 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1424 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1427 struct rvu_hwinfo *hw = rvu->hw;
1430 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1433 /* Reset TL4's SDP link config */
1434 if (lvl == NIX_TXSCH_LVL_TL4)
1435 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1437 if (lvl != NIX_TXSCH_LVL_TL2)
1440 /* Reset TL2's CGX or LBK link config */
1441 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1442 rvu_write64(rvu, blkaddr,
1443 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1446 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1448 struct rvu_hwinfo *hw = rvu->hw;
1449 int pf = rvu_get_pf(pcifunc);
1450 u8 cgx_id = 0, lmac_id = 0;
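/* Transmit link numbering as used below: CGX LMAC links occupy indices
 * 0 .. hw->cgx_links - 1 (cgx_id * lmac_per_cgx + lmac_id), LBK links
 * start at hw->cgx_links, and the SDP link follows after all CGX and
 * LBK links.
 */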
1452 if (is_afvf(pcifunc)) { /* LBK links */
1453 return hw->cgx_links;
1454 } else if (is_pf_cgxmapped(rvu, pf)) {
1455 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1456 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1460 return hw->cgx_links + hw->lbk_links;
1463 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1464 int link, int *start, int *end)
1466 struct rvu_hwinfo *hw = rvu->hw;
1467 int pf = rvu_get_pf(pcifunc);
1469 if (is_afvf(pcifunc)) { /* LBK links */
1470 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1471 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1472 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1473 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1474 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1475 } else { /* SDP link */
1476 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1477 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1478 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1482 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1483 struct nix_hw *nix_hw,
1484 struct nix_txsch_alloc_req *req)
1486 struct rvu_hwinfo *hw = rvu->hw;
1487 int schq, req_schq, free_cnt;
1488 struct nix_txsch *txsch;
1489 int link, start, end;
1491 txsch = &nix_hw->txsch[lvl];
1492 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1497 link = nix_get_tx_link(rvu, pcifunc);
1499 /* For traffic aggregating scheduler level, one queue is enough */
1500 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1502 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1506 /* Get free SCHQ count and check if request can be accommodated */
1507 if (hw->cap.nix_fixed_txschq_mapping) {
1508 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1509 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1510 if (end <= txsch->schq.max && schq < end &&
1511 !test_bit(schq, txsch->schq.bmap))
1516 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1519 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1520 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1522 /* If contiguous queues are needed, check for availability */
1523 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1524 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1525 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1530 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1531 struct nix_txsch_alloc_rsp *rsp,
1532 int lvl, int start, int end)
1534 struct rvu_hwinfo *hw = rvu->hw;
1535 u16 pcifunc = rsp->hdr.pcifunc;
1538 /* For traffic aggregating levels, queue alloc is based
1539 * on the transmit link to which the PF_FUNC is mapped.
1541 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1542 /* A single TL queue is allocated */
1543 if (rsp->schq_contig[lvl]) {
1544 rsp->schq_contig[lvl] = 1;
1545 rsp->schq_contig_list[lvl][0] = start;
1548 /* Both contig and non-contig reqs don't make sense here */
1549 if (rsp->schq_contig[lvl])
1552 if (rsp->schq[lvl]) {
1554 rsp->schq_list[lvl][0] = start;
1559 /* Adjust the queue request count if HW supports
1560 * only a one-queue-per-level configuration.
1562 if (hw->cap.nix_fixed_txschq_mapping) {
1563 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1565 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1566 rsp->schq_contig[lvl] = 0;
1571 if (rsp->schq_contig[lvl]) {
1572 rsp->schq_contig[lvl] = 1;
1573 set_bit(schq, txsch->schq.bmap);
1574 rsp->schq_contig_list[lvl][0] = schq;
1576 } else if (rsp->schq[lvl]) {
1578 set_bit(schq, txsch->schq.bmap);
1579 rsp->schq_list[lvl][0] = schq;
1584 /* Allocate contiguous queue indices requested first */
1585 if (rsp->schq_contig[lvl]) {
1586 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1587 txsch->schq.max, start,
1588 rsp->schq_contig[lvl], 0);
1590 rsp->schq_contig[lvl] = 0;
1591 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1592 set_bit(schq, txsch->schq.bmap);
1593 rsp->schq_contig_list[lvl][idx] = schq;
1598 /* Allocate non-contiguous queue indices */
1599 if (rsp->schq[lvl]) {
1601 for (schq = start; schq < end; schq++) {
1602 if (!test_bit(schq, txsch->schq.bmap)) {
1603 set_bit(schq, txsch->schq.bmap);
1604 rsp->schq_list[lvl][idx++] = schq;
1606 if (idx == rsp->schq[lvl])
1609 /* Update how many were allocated */
1610 rsp->schq[lvl] = idx;
1614 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1615 struct nix_txsch_alloc_req *req,
1616 struct nix_txsch_alloc_rsp *rsp)
1618 struct rvu_hwinfo *hw = rvu->hw;
1619 u16 pcifunc = req->hdr.pcifunc;
1620 int link, blkaddr, rc = 0;
1621 int lvl, idx, start, end;
1622 struct nix_txsch *txsch;
1623 struct rvu_pfvf *pfvf;
1624 struct nix_hw *nix_hw;
1628 pfvf = rvu_get_pfvf(rvu, pcifunc);
1629 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1630 if (!pfvf->nixlf || blkaddr < 0)
1631 return NIX_AF_ERR_AF_LF_INVALID;
1633 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1637 mutex_lock(&rvu->rsrc_lock);
1639 /* Check if request is valid as per HW capabilities
1640 * and can be accommodated.
1642 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1643 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1648 /* Allocate requested Tx scheduler queues */
1649 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1650 txsch = &nix_hw->txsch[lvl];
1651 pfvf_map = txsch->pfvf_map;
1653 if (!req->schq[lvl] && !req->schq_contig[lvl])
1656 rsp->schq[lvl] = req->schq[lvl];
1657 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1659 link = nix_get_tx_link(rvu, pcifunc);
1661 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1664 } else if (hw->cap.nix_fixed_txschq_mapping) {
1665 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1668 end = txsch->schq.max;
1671 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1673 /* Reset queue config */
1674 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1675 schq = rsp->schq_contig_list[lvl][idx];
1676 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1677 NIX_TXSCHQ_CFG_DONE))
1678 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1679 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1680 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1683 for (idx = 0; idx < req->schq[lvl]; idx++) {
1684 schq = rsp->schq_list[lvl][idx];
1685 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1686 NIX_TXSCHQ_CFG_DONE))
1687 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1688 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1689 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1693 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1694 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1695 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1696 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1697 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1700 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1702 mutex_unlock(&rvu->rsrc_lock);
1706 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1707 int smq, u16 pcifunc, int nixlf)
1709 int pf = rvu_get_pf(pcifunc);
1710 u8 cgx_id = 0, lmac_id = 0;
1711 int err, restore_tx_en = 0;
1714 /* enable cgx tx if disabled */
1715 if (is_pf_cgxmapped(rvu, pf)) {
1716 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1717 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1721 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1722 /* Do SMQ flush and set enqueue xoff */
1723 cfg |= BIT_ULL(50) | BIT_ULL(49);
1724 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
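/* Bit 49 is treated as the flush trigger (it is polled below until it
 * clears) and bit 50 as the enqueue xoff; bit roles inferred from how
 * this function uses them.
 */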
1726 /* Disable backpressure from physical link,
1727 * otherwise SMQ flush may stall.
1729 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1731 /* Wait for flush to complete */
1732 err = rvu_poll_reg(rvu, blkaddr,
1733 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1736 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1738 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1739 /* restore cgx tx state */
1741 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1744 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1746 int blkaddr, nixlf, lvl, schq, err;
1747 struct rvu_hwinfo *hw = rvu->hw;
1748 struct nix_txsch *txsch;
1749 struct nix_hw *nix_hw;
1751 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1753 return NIX_AF_ERR_AF_LF_INVALID;
1755 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1759 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1761 return NIX_AF_ERR_AF_LF_INVALID;
1763 /* Disable TL2/3 queue links before SMQ flush */
1764 mutex_lock(&rvu->rsrc_lock);
1765 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1766 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1769 txsch = &nix_hw->txsch[lvl];
1770 for (schq = 0; schq < txsch->schq.max; schq++) {
1771 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1773 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1778 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1779 for (schq = 0; schq < txsch->schq.max; schq++) {
1780 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1782 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1785 /* Now free scheduler queues to free pool */
1786 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1787 /* TLs above the aggregation level are shared across a PF
1788 * and its VFs, hence skip freeing them.
1790 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1793 txsch = &nix_hw->txsch[lvl];
1794 for (schq = 0; schq < txsch->schq.max; schq++) {
1795 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1797 rvu_free_rsrc(&txsch->schq, schq);
1798 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1801 mutex_unlock(&rvu->rsrc_lock);
1803 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1804 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1805 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1807 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1812 static int nix_txschq_free_one(struct rvu *rvu,
1813 struct nix_txsch_free_req *req)
1815 struct rvu_hwinfo *hw = rvu->hw;
1816 u16 pcifunc = req->hdr.pcifunc;
1817 int lvl, schq, nixlf, blkaddr;
1818 struct nix_txsch *txsch;
1819 struct nix_hw *nix_hw;
1822 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1824 return NIX_AF_ERR_AF_LF_INVALID;
1826 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1830 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1832 return NIX_AF_ERR_AF_LF_INVALID;
1834 lvl = req->schq_lvl;
1836 txsch = &nix_hw->txsch[lvl];
1838 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1841 pfvf_map = txsch->pfvf_map;
1842 mutex_lock(&rvu->rsrc_lock);
1844 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1845 mutex_unlock(&rvu->rsrc_lock);
1849 /* Flush if it is an SMQ. The onus of disabling
1850 * TL2/3 queue links before the SMQ flush is on the user
1852 if (lvl == NIX_TXSCH_LVL_SMQ)
1853 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1855 /* Free the resource */
1856 rvu_free_rsrc(&txsch->schq, schq);
1857 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1858 mutex_unlock(&rvu->rsrc_lock);
1861 return NIX_AF_ERR_TLX_INVALID;
1864 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1865 struct nix_txsch_free_req *req,
1866 struct msg_rsp *rsp)
1868 if (req->flags & TXSCHQ_FREE_ALL)
1869 return nix_txschq_free(rvu, req->hdr.pcifunc);
1871 return nix_txschq_free_one(rvu, req);
1874 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1875 int lvl, u64 reg, u64 regval)
1877 u64 regbase = reg & 0xFFFF;
1880 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1883 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1884 /* Check if this schq belongs to this PF/VF or not */
1885 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1888 parent = (regval >> 16) & 0x1FF;
1889 /* Validate MDQ's TL4 parent */
1890 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1891 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1894 /* Validate TL4's TL3 parent */
1895 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1896 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1899 /* Validate TL3's TL2 parent */
1900 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1901 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1904 /* Validate TL2's TL1 parent */
1905 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1906 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1912 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1916 if (hw->cap.nix_shaping)
1919 /* If shaping and coloring are not supported, then
1920 * *_CIR and *_PIR registers should not be configured.
1922 regbase = reg & 0xFFFF;
1925 case NIX_TXSCH_LVL_TL1:
1926 if (regbase == NIX_AF_TL1X_CIR(0))
1929 case NIX_TXSCH_LVL_TL2:
1930 if (regbase == NIX_AF_TL2X_CIR(0) ||
1931 regbase == NIX_AF_TL2X_PIR(0))
1934 case NIX_TXSCH_LVL_TL3:
1935 if (regbase == NIX_AF_TL3X_CIR(0) ||
1936 regbase == NIX_AF_TL3X_PIR(0))
1939 case NIX_TXSCH_LVL_TL4:
1940 if (regbase == NIX_AF_TL4X_CIR(0) ||
1941 regbase == NIX_AF_TL4X_PIR(0))
1948 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1949 u16 pcifunc, int blkaddr)
1954 schq = nix_get_tx_link(rvu, pcifunc);
1955 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1956 /* Skip if PF has already done the config */
1957 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1959 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1960 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1961 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1962 TXSCH_TL1_DFLT_RR_QTM);
1963 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1964 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1967 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
1968 u16 pcifunc, struct nix_txsch *txsch)
1970 struct rvu_hwinfo *hw = rvu->hw;
1971 int lbk_link_start, lbk_links;
1972 u8 pf = rvu_get_pf(pcifunc);
1975 if (!is_pf_cgxmapped(rvu, pf))
1978 lbk_link_start = hw->cgx_links;
1980 for (schq = 0; schq < txsch->schq.max; schq++) {
1981 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1983 /* Enable all LBK links with channel 63 by default so that
1984 * packets can be sent to LBK with an NPC TX MCAM rule
1986 lbk_links = hw->lbk_links;
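/* Each write below sets what is taken to be the link enable (bit 12) and
 * selects relative channel 63 (RVU_SWITCH_LBK_CHAN) on every LBK link,
 * per the comment above; the bit-12 meaning is inferred from usage.
 */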
1988 rvu_write64(rvu, blkaddr,
1989 NIX_AF_TL3_TL2X_LINKX_CFG(schq,
1992 BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
1996 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1997 struct nix_txschq_config *req,
1998 struct msg_rsp *rsp)
2000 struct rvu_hwinfo *hw = rvu->hw;
2001 u16 pcifunc = req->hdr.pcifunc;
2002 u64 reg, regval, schq_regbase;
2003 struct nix_txsch *txsch;
2004 struct nix_hw *nix_hw;
2005 int blkaddr, idx, err;
2009 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2010 req->num_regs > MAX_REGS_PER_MBOX_MSG)
2011 return NIX_AF_INVAL_TXSCHQ_CFG;
2013 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2017 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2021 txsch = &nix_hw->txsch[req->lvl];
2022 pfvf_map = txsch->pfvf_map;
2024 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2025 pcifunc & RVU_PFVF_FUNC_MASK) {
2026 mutex_lock(&rvu->rsrc_lock);
2027 if (req->lvl == NIX_TXSCH_LVL_TL1)
2028 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2029 mutex_unlock(&rvu->rsrc_lock);
2033 for (idx = 0; idx < req->num_regs; idx++) {
2034 reg = req->reg[idx];
2035 regval = req->regval[idx];
2036 schq_regbase = reg & 0xFFFF;
2038 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2039 txsch->lvl, reg, regval))
2040 return NIX_AF_INVAL_TXSCHQ_CFG;
2042 /* Check if shaping and coloring is supported */
2043 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2046 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2047 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2048 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2050 regval &= ~(0x7FULL << 24);
2051 regval |= ((u64)nixlf << 24);
2054 /* Clear 'BP_ENA' config, if it's not allowed */
2055 if (!hw->cap.nix_tx_link_bp) {
2056 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2057 (schq_regbase & 0xFF00) ==
2058 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2059 regval &= ~BIT_ULL(13);
2062 /* Mark config as done for TL1 by PF */
2063 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2064 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2065 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2066 mutex_lock(&rvu->rsrc_lock);
2067 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2068 NIX_TXSCHQ_CFG_DONE);
2069 mutex_unlock(&rvu->rsrc_lock);
2072 /* SMQ flush is special, hence split the register write such
2073 * that the flush is done first and the rest of the bits are written later.
2075 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2076 (regval & BIT_ULL(49))) {
2077 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2078 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2079 regval &= ~BIT_ULL(49);
2081 rvu_write64(rvu, blkaddr, reg, regval);
2084 rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
2085 &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
2090 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2091 struct nix_vtag_config *req)
2093 u64 regval = req->vtag_size;
2095 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2096 req->vtag_size > VTAGSIZE_T8)
2099 /* RX VTAG Type 7 reserved for vf vlan */
2100 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2101 return NIX_AF_ERR_RX_VTAG_INUSE;
2103 if (req->rx.capture_vtag)
2104 regval |= BIT_ULL(5);
2105 if (req->rx.strip_vtag)
2106 regval |= BIT_ULL(4);
2108 rvu_write64(rvu, blkaddr,
2109 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2113 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2114 u16 pcifunc, int index)
2116 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2117 struct nix_txvlan *vlan = &nix_hw->txvlan;
2119 if (vlan->entry2pfvf_map[index] != pcifunc)
2120 return NIX_AF_ERR_PARAM;
2122 rvu_write64(rvu, blkaddr,
2123 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2124 rvu_write64(rvu, blkaddr,
2125 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2127 vlan->entry2pfvf_map[index] = 0;
2128 rvu_free_rsrc(&vlan->rsrc, index);
2133 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2135 struct nix_txvlan *vlan;
2136 struct nix_hw *nix_hw;
2139 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2143 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2144 vlan = &nix_hw->txvlan;
2146 mutex_lock(&vlan->rsrc_lock);
2147 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2148 for (index = 0; index < vlan->rsrc.max; index++) {
2149 if (vlan->entry2pfvf_map[index] == pcifunc)
2150 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2152 mutex_unlock(&vlan->rsrc_lock);
2155 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2158 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2159 struct nix_txvlan *vlan = &nix_hw->txvlan;
2163 mutex_lock(&vlan->rsrc_lock);
2165 index = rvu_alloc_rsrc(&vlan->rsrc);
2167 mutex_unlock(&vlan->rsrc_lock);
2171 mutex_unlock(&vlan->rsrc_lock);
2173 regval = size ? vtag : vtag << 32;
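/* Per the selection above, an 8-byte vtag (nonzero size) occupies the
 * full DEFX_DATA register while a 4-byte vtag is placed in the upper
 * 32 bits; DEFX_CTL records the vtag size.
 */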
2175 rvu_write64(rvu, blkaddr,
2176 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2177 rvu_write64(rvu, blkaddr,
2178 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2183 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2184 struct nix_vtag_config *req)
2186 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2187 struct nix_txvlan *vlan = &nix_hw->txvlan;
2188 u16 pcifunc = req->hdr.pcifunc;
2189 int idx0 = req->tx.vtag0_idx;
2190 int idx1 = req->tx.vtag1_idx;
2193 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2194 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2195 vlan->entry2pfvf_map[idx1] != pcifunc)
2196 return NIX_AF_ERR_PARAM;
2198 mutex_lock(&vlan->rsrc_lock);
2200 if (req->tx.free_vtag0) {
2201 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2206 if (req->tx.free_vtag1)
2207 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2210 mutex_unlock(&vlan->rsrc_lock);
2214 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2215 struct nix_vtag_config *req,
2216 struct nix_vtag_config_rsp *rsp)
2218 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2219 struct nix_txvlan *vlan = &nix_hw->txvlan;
2220 u16 pcifunc = req->hdr.pcifunc;
2222 if (req->tx.cfg_vtag0) {
2224 nix_tx_vtag_alloc(rvu, blkaddr,
2225 req->tx.vtag0, req->vtag_size);
2227 if (rsp->vtag0_idx < 0)
2228 return NIX_AF_ERR_TX_VTAG_NOSPC;
2230 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2233 if (req->tx.cfg_vtag1) {
2235 nix_tx_vtag_alloc(rvu, blkaddr,
2236 req->tx.vtag1, req->vtag_size);
2238 if (rsp->vtag1_idx < 0)
2241 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2247 if (req->tx.cfg_vtag0)
2248 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2250 return NIX_AF_ERR_TX_VTAG_NOSPC;
2253 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2254 struct nix_vtag_config *req,
2255 struct nix_vtag_config_rsp *rsp)
2257 u16 pcifunc = req->hdr.pcifunc;
2258 int blkaddr, nixlf, err;
2260 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2264 if (req->cfg_type) {
2265 /* rx vtag configuration */
2266 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2268 return NIX_AF_ERR_PARAM;
2270 /* tx vtag configuration */
2271 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2272 (req->tx.free_vtag0 || req->tx.free_vtag1))
2273 return NIX_AF_ERR_PARAM;
2275 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2276 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2278 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2279 return nix_tx_vtag_decfg(rvu, blkaddr, req);
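/* Install or update a single multicast/mirror entry (MCE) in the NIX MCE
 * table via an admin queue request issued on behalf of the AF.
 */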
2285 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2286 int mce, u8 op, u16 pcifunc, int next, bool eol)
2288 struct nix_aq_enq_req aq_req;
2291 aq_req.hdr.pcifunc = 0;
2292 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2296 /* Use RSS with RSS index 0 */
2298 aq_req.mce.index = 0;
2299 aq_req.mce.eol = eol;
2300 aq_req.mce.pf_func = pcifunc;
2301 aq_req.mce.next = next;
2303 /* All fields valid */
2304 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2306 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2308 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2309 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
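/* Add 'pcifunc' to or remove it from the software MCE list; new entries
 * are appended at the tail so the hardware list order is preserved.
 */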
2315 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2316 u16 pcifunc, bool add)
2318 struct mce *mce, *tail = NULL;
2319 bool delete = false;
2321 /* Scan through the current list */
2322 hlist_for_each_entry(mce, &mce_list->head, node) {
2323 /* If already exists, then delete */
2324 if (mce->pcifunc == pcifunc && !add) {
2327 } else if (mce->pcifunc == pcifunc && add) {
2328 /* entry already exists */
2335 hlist_del(&mce->node);
2344 /* Add a new one to the list, at the tail */
2345 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2348 mce->pcifunc = pcifunc;
2350 hlist_add_head(&mce->node, &mce_list->head);
2352 hlist_add_behind(&mce->node, &tail->node);
2357 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2358 struct nix_mce_list *mce_list,
2359 int mce_idx, int mcam_index, bool add)
2361 int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2362 struct npc_mcam *mcam = &rvu->hw->mcam;
2363 struct nix_mcast *mcast;
2364 struct nix_hw *nix_hw;
2370 /* Get this PF/VF func's MCE index */
2371 idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2373 if (idx > (mce_idx + mce_list->max)) {
2375 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2376 __func__, idx, mce_list->max,
2377 pcifunc >> RVU_PFVF_PF_SHIFT);
2381 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2385 mcast = &nix_hw->mcast;
2386 mutex_lock(&mcast->mce_lock);
2388 err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2392 /* Disable MCAM entry in NPC */
2393 if (!mce_list->count) {
2394 npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2395 npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2399 /* Dump the updated list to HW */
2401 last_idx = idx + mce_list->count - 1;
2402 hlist_for_each_entry(mce, &mce_list->head, node) {
2407 /* EOL should be set in last MCE */
2408 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2409 mce->pcifunc, next_idx,
2410 (next_idx > last_idx) ? true : false);
2417 mutex_unlock(&mcast->mce_lock);
2421 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2422 struct nix_mce_list **mce_list, int *mce_idx)
2424 struct rvu_hwinfo *hw = rvu->hw;
2425 struct rvu_pfvf *pfvf;
2427 if (!hw->cap.nix_rx_multicast ||
2428 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2434 /* Get this PF/VF func's MCE index */
2435 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2437 if (type == NIXLF_BCAST_ENTRY) {
2438 *mce_list = &pfvf->bcast_mce_list;
2439 *mce_idx = pfvf->bcast_mce_idx;
2440 } else if (type == NIXLF_ALLMULTI_ENTRY) {
2441 *mce_list = &pfvf->mcast_mce_list;
2442 *mce_idx = pfvf->mcast_mce_idx;
2443 } else if (type == NIXLF_PROMISC_ENTRY) {
2444 *mce_list = &pfvf->promisc_mce_list;
2445 *mce_idx = pfvf->promisc_mce_idx;
2452 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2455 int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2456 struct npc_mcam *mcam = &rvu->hw->mcam;
2457 struct rvu_hwinfo *hw = rvu->hw;
2458 struct nix_mce_list *mce_list;
2460 /* skip multicast pkt replication for AF's VFs */
2461 if (is_afvf(pcifunc))
2464 if (!hw->cap.nix_rx_multicast)
2467 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2471 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2475 nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2477 mcam_index = npc_get_nixlf_mcam_index(mcam,
2478 pcifunc & ~RVU_PFVF_FUNC_MASK,
2480 err = nix_update_mce_list(rvu, pcifunc, mce_list,
2481 mce_idx, mcam_index, add);
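/* Reserve and prefill broadcast, multicast and promisc MCE lists for every
 * CGX mapped PF and its VFs (one entry per PF/VF function).
 */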
2485 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2487 struct nix_mcast *mcast = &nix_hw->mcast;
2488 int err, pf, numvfs, idx;
2489 struct rvu_pfvf *pfvf;
2493 /* Skip PF0 (i.e AF) */
2494 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2495 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2496 /* If PF is not enabled, nothing to do */
2497 if (!((cfg >> 20) & 0x01))
2499 /* Get numVFs attached to this PF */
2500 numvfs = (cfg >> 12) & 0xFF;
2502 pfvf = &rvu->pf[pf];
2504 /* Is this NIX0/1 block mapped to this PF? */
2505 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2508 /* save start idx of broadcast mce list */
2509 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2510 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2512 /* save start idx of multicast mce list */
2513 pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2514 nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2516 /* save the start idx of promisc mce list */
2517 pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2518 nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2520 for (idx = 0; idx < (numvfs + 1); idx++) {
2521 /* idx-0 is for PF, followed by VFs */
2522 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2524 /* Add dummy entries now, so that we don't have to check
2525 * for whether AQ_OP should be INIT/WRITE later on.
2526 * Will be updated when a NIXLF is attached/detached to
2529 err = nix_blk_setup_mce(rvu, nix_hw,
2530 pfvf->bcast_mce_idx + idx,
2536 /* add dummy entries to multicast mce list */
2537 err = nix_blk_setup_mce(rvu, nix_hw,
2538 pfvf->mcast_mce_idx + idx,
2544 /* add dummy entries to promisc mce list */
2545 err = nix_blk_setup_mce(rvu, nix_hw,
2546 pfvf->promisc_mce_idx + idx,
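/* Allocate MCE context and replication buffer memory for this NIX block,
 * program their base addresses and sizes, then set up the per-PF MCE tables.
 */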
2556 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2558 struct nix_mcast *mcast = &nix_hw->mcast;
2559 struct rvu_hwinfo *hw = rvu->hw;
2562 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2563 size = (1ULL << size);
2565 /* Alloc memory for multicast/mirror replication entries */
2566 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2567 (256UL << MC_TBL_SIZE), size);
2571 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2572 (u64)mcast->mce_ctx->iova);
2574 /* Set max list length equal to max no of VFs per PF + PF itself */
2575 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2576 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2578 /* Alloc memory for multicast replication buffers */
2579 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2580 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2581 (8UL << MC_BUF_CNT), size);
2585 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2586 (u64)mcast->mcast_buf->iova);
2588 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2589 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2591 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2592 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2593 BIT_ULL(20) | MC_BUF_CNT);
2595 mutex_init(&mcast->mce_lock);
2597 return nix_setup_mce_tables(rvu, nix_hw);
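/* Set up TX vtag definition entries: allocate the resource bitmap and the
 * entry-to-PFFUNC ownership map.
 */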
2600 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2602 struct nix_txvlan *vlan = &nix_hw->txvlan;
2605 /* Allocate resource bitmap for tx vtag def registers */
2606 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2607 err = rvu_alloc_bitmap(&vlan->rsrc);
2611 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2612 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2613 sizeof(u16), GFP_KERNEL);
2614 if (!vlan->entry2pfvf_map)
2617 mutex_init(&vlan->rsrc_lock);
2621 kfree(vlan->rsrc.bmap);
2625 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2627 struct nix_txsch *txsch;
2631 /* Get scheduler queue count of each type and alloc
2632 * bitmap for each for alloc/free/attach operations.
2634 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2635 txsch = &nix_hw->txsch[lvl];
2638 case NIX_TXSCH_LVL_SMQ:
2639 reg = NIX_AF_MDQ_CONST;
2641 case NIX_TXSCH_LVL_TL4:
2642 reg = NIX_AF_TL4_CONST;
2644 case NIX_TXSCH_LVL_TL3:
2645 reg = NIX_AF_TL3_CONST;
2647 case NIX_TXSCH_LVL_TL2:
2648 reg = NIX_AF_TL2_CONST;
2650 case NIX_TXSCH_LVL_TL1:
2651 reg = NIX_AF_TL1_CONST;
2654 cfg = rvu_read64(rvu, blkaddr, reg);
2655 txsch->schq.max = cfg & 0xFFFF;
2656 err = rvu_alloc_bitmap(&txsch->schq);
2660 /* Allocate memory for scheduler queues to
2661 * PF/VF pcifunc mapping info.
2663 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2664 sizeof(u32), GFP_KERNEL);
2665 if (!txsch->pfvf_map)
2667 for (schq = 0; schq < txsch->schq.max; schq++)
2668 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2673 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2674 int blkaddr, u32 cfg)
2678 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2679 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2682 if (fmt_idx >= nix_hw->mark_format.total)
2685 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2686 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2687 nix_hw->mark_format.in_use++;
2691 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2695 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2696 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2697 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2698 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2699 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2700 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2701 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2702 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2703 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2708 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2709 nix_hw->mark_format.total = (u8)total;
2710 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2712 if (!nix_hw->mark_format.cfg)
2714 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2715 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2717 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2724 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2726 /* CN10K supports LBK FIFO size 72 KB */
2727 if (rvu->hw->lbk_bufsize == 0x12000)
2728 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2730 *max_mtu = NIC_HW_MAX_FRS;
2733 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2735 /* RPM supports FIFO len 128 KB */
2736 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2737 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2739 *max_mtu = NIC_HW_MAX_FRS;
2742 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2743 struct nix_hw_info *rsp)
2745 u16 pcifunc = req->hdr.pcifunc;
2748 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2750 return NIX_AF_ERR_AF_LF_INVALID;
2752 if (is_afvf(pcifunc))
2753 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2755 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2757 rsp->min_mtu = NIC_HW_MIN_FRS;
2761 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2762 struct msg_rsp *rsp)
2764 u16 pcifunc = req->hdr.pcifunc;
2765 int i, nixlf, blkaddr, err;
2768 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2772 /* Get stats count supported by HW */
2773 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2775 /* Reset tx stats */
2776 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2777 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2779 /* Reset rx stats */
2780 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2781 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2786 /* Returns the ALG index to be set into NPC_RX_ACTION */
2787 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2791 /* Scan over existing algo entries to find a match */
2792 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2793 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
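/* Translate a NIX_FLOW_KEY_TYPE_* bitmask into up to FIELDS_PER_ALG field
 * extraction entries used by RSS to build the flow hash key.
 */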
2799 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2801 int idx, nr_field, key_off, field_marker, keyoff_marker;
2802 int max_key_off, max_bit_pos, group_member;
2803 struct nix_rx_flowkey_alg *field;
2804 struct nix_rx_flowkey_alg tmp;
2805 u32 key_type, valid_key;
2806 int l4_key_offset = 0;
2811 #define FIELDS_PER_ALG 5
2812 #define MAX_KEY_OFF 40
2813 /* Clear all fields */
2814 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2816 /* Each of the 32 possible flow key algorithm definitions should
2817 * fall into the above incremental config (except ALG0). Otherwise a
2818 * single NPC MCAM entry is not sufficient for supporting RSS.
2820 * If a different definition or combination is needed then the NPC MCAM
2821 * has to be programmed to filter such pkts and its action should
2822 * point to this definition to calculate flowtag or hash.
2824 * The `for loop` goes over _all_ protocol fields and the following
2825 * variables depict the state machine forward progress logic.
2827 * keyoff_marker - Enabled when hash byte length needs to be accounted
2828 * in field->key_offset update.
2829 * field_marker - Enabled when a new field needs to be selected.
2830 * group_member - Enabled when protocol is part of a group.
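/* Illustrative example: with flow_cfg = IPV4 | IPV6 | TCP | UDP the loop
 * ends up with three field entries - IPv4 SIP+DIP at key bytes 0-7, IPv6
 * SIP+DIP at key bytes 0-31 (both share the same key region since only one
 * can match a given packet), and a combined TCP/UDP Sport+Dport entry at
 * key bytes 32-35.
 */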
2833 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2834 nr_field = 0; key_off = 0; field_marker = 1;
2835 field = &tmp; max_bit_pos = fls(flow_cfg);
2837 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2838 key_off < MAX_KEY_OFF; idx++) {
2839 key_type = BIT(idx);
2840 valid_key = flow_cfg & key_type;
2841 /* Found a field marker, reset the field values */
2843 memset(&tmp, 0, sizeof(tmp));
2845 field_marker = true;
2846 keyoff_marker = true;
2848 case NIX_FLOW_KEY_TYPE_PORT:
2849 field->sel_chan = true;
2850 /* This should be set to 1, when SEL_CHAN is set */
2853 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2854 field->lid = NPC_LID_LC;
2855 field->hdr_offset = 9; /* offset */
2856 field->bytesm1 = 0; /* 1 byte */
2857 field->ltype_match = NPC_LT_LC_IP;
2858 field->ltype_mask = 0xF;
2860 case NIX_FLOW_KEY_TYPE_IPV4:
2861 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2862 field->lid = NPC_LID_LC;
2863 field->ltype_match = NPC_LT_LC_IP;
2864 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2865 field->lid = NPC_LID_LG;
2866 field->ltype_match = NPC_LT_LG_TU_IP;
2868 field->hdr_offset = 12; /* SIP offset */
2869 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2870 field->ltype_mask = 0xF; /* Match only IPv4 */
2871 keyoff_marker = false;
2873 case NIX_FLOW_KEY_TYPE_IPV6:
2874 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2875 field->lid = NPC_LID_LC;
2876 field->ltype_match = NPC_LT_LC_IP6;
2877 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2878 field->lid = NPC_LID_LG;
2879 field->ltype_match = NPC_LT_LG_TU_IP6;
2881 field->hdr_offset = 8; /* SIP offset */
2882 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2883 field->ltype_mask = 0xF; /* Match only IPv6 */
2885 case NIX_FLOW_KEY_TYPE_TCP:
2886 case NIX_FLOW_KEY_TYPE_UDP:
2887 case NIX_FLOW_KEY_TYPE_SCTP:
2888 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2889 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2890 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2891 field->lid = NPC_LID_LD;
2892 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2893 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2894 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2895 field->lid = NPC_LID_LH;
2896 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2898 /* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2899 * so no need to change the ltype_match, just change
2900 * the lid for inner protocols
2902 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2903 (int)NPC_LT_LH_TU_TCP);
2904 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2905 (int)NPC_LT_LH_TU_UDP);
2906 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2907 (int)NPC_LT_LH_TU_SCTP);
2909 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2910 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2912 field->ltype_match |= NPC_LT_LD_TCP;
2913 group_member = true;
2914 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2915 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2917 field->ltype_match |= NPC_LT_LD_UDP;
2918 group_member = true;
2919 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2920 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2922 field->ltype_match |= NPC_LT_LD_SCTP;
2923 group_member = true;
2925 field->ltype_mask = ~field->ltype_match;
2926 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2927 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2928 /* Handle the case where any of the group items
2929 * is enabled in the group but not the final one
2933 group_member = false;
2936 field_marker = false;
2937 keyoff_marker = false;
2940 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2941 * remember the TCP key offset of the 40 byte hash key.
2943 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2944 l4_key_offset = key_off;
2946 case NIX_FLOW_KEY_TYPE_NVGRE:
2947 field->lid = NPC_LID_LD;
2948 field->hdr_offset = 4; /* VSID offset */
2950 field->ltype_match = NPC_LT_LD_NVGRE;
2951 field->ltype_mask = 0xF;
2953 case NIX_FLOW_KEY_TYPE_VXLAN:
2954 case NIX_FLOW_KEY_TYPE_GENEVE:
2955 field->lid = NPC_LID_LE;
2957 field->hdr_offset = 4;
2958 field->ltype_mask = 0xF;
2959 field_marker = false;
2960 keyoff_marker = false;
2962 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2963 field->ltype_match |= NPC_LT_LE_VXLAN;
2964 group_member = true;
2967 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2968 field->ltype_match |= NPC_LT_LE_GENEVE;
2969 group_member = true;
2972 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2974 field->ltype_mask = ~field->ltype_match;
2975 field_marker = true;
2976 keyoff_marker = true;
2978 group_member = false;
2982 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2983 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2984 field->lid = NPC_LID_LA;
2985 field->ltype_match = NPC_LT_LA_ETHER;
2986 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2987 field->lid = NPC_LID_LF;
2988 field->ltype_match = NPC_LT_LF_TU_ETHER;
2990 field->hdr_offset = 0;
2991 field->bytesm1 = 5; /* DMAC 6 Byte */
2992 field->ltype_mask = 0xF;
2994 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2995 field->lid = NPC_LID_LC;
2996 field->hdr_offset = 40; /* IPV6 hdr */
2997 field->bytesm1 = 0; /* 1 Byte ext hdr*/
2998 field->ltype_match = NPC_LT_LC_IP6_EXT;
2999 field->ltype_mask = 0xF;
3001 case NIX_FLOW_KEY_TYPE_GTPU:
3002 field->lid = NPC_LID_LE;
3003 field->hdr_offset = 4;
3004 field->bytesm1 = 3; /* 4 bytes TID*/
3005 field->ltype_match = NPC_LT_LE_GTPU;
3006 field->ltype_mask = 0xF;
3008 case NIX_FLOW_KEY_TYPE_VLAN:
3009 field->lid = NPC_LID_LB;
3010 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3011 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3012 field->ltype_match = NPC_LT_LB_CTAG;
3013 field->ltype_mask = 0xF;
3014 field->fn_mask = 1; /* Mask out the first nibble */
3016 case NIX_FLOW_KEY_TYPE_AH:
3017 case NIX_FLOW_KEY_TYPE_ESP:
3018 field->hdr_offset = 0;
3019 field->bytesm1 = 7; /* SPI + sequence number */
3020 field->ltype_mask = 0xF;
3021 field->lid = NPC_LID_LE;
3022 field->ltype_match = NPC_LT_LE_ESP;
3023 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3024 field->lid = NPC_LID_LD;
3025 field->ltype_match = NPC_LT_LD_AH;
3026 field->hdr_offset = 4;
3027 keyoff_marker = false;
3033 /* Found a valid flow key type */
3035 /* Use the key offset of TCP/UDP/SCTP fields
3036 * for ESP/AH fields.
3038 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3039 key_type == NIX_FLOW_KEY_TYPE_AH)
3040 key_off = l4_key_offset;
3041 field->key_offset = key_off;
3042 memcpy(&alg[nr_field], field, sizeof(*field));
3043 max_key_off = max(max_key_off, field->bytesm1 + 1);
3045 /* Found a field marker, get the next field */
3050 /* Found a keyoff marker, update the new key_off */
3051 if (keyoff_marker) {
3052 key_off += max_key_off;
3056 /* Processed all the flow key types */
3057 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3060 return NIX_AF_ERR_RSS_NOSPC_FIELD;
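/* Reserve the next free RSS flowkey algorithm index and program its
 * NIX_AF_RX_FLOW_KEY_ALGX_FIELDX registers for the given flow_cfg.
 */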
3063 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3065 u64 field[FIELDS_PER_ALG];
3069 hw = get_nix_hw(rvu->hw, blkaddr);
3073 /* No room to add a new flow hash algorithm */
3074 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3075 return NIX_AF_ERR_RSS_NOSPC_ALGO;
3077 /* Generate algo fields for the given flow_cfg */
3078 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3082 /* Update ALGX_FIELDX register with generated fields */
3083 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3084 rvu_write64(rvu, blkaddr,
3085 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3088 /* Store the flow_cfg for further lookup */
3089 rc = hw->flowkey.in_use;
3090 hw->flowkey.flowkey[rc] = flow_cfg;
3091 hw->flowkey.in_use++;
3096 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3097 struct nix_rss_flowkey_cfg *req,
3098 struct nix_rss_flowkey_cfg_rsp *rsp)
3100 u16 pcifunc = req->hdr.pcifunc;
3101 int alg_idx, nixlf, blkaddr;
3102 struct nix_hw *nix_hw;
3105 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3109 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3113 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3114 /* Failed to get algo index from the existing list, reserve a new one */
3116 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3121 rsp->alg_idx = alg_idx;
3122 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3123 alg_idx, req->mcam_index);
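/* Pre-reserve the default set of RSS flowkey algorithms (IP 2-tuple and
 * TCP/UDP/SCTP 4-tuple combinations) at AF initialization time.
 */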
3127 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3129 u32 flowkey_cfg, minkey_cfg;
3132 /* Disable all flow key algx fieldx */
3133 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3134 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3135 rvu_write64(rvu, blkaddr,
3136 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3140 /* IPv4/IPv6 SIP/DIPs */
3141 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3142 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3146 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3147 minkey_cfg = flowkey_cfg;
3148 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3149 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3153 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3154 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3155 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3159 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3160 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3161 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3165 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3166 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3167 NIX_FLOW_KEY_TYPE_UDP;
3168 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3172 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3173 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3174 NIX_FLOW_KEY_TYPE_SCTP;
3175 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3179 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3180 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3181 NIX_FLOW_KEY_TYPE_SCTP;
3182 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3186 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3187 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3188 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3189 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3196 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3197 struct nix_set_mac_addr *req,
3198 struct msg_rsp *rsp)
3200 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3201 u16 pcifunc = req->hdr.pcifunc;
3202 int blkaddr, nixlf, err;
3203 struct rvu_pfvf *pfvf;
3205 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3209 pfvf = rvu_get_pfvf(rvu, pcifunc);
3211 /* untrusted VF can't overwrite admin(PF) changes */
3212 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3213 (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3215 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3219 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3221 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3222 pfvf->rx_chan_base, req->mac_addr);
3224 if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3225 ether_addr_copy(pfvf->default_mac, req->mac_addr);
3227 rvu_switch_update_rules(rvu, pcifunc);
3232 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3233 struct msg_req *req,
3234 struct nix_get_mac_addr_rsp *rsp)
3236 u16 pcifunc = req->hdr.pcifunc;
3237 struct rvu_pfvf *pfvf;
3239 if (!is_nixlf_attached(rvu, pcifunc))
3240 return NIX_AF_ERR_AF_LF_INVALID;
3242 pfvf = rvu_get_pfvf(rvu, pcifunc);
3244 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3249 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3250 struct msg_rsp *rsp)
3252 bool allmulti, promisc, nix_rx_multicast;
3253 u16 pcifunc = req->hdr.pcifunc;
3254 struct rvu_pfvf *pfvf;
3257 pfvf = rvu_get_pfvf(rvu, pcifunc);
3258 promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3259 allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3260 pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3262 nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3264 if (is_vf(pcifunc) && !nix_rx_multicast &&
3265 (promisc || allmulti)) {
3266 dev_warn_ratelimited(rvu->dev,
3267 "VF promisc/multicast not supported\n");
3271 /* untrusted VF can't configure promisc/allmulti */
3272 if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3273 (promisc || allmulti))
3276 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3280 if (nix_rx_multicast) {
3281 /* add/del this PF_FUNC to/from mcast pkt replication list */
3282 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3286 "Failed to update pcifunc 0x%x to multicast list\n",
3291 /* add/del this PF_FUNC to/from promisc pkt replication list */
3292 err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3296 "Failed to update pcifunc 0x%x to promisc list\n",
3302 /* install/uninstall allmulti entry */
3304 rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3305 pfvf->rx_chan_base);
3307 if (!nix_rx_multicast)
3308 rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3311 /* install/uninstall promisc entry */
3313 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3317 if (!nix_rx_multicast)
3318 rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
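/* Widen the requested max/min frame sizes so the shared RX link config
 * also covers the PF and all of its VFs, not just the requester.
 */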
3324 static void nix_find_link_frs(struct rvu *rvu,
3325 struct nix_frs_cfg *req, u16 pcifunc)
3327 int pf = rvu_get_pf(pcifunc);
3328 struct rvu_pfvf *pfvf;
3333 /* Update with requester's min/max lengths */
3334 pfvf = rvu_get_pfvf(rvu, pcifunc);
3335 pfvf->maxlen = req->maxlen;
3336 if (req->update_minlen)
3337 pfvf->minlen = req->minlen;
3339 maxlen = req->maxlen;
3340 minlen = req->update_minlen ? req->minlen : 0;
3342 /* Get this PF's numVFs and starting hwvf */
3343 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3345 /* For each VF, compare requested max/minlen */
3346 for (vf = 0; vf < numvfs; vf++) {
3347 pfvf = &rvu->hwvf[hwvf + vf];
3348 if (pfvf->maxlen > maxlen)
3349 maxlen = pfvf->maxlen;
3350 if (req->update_minlen &&
3351 pfvf->minlen && pfvf->minlen < minlen)
3352 minlen = pfvf->minlen;
3355 /* Compare requested max/minlen with PF's max/minlen */
3356 pfvf = &rvu->pf[pf];
3357 if (pfvf->maxlen > maxlen)
3358 maxlen = pfvf->maxlen;
3359 if (req->update_minlen &&
3360 pfvf->minlen && pfvf->minlen < minlen)
3361 minlen = pfvf->minlen;
3363 /* Update the request with max/min PF's and it's VF's max/min */
3364 req->maxlen = maxlen;
3365 if (req->update_minlen)
3366 req->minlen = minlen;
3369 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3370 struct msg_rsp *rsp)
3372 struct rvu_hwinfo *hw = rvu->hw;
3373 u16 pcifunc = req->hdr.pcifunc;
3374 int pf = rvu_get_pf(pcifunc);
3375 int blkaddr, schq, link = -1;
3376 struct nix_txsch *txsch;
3377 u64 cfg, lmac_fifo_len;
3378 struct nix_hw *nix_hw;
3379 u8 cgx = 0, lmac = 0;
3382 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3384 return NIX_AF_ERR_AF_LF_INVALID;
3386 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3390 if (is_afvf(pcifunc))
3391 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3393 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3395 if (!req->sdp_link && req->maxlen > max_mtu)
3396 return NIX_AF_ERR_FRS_INVALID;
3398 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3399 return NIX_AF_ERR_FRS_INVALID;
3401 /* Check if requester wants to update SMQ's */
3402 if (!req->update_smq)
3405 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3406 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3407 mutex_lock(&rvu->rsrc_lock);
3408 for (schq = 0; schq < txsch->schq.max; schq++) {
3409 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3411 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3412 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3413 if (req->update_minlen)
3414 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3415 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3417 mutex_unlock(&rvu->rsrc_lock);
3420 /* Check if config is for SDP link */
3421 if (req->sdp_link) {
3423 return NIX_AF_ERR_RX_LINK_INVALID;
3424 link = hw->cgx_links + hw->lbk_links;
3428 /* Check if the request is from CGX mapped RVU PF */
3429 if (is_pf_cgxmapped(rvu, pf)) {
3430 /* Get CGX and LMAC to which this PF is mapped and find link */
3431 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3432 link = (cgx * hw->lmac_per_cgx) + lmac;
3433 } else if (pf == 0) {
3434 /* For VFs of PF0 ingress is LBK port, so config LBK link */
3435 link = hw->cgx_links;
3439 return NIX_AF_ERR_RX_LINK_INVALID;
3441 nix_find_link_frs(rvu, req, pcifunc);
3444 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3445 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3446 if (req->update_minlen)
3447 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3448 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3450 if (req->sdp_link || pf == 0)
3453 /* Update transmit credits for CGX links */
3455 rvu_cgx_get_fifolen(rvu) /
3456 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3457 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3458 cfg &= ~(0xFFFFFULL << 12);
3459 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3460 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3464 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3465 struct msg_rsp *rsp)
3467 int nixlf, blkaddr, err;
3470 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3474 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3475 /* Set the interface configuration */
3476 if (req->len_verify & BIT(0))
3479 cfg &= ~BIT_ULL(41);
3481 if (req->len_verify & BIT(1))
3484 cfg &= ~BIT_ULL(40);
3486 if (req->csum_verify & BIT(0))
3489 cfg &= ~BIT_ULL(37);
3491 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3496 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3498 /* CN10k supports 72KB FIFO size and max packet size of 64k */
3499 if (rvu->hw->lbk_bufsize == 0x12000)
3500 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3502 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3505 static void nix_link_config(struct rvu *rvu, int blkaddr)
3507 struct rvu_hwinfo *hw = rvu->hw;
3508 int cgx, lmac_cnt, slink, link;
3509 u16 lbk_max_frs, lmac_max_frs;
3512 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3513 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3515 /* Set default min/max packet lengths allowed on NIX Rx links.
3517 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3518 * as undersize and report them to SW as error pkts, hence
3519 * setting it to 40 bytes.
3521 for (link = 0; link < hw->cgx_links; link++) {
3522 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3523 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3526 for (link = hw->cgx_links; link < hw->lbk_links; link++) {
3527 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3528 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3530 if (hw->sdp_links) {
3531 link = hw->cgx_links + hw->lbk_links;
3532 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3533 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3536 /* Set credits for Tx links assuming max packet length allowed.
3537 * This will be reconfigured based on MTU set for PF/VF.
3539 for (cgx = 0; cgx < hw->cgx; cgx++) {
3540 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3541 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3543 /* Enable credits and set credit pkt count to max allowed */
3544 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3545 slink = cgx * hw->lmac_per_cgx;
3546 for (link = slink; link < (slink + lmac_cnt); link++) {
3547 rvu_write64(rvu, blkaddr,
3548 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3553 /* Set Tx credits for LBK link */
3554 slink = hw->cgx_links;
3555 for (link = slink; link < (slink + hw->lbk_links); link++) {
3556 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3557 /* Enable credits and set credit pkt count to max allowed */
3558 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3559 rvu_write64(rvu, blkaddr,
3560 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
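/* Run NIX X2P bus calibration and verify that all CGX ports and the LBK
 * block responded before clearing the calibrate bit.
 */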
3564 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3569 /* Start X2P bus calibration */
3570 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3571 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3572 /* Wait for calibration to complete */
3573 err = rvu_poll_reg(rvu, blkaddr,
3574 NIX_AF_STATUS, BIT_ULL(10), false);
3576 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3580 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3581 /* Check if CGX devices are ready */
3582 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3583 /* Skip when cgx port is not available */
3584 if (!rvu_cgx_pdata(idx, rvu) ||
3585 (status & (BIT_ULL(16 + idx))))
3588 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3592 /* Check if LBK is ready */
3593 if (!(status & BIT_ULL(19))) {
3595 "LBK didn't respond to NIX X2P calibration\n");
3599 /* Clear 'calibrate_x2p' bit */
3600 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3601 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3602 if (err || (status & 0x3FFULL))
3604 "NIX X2P calibration failed, status 0x%llx\n", status);
3610 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3615 /* Set admin queue endianness */
3616 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3619 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3622 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3625 /* Do not bypass NDC cache */
3626 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3628 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3629 /* Disable caching of SQB aka SQEs */
3632 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3634 /* Result structure can be followed by RQ/SQ/CQ context at
3635 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3636 * operation type. Alloc sufficient result memory for all operations.
3638 err = rvu_aq_alloc(rvu, &block->aq,
3639 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3640 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3644 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3645 rvu_write64(rvu, block->addr,
3646 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3650 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3652 const struct npc_lt_def_cfg *ltdefs;
3653 struct rvu_hwinfo *hw = rvu->hw;
3654 int blkaddr = nix_hw->blkaddr;
3655 struct rvu_block *block;
3659 block = &hw->block[blkaddr];
3661 if (is_rvu_96xx_B0(rvu)) {
3662 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3663 * internal state when conditional clocks are turned off.
3664 * Hence enable them.
3666 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3667 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3669 /* Set chan/link to backpressure TL3 instead of TL2 */
3670 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3672 /* Disable SQ manager's sticky mode operation (set TM6 = 0)
3673 * This sticky mode is known to cause SQ stalls when multiple
3674 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3676 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3677 cfg &= ~BIT_ULL(15);
3678 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3681 ltdefs = rvu->kpu.lt_def;
3682 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3683 err = nix_calibrate_x2p(rvu, blkaddr);
3687 /* Initialize admin queue */
3688 err = nix_aq_init(rvu, block);
3692 /* Restore CINT timer delay to HW reset values */
3693 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3695 if (is_block_implemented(hw, blkaddr)) {
3696 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3700 err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3704 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3708 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3712 err = nix_setup_txvlan(rvu, nix_hw);
3716 /* Configure segmentation offload formats */
3717 nix_setup_lso(rvu, nix_hw, blkaddr);
3719 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3720 * This helps HW protocol checker to identify headers
3721 * and validate length and checksums.
3723 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3724 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3725 ltdefs->rx_ol2.ltype_mask);
3726 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3727 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3728 ltdefs->rx_oip4.ltype_mask);
3729 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3730 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3731 ltdefs->rx_iip4.ltype_mask);
3732 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3733 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3734 ltdefs->rx_oip6.ltype_mask);
3735 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3736 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3737 ltdefs->rx_iip6.ltype_mask);
3738 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3739 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3740 ltdefs->rx_otcp.ltype_mask);
3741 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3742 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3743 ltdefs->rx_itcp.ltype_mask);
3744 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3745 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3746 ltdefs->rx_oudp.ltype_mask);
3747 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3748 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3749 ltdefs->rx_iudp.ltype_mask);
3750 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3751 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3752 ltdefs->rx_osctp.ltype_mask);
3753 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3754 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3755 ltdefs->rx_isctp.ltype_mask);
3757 if (!is_rvu_otx2(rvu)) {
3758 /* Enable APAD calculation for other protocols
3759 * matching APAD0 and APAD1 lt def registers.
3761 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3762 (ltdefs->rx_apad0.valid << 11) |
3763 (ltdefs->rx_apad0.lid << 8) |
3764 (ltdefs->rx_apad0.ltype_match << 4) |
3765 ltdefs->rx_apad0.ltype_mask);
3766 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3767 (ltdefs->rx_apad1.valid << 11) |
3768 (ltdefs->rx_apad1.lid << 8) |
3769 (ltdefs->rx_apad1.ltype_match << 4) |
3770 ltdefs->rx_apad1.ltype_mask);
3772 /* Receive ethertype definition register defines layer
3773 * information in NPC_RESULT_S to identify the Ethertype
3774 * location in L2 header. Used for Ethertype overwriting
3775 * in inline IPsec flow.
3777 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3778 (ltdefs->rx_et[0].offset << 12) |
3779 (ltdefs->rx_et[0].valid << 11) |
3780 (ltdefs->rx_et[0].lid << 8) |
3781 (ltdefs->rx_et[0].ltype_match << 4) |
3782 ltdefs->rx_et[0].ltype_mask);
3783 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3784 (ltdefs->rx_et[1].offset << 12) |
3785 (ltdefs->rx_et[1].valid << 11) |
3786 (ltdefs->rx_et[1].lid << 8) |
3787 (ltdefs->rx_et[1].ltype_match << 4) |
3788 ltdefs->rx_et[1].ltype_mask);
3791 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3795 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3796 nix_link_config(rvu, blkaddr);
3798 /* Enable Channel backpressure */
3799 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3804 int rvu_nix_init(struct rvu *rvu)
3806 struct rvu_hwinfo *hw = rvu->hw;
3807 struct nix_hw *nix_hw;
3808 int blkaddr = 0, err;
3811 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3816 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3818 nix_hw = &hw->nix[i];
3820 nix_hw->blkaddr = blkaddr;
3821 err = rvu_nix_block_init(rvu, nix_hw);
3824 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3831 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3832 struct rvu_block *block)
3834 struct nix_txsch *txsch;
3835 struct nix_mcast *mcast;
3836 struct nix_txvlan *vlan;
3837 struct nix_hw *nix_hw;
3840 rvu_aq_free(rvu, block->aq);
3842 if (is_block_implemented(rvu->hw, blkaddr)) {
3843 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3847 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3848 txsch = &nix_hw->txsch[lvl];
3849 kfree(txsch->schq.bmap);
3852 nix_ipolicer_freemem(nix_hw);
3854 vlan = &nix_hw->txvlan;
3855 kfree(vlan->rsrc.bmap);
3856 mutex_destroy(&vlan->rsrc_lock);
3858 mcast = &nix_hw->mcast;
3859 qmem_free(rvu->dev, mcast->mce_ctx);
3860 qmem_free(rvu->dev, mcast->mcast_buf);
3861 mutex_destroy(&mcast->mce_lock);
3865 void rvu_nix_freemem(struct rvu *rvu)
3867 struct rvu_hwinfo *hw = rvu->hw;
3868 struct rvu_block *block;
3871 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3873 block = &hw->block[blkaddr];
3874 rvu_nix_block_freemem(rvu, blkaddr, block);
3875 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3879 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3880 struct msg_rsp *rsp)
3882 u16 pcifunc = req->hdr.pcifunc;
3883 struct rvu_pfvf *pfvf;
3886 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3890 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3892 npc_mcam_enable_flows(rvu, pcifunc);
3894 pfvf = rvu_get_pfvf(rvu, pcifunc);
3895 set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3897 rvu_switch_update_rules(rvu, pcifunc);
3899 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3902 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3903 struct msg_rsp *rsp)
3905 u16 pcifunc = req->hdr.pcifunc;
3906 struct rvu_pfvf *pfvf;
3909 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3913 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3915 pfvf = rvu_get_pfvf(rvu, pcifunc);
3916 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3918 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3921 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3923 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3924 struct hwctx_disable_req ctx_req;
3927 ctx_req.hdr.pcifunc = pcifunc;
3929 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3930 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3931 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3932 nix_interface_deinit(rvu, pcifunc, nixlf);
3933 nix_rx_sync(rvu, blkaddr);
3934 nix_txschq_free(rvu, pcifunc);
3936 clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3938 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3941 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3942 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3944 dev_err(rvu->dev, "SQ ctx disable failed\n");
3948 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3949 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3951 dev_err(rvu->dev, "RQ ctx disable failed\n");
3955 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3956 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3958 dev_err(rvu->dev, "CQ ctx disable failed\n");
3961 nix_ctx_free(rvu, pfvf);
3963 nix_free_all_bandprof(rvu, pcifunc);
3966 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3968 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3970 struct rvu_hwinfo *hw = rvu->hw;
3971 struct rvu_block *block;
3976 pf = rvu_get_pf(pcifunc);
3977 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3980 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3982 return NIX_AF_ERR_AF_LF_INVALID;
3984 block = &hw->block[blkaddr];
3985 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3987 return NIX_AF_ERR_AF_LF_INVALID;
3989 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3992 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3994 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3996 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
4001 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4002 struct msg_rsp *rsp)
4004 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4007 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4008 struct msg_rsp *rsp)
4010 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4013 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4014 struct nix_lso_format_cfg *req,
4015 struct nix_lso_format_cfg_rsp *rsp)
4017 u16 pcifunc = req->hdr.pcifunc;
4018 struct nix_hw *nix_hw;
4019 struct rvu_pfvf *pfvf;
4020 int blkaddr, idx, f;
4023 pfvf = rvu_get_pfvf(rvu, pcifunc);
4024 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4025 if (!pfvf->nixlf || blkaddr < 0)
4026 return NIX_AF_ERR_AF_LF_INVALID;
4028 nix_hw = get_nix_hw(rvu->hw, blkaddr);
4032 /* Find existing matching LSO format, if any */
4033 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4034 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4035 reg = rvu_read64(rvu, blkaddr,
4036 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4037 if (req->fields[f] != (reg & req->field_mask))
4041 if (f == NIX_LSO_FIELD_MAX)
4045 if (idx < nix_hw->lso.in_use) {
4047 rsp->lso_format_idx = idx;
4051 if (nix_hw->lso.in_use == nix_hw->lso.total)
4052 return NIX_AF_ERR_LSO_CFG_FAIL;
4054 rsp->lso_format_idx = nix_hw->lso.in_use++;
4056 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4057 rvu_write64(rvu, blkaddr,
4058 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4064 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4066 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4068 /* overwrite vf mac address with default_mac */
4070 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4073 /* NIX ingress policers or bandwidth profiles APIs */
4074 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4076 struct npc_lt_def_cfg defs, *ltdefs;
ltdefs = &defs;
4079 memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4081 /* Extract PCP and DEI fields from outer VLAN from byte offset
4082 * 2 from the start of LB_PTR (ie TAG).
4083 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4084 * fields are considered when 'Tunnel enable' is set in profile.
4086 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4087 (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4088 (ltdefs->ovlan.ltype_match << 4) |
4089 ltdefs->ovlan.ltype_mask);
4090 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4091 (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4092 (ltdefs->ivlan.ltype_match << 4) |
4093 ltdefs->ivlan.ltype_mask);
4095 /* DSCP field in outer and tunneled IPv4 packets */
4096 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4097 (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4098 (ltdefs->rx_oip4.ltype_match << 4) |
4099 ltdefs->rx_oip4.ltype_mask);
4100 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4101 (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4102 (ltdefs->rx_iip4.ltype_match << 4) |
4103 ltdefs->rx_iip4.ltype_mask);
4105 /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4106 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4107 (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4108 (ltdefs->rx_oip6.ltype_match << 4) |
4109 ltdefs->rx_oip6.ltype_mask);
4110 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4111 (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4112 (ltdefs->rx_iip6.ltype_match << 4) |
4113 ltdefs->rx_iip6.ltype_mask);
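/* Submit an INIT op with an all-zero context for one bandwidth profile so
 * that PF/VF drivers can later update it with a plain WRITE.
 */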
4116 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4117 int layer, int prof_idx)
4119 struct nix_cn10k_aq_enq_req aq_req;
4122 memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4124 aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4125 aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4126 aq_req.op = NIX_AQ_INSTOP_INIT;
4128 /* Context is all zeros, submit to AQ */
4129 rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4130 (struct nix_aq_enq_req *)&aq_req, NULL);
4132 dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4137 static int nix_setup_ipolicers(struct rvu *rvu,
4138 struct nix_hw *nix_hw, int blkaddr)
4140 struct rvu_hwinfo *hw = rvu->hw;
4141 struct nix_ipolicer *ipolicer;
4142 int err, layer, prof_idx;
4145 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4146 if (!(cfg & BIT_ULL(61))) {
4147 hw->cap.ipolicer = false;
4151 hw->cap.ipolicer = true;
4152 nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4153 sizeof(*ipolicer), GFP_KERNEL);
4154 if (!nix_hw->ipolicer)
4157 cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4159 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4160 ipolicer = &nix_hw->ipolicer[layer];
4162 case BAND_PROF_LEAF_LAYER:
4163 ipolicer->band_prof.max = cfg & 0XFFFF;
4165 case BAND_PROF_MID_LAYER:
4166 ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4168 case BAND_PROF_TOP_LAYER:
4169 ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4173 if (!ipolicer->band_prof.max)
4176 err = rvu_alloc_bitmap(&ipolicer->band_prof);
4180 ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4181 ipolicer->band_prof.max,
4182 sizeof(u16), GFP_KERNEL);
4183 if (!ipolicer->pfvf_map)
4186 ipolicer->match_id = devm_kcalloc(rvu->dev,
4187 ipolicer->band_prof.max,
4188 sizeof(u16), GFP_KERNEL);
4189 if (!ipolicer->match_id)
4193 prof_idx < ipolicer->band_prof.max; prof_idx++) {
4194 /* Set AF as current owner for INIT ops to succeed */
4195 ipolicer->pfvf_map[prof_idx] = 0x00;
4197 /* There is no enable bit in the profile context,
4198 * so contexts cannot be disabled. INIT them here instead,
4199 * so that a PF/VF later on only needs to issue a WRITE to
4200 * set up policer rates and config.
4202 err = nix_init_policer_context(rvu, nix_hw,
4208 /* Allocate memory for maintaining ref_counts for MID level
4209 * profiles; this will be needed for leaf layer profiles'
4212 if (layer != BAND_PROF_MID_LAYER)
4215 ipolicer->ref_count = devm_kcalloc(rvu->dev,
4216 ipolicer->band_prof.max,
4217 sizeof(u16), GFP_KERNEL);
4220 /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
4221 rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4223 nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4228 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4230 struct nix_ipolicer *ipolicer;
4233 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4234 ipolicer = &nix_hw->ipolicer[layer];
4236 if (!ipolicer->band_prof.max)
4239 kfree(ipolicer->band_prof.bmap);
4243 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4244 struct nix_hw *nix_hw, u16 pcifunc)
4246 struct nix_ipolicer *ipolicer;
4247 int layer, hi_layer, prof_idx;
4249 /* Bits [15:14] in profile index represent layer */
4250 layer = (req->qidx >> 14) & 0x03;
4251 prof_idx = req->qidx & 0x3FFF;
4253 ipolicer = &nix_hw->ipolicer[layer];
4254 if (prof_idx >= ipolicer->band_prof.max)
4257 /* Check if the profile is allocated to the requesting PCIFUNC or not
4258 * with the exception of AF. AF is allowed to read and update contexts.
4260 if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4263 /* If this profile is linked to higher layer profile then check
4264 * if that profile is also allocated to the requesting PCIFUNC
4267 if (!req->prof.hl_en)
4270 /* Leaf layer profile can link only to mid layer and
4271 * mid layer to top layer.
4273 if (layer == BAND_PROF_LEAF_LAYER)
4274 hi_layer = BAND_PROF_MID_LAYER;
4275 else if (layer == BAND_PROF_MID_LAYER)
4276 hi_layer = BAND_PROF_TOP_LAYER;
4280 ipolicer = &nix_hw->ipolicer[hi_layer];
4281 prof_idx = req->prof.band_prof_id;
4282 if (prof_idx >= ipolicer->band_prof.max ||
4283 ipolicer->pfvf_map[prof_idx] != pcifunc)
4289 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4290 struct nix_bandprof_alloc_req *req,
4291 struct nix_bandprof_alloc_rsp *rsp)
4293 int blkaddr, layer, prof, idx, err;
4294 u16 pcifunc = req->hdr.pcifunc;
4295 struct nix_ipolicer *ipolicer;
4296 struct nix_hw *nix_hw;
4298 if (!rvu->hw->cap.ipolicer)
4299 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4301 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4305 mutex_lock(&rvu->rsrc_lock);
4306 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4307 if (layer == BAND_PROF_INVAL_LAYER)
4309 if (!req->prof_count[layer])
4312 ipolicer = &nix_hw->ipolicer[layer];
4313 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4314 /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4315 if (idx == MAX_BANDPROF_PER_PFFUNC)
4318 prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4321 rsp->prof_count[layer]++;
4322 rsp->prof_idx[layer][idx] = prof;
4323 ipolicer->pfvf_map[prof] = pcifunc;
4326 mutex_unlock(&rvu->rsrc_lock);
4330 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4332 int blkaddr, layer, prof_idx, err;
4333 struct nix_ipolicer *ipolicer;
4334 struct nix_hw *nix_hw;
4336 if (!rvu->hw->cap.ipolicer)
4337 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4339 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4343 mutex_lock(&rvu->rsrc_lock);
4344 /* Free all the profiles allocated to the PCIFUNC */
4345 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4346 if (layer == BAND_PROF_INVAL_LAYER)
4348 ipolicer = &nix_hw->ipolicer[layer];
4350 for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4351 if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4354 /* Clear ratelimit aggregation, if any */
4355 if (layer == BAND_PROF_LEAF_LAYER &&
4356 ipolicer->match_id[prof_idx])
4357 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4359 ipolicer->pfvf_map[prof_idx] = 0x00;
4360 ipolicer->match_id[prof_idx] = 0;
4361 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4364 mutex_unlock(&rvu->rsrc_lock);
4368 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4369 struct nix_bandprof_free_req *req,
4370 struct msg_rsp *rsp)
4372 int blkaddr, layer, prof_idx, idx, err;
4373 u16 pcifunc = req->hdr.pcifunc;
4374 struct nix_ipolicer *ipolicer;
4375 struct nix_hw *nix_hw;
4378 return nix_free_all_bandprof(rvu, pcifunc);
4380 if (!rvu->hw->cap.ipolicer)
4381 return NIX_AF_ERR_IPOLICER_NOTSUPP;
4383 err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4387 mutex_lock(&rvu->rsrc_lock);
4388 /* Free the requested profile indices */
4389 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4390 if (layer == BAND_PROF_INVAL_LAYER)
4392 if (!req->prof_count[layer])
4395 ipolicer = &nix_hw->ipolicer[layer];
4396 for (idx = 0; idx < req->prof_count[layer]; idx++) {
4397 prof_idx = req->prof_idx[layer][idx];
4398 if (prof_idx >= ipolicer->band_prof.max ||
4399 ipolicer->pfvf_map[prof_idx] != pcifunc)
4402 /* Clear ratelimit aggregation, if any */
4403 if (layer == BAND_PROF_LEAF_LAYER &&
4404 ipolicer->match_id[prof_idx])
4405 nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4407 ipolicer->pfvf_map[prof_idx] = 0x00;
4408 ipolicer->match_id[prof_idx] = 0;
4409 rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4410 if (idx == MAX_BANDPROF_PER_PFFUNC)
4414 mutex_unlock(&rvu->rsrc_lock);
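/* Issue an AQ READ for the given context type and queue index and return
 * the response in 'aq_rsp'.
 */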
4418 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4419 struct nix_cn10k_aq_enq_req *aq_req,
4420 struct nix_cn10k_aq_enq_rsp *aq_rsp,
4421 u16 pcifunc, u8 ctype, u32 qidx)
4423 memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4424 aq_req->hdr.pcifunc = pcifunc;
4425 aq_req->ctype = ctype;
4426 aq_req->op = NIX_AQ_INSTOP_READ;
4427 aq_req->qidx = qidx;
4429 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4430 (struct nix_aq_enq_req *)aq_req,
4431 (struct nix_aq_enq_rsp *)aq_rsp);
static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
					  struct nix_hw *nix_hw,
					  struct nix_cn10k_aq_enq_req *aq_req,
					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
					  u32 leaf_prof, u16 mid_prof)
{
	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req->hdr.pcifunc = 0x00;
	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req->op = NIX_AQ_INSTOP_WRITE;
	aq_req->qidx = leaf_prof;

	aq_req->prof.band_prof_id = mid_prof;
	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
	aq_req->prof.hl_en = 1;
	aq_req->prof_mask.hl_en = 1;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				       (struct nix_aq_enq_req *)aq_req,
				       (struct nix_aq_enq_rsp *)aq_rsp);
}
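
/* If another leaf profile already carries the same 'match_id', attach both
 * leaf profiles to a shared mid-layer profile so that flows steered to
 * different RQs but tagged alike are rate limited as one aggregate.
 */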
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
				 u16 rq_idx, u16 match_id)
{
	int leaf_prof, mid_prof, leaf_match;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	struct nix_hw *nix_hw;
	int blkaddr, idx, rc;

	if (!rvu->hw->cap.ipolicer)
		return 0;

	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
	if (rc)
		return rc;

	/* Fetch the RQ's context to see if policing is enabled */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
				 NIX_AQ_CTYPE_RQ, rq_idx);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
			__func__, rq_idx, pcifunc);
		return rc;
	}

	if (!aq_rsp.rq.policer_ena)
		return 0;

	/* Get the bandwidth profile ID mapped to this RQ */
	leaf_prof = aq_rsp.rq.band_prof_id;

	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
	ipolicer->match_id[leaf_prof] = match_id;

	/* Check if any other leaf profile is marked with same match_id */
	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (idx == leaf_prof)
			continue;
		if (ipolicer->match_id[idx] != match_id)
			continue;

		leaf_match = idx;
		break;
	}

	if (idx == ipolicer->band_prof.max)
		return 0;

	/* Fetch the matching profile's context to check if it's already
	 * mapped to a mid level profile.
	 */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_match);
		return rc;
	}

	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map the 'leaf_prof'
		 * index to it as well, so that flows steered to different
		 * RQs and marked with the same match_id are rate limited
		 * in an aggregate fashion.
		 */
		mid_prof = aq_rsp.prof.band_prof_id;
		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
						    &aq_req, &aq_rsp,
						    leaf_prof, mid_prof);
		if (rc) {
			dev_err(rvu->dev,
				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
				__func__, leaf_prof, mid_prof);
			goto exit;
		}

		mutex_lock(&rvu->rsrc_lock);
		ipolicer->ref_count[mid_prof]++;
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}

	/* Allocate a mid layer profile and
	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
	 */
	mutex_lock(&rvu->rsrc_lock);
	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
	if (mid_prof < 0) {
		dev_err(rvu->dev,
			"%s: Unable to allocate mid layer profile\n", __func__);
		mutex_unlock(&rvu->rsrc_lock);
		goto exit;
	}
	mutex_unlock(&rvu->rsrc_lock);
	ipolicer->pfvf_map[mid_prof] = 0x00;
	ipolicer->ref_count[mid_prof] = 0;

	/* Initialize mid layer profile same as 'leaf_prof' */
	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		goto exit;
	}

	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
	aq_req.hdr.pcifunc = 0x00;
	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
	aq_req.op = NIX_AQ_INSTOP_WRITE;
	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
	/* Clear higher layer enable bit in the mid profile, just in case */
	aq_req.prof.hl_en = 0;
	aq_req.prof_mask.hl_en = 1;

	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
				     (struct nix_aq_enq_req *)&aq_req, NULL);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to INIT context of mid layer profile %d\n",
			__func__, mid_prof);
		goto exit;
	}

	/* Map both leaf profiles to this mid layer profile */
	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_prof, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_prof, mid_prof);
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
					    &aq_req, &aq_rsp,
					    leaf_match, mid_prof);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
			__func__, leaf_match, mid_prof);
		ipolicer->ref_count[mid_prof]--;
		goto exit;
	}

	mutex_lock(&rvu->rsrc_lock);
	ipolicer->ref_count[mid_prof]++;
	mutex_unlock(&rvu->rsrc_lock);

exit:
	return rc;
}

/* Called with mutex rsrc_lock held; the lock is dropped around the
 * AQ context read and re-acquired before returning.
 */
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
				     u32 leaf_prof)
{
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct nix_ipolicer *ipolicer;
	u16 mid_prof;
	int rc;

	mutex_unlock(&rvu->rsrc_lock);

	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);

	mutex_lock(&rvu->rsrc_lock);
	if (rc) {
		dev_err(rvu->dev,
			"%s: Failed to fetch context of leaf profile %d\n",
			__func__, leaf_prof);
		return;
	}

	if (!aq_rsp.prof.hl_en)
		return;

	mid_prof = aq_rsp.prof.band_prof_id;
	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
	ipolicer->ref_count[mid_prof]--;
	/* If ref_count is zero, free mid layer profile */
	if (!ipolicer->ref_count[mid_prof]) {
		ipolicer->pfvf_map[mid_prof] = 0x00;
		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);