1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
4 * Copyright (C) 2018 Marvell International Ltd.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
11 #include <linux/module.h>
12 #include <linux/pci.h>
14 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 int type, int chan_id);
49 enum nix_mark_fmt_indexes {
50 NIX_MARK_CFG_IP_DSCP_RED,
51 NIX_MARK_CFG_IP_DSCP_YELLOW,
52 NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
53 NIX_MARK_CFG_IP_ECN_RED,
54 NIX_MARK_CFG_IP_ECN_YELLOW,
55 NIX_MARK_CFG_IP_ECN_YELLOW_RED,
56 NIX_MARK_CFG_VLAN_DEI_RED,
57 NIX_MARK_CFG_VLAN_DEI_YELLOW,
58 NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
62 /* For now, considering MC resources needed for broadcast
63 * pkt replication only, i.e., 256 HWVFs + 12 PFs.
65 #define MC_TBL_SIZE MC_TBL_SZ_512
66 #define MC_BUF_CNT MC_BUF_CNT_128
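/* Note: with these settings, nix_setup_mcast() below sizes the MCE
 * context table as (256UL << MC_TBL_SIZE) entries and the replication
 * buffer pool as (8UL << MC_BUF_CNT) buffers. */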
69 struct hlist_node node;
73 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
77 /* If blkaddr is 0, return the first nix block address */
79 return rvu->nix_blkaddr[blkaddr];
81 while (i + 1 < MAX_NIX_BLKS) {
82 if (rvu->nix_blkaddr[i] == blkaddr)
83 return rvu->nix_blkaddr[i + 1];
90 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
92 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
95 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
96 if (!pfvf->nixlf || blkaddr < 0)
101 int rvu_get_nixlf_count(struct rvu *rvu)
103 int blkaddr = 0, max = 0;
104 struct rvu_block *block;
106 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
108 block = &rvu->hw->block[blkaddr];
109 max += block->lf.max;
110 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
115 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
117 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
118 struct rvu_hwinfo *hw = rvu->hw;
121 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
122 if (!pfvf->nixlf || blkaddr < 0)
123 return NIX_AF_ERR_AF_LF_INVALID;
125 *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
127 return NIX_AF_ERR_AF_LF_INVALID;
130 *nix_blkaddr = blkaddr;
135 static void nix_mce_list_init(struct nix_mce_list *list, int max)
137 INIT_HLIST_HEAD(&list->head);
142 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
149 idx = mcast->next_free_mce;
150 mcast->next_free_mce += count;
154 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
156 int nix_blkaddr = 0, i = 0;
157 struct rvu *rvu = hw->rvu;
159 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
160 while (nix_blkaddr) {
161 if (blkaddr == nix_blkaddr && hw->nix)
163 nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
169 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
173 /* Sync all in-flight RX packets to LLC/DRAM */
174 rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
175 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
177 dev_err(rvu->dev, "NIX RX software sync failed\n");
180 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
181 int lvl, u16 pcifunc, u16 schq)
183 struct rvu_hwinfo *hw = rvu->hw;
184 struct nix_txsch *txsch;
185 struct nix_hw *nix_hw;
188 nix_hw = get_nix_hw(rvu->hw, blkaddr);
192 txsch = &nix_hw->txsch[lvl];
193 /* Check out of bounds */
194 if (schq >= txsch->schq.max)
197 mutex_lock(&rvu->rsrc_lock);
198 map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
199 mutex_unlock(&rvu->rsrc_lock);
201 /* TLs aggregating traffic are shared across the PF and its VFs */
202 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
203 if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
209 if (map_func != pcifunc)
215 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
217 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
218 struct mac_ops *mac_ops;
219 int pkind, pf, vf, lbkid;
223 pf = rvu_get_pf(pcifunc);
224 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
228 case NIX_INTF_TYPE_CGX:
229 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
230 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
232 pkind = rvu_npc_get_pkind(rvu, pf);
235 "PF_Func 0x%x: Invalid pkind\n", pcifunc);
238 pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
239 pfvf->tx_chan_base = pfvf->rx_chan_base;
240 pfvf->rx_chan_cnt = 1;
241 pfvf->tx_chan_cnt = 1;
242 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
243 rvu_npc_set_pkind(rvu, pkind, pfvf);
245 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
246 /* By default we enable pause frames */
247 if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
248 mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
250 lmac_id, true, true);
252 case NIX_INTF_TYPE_LBK:
253 vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
255 /* If NIX1 block is present on the silicon then NIXes are
256 * assigned alternately for lbk interfaces. NIX0 should
257 * send packets on lbk link 1 channels and NIX1 should send
258 * on lbk link 0 channels for the communication between
262 if (rvu->hw->lbk_links > 1)
263 lbkid = vf & 0x1 ? 0 : 1;
265 /* Note that AF's VFs work in pairs and talk over consecutive
266 * loopback channels. Therefore, if an odd number of AF VFs are
267 * enabled, the last VF remains without a pair.
269 pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
270 pfvf->tx_chan_base = vf & 0x1 ?
271 rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
272 rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
273 pfvf->rx_chan_cnt = 1;
274 pfvf->tx_chan_cnt = 1;
275 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
277 pfvf->rx_chan_cnt, false);
281 /* Add a UCAST forwarding rule in MCAM with the MAC address of
282 * the RVU PF/VF this NIXLF is attached to.
284 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
285 pfvf->rx_chan_base, pfvf->mac_addr);
287 /* Add this PF_FUNC to bcast pkt replication list */
288 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
291 "Bcast list, failed to enable PF_FUNC 0x%x\n",
296 rvu_npc_install_bcast_match_entry(rvu, pcifunc,
297 nixlf, pfvf->rx_chan_base);
298 pfvf->maxlen = NIC_HW_MIN_FRS;
299 pfvf->minlen = NIC_HW_MIN_FRS;
304 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
306 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
312 /* Remove this PF_FUNC from bcast pkt replication list */
313 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
316 "Bcast list, failed to disable PF_FUNC 0x%x\n",
320 /* Free and disable any MCAM entries used by this NIX LF */
321 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
324 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
325 struct nix_bp_cfg_req *req,
328 u16 pcifunc = req->hdr.pcifunc;
329 struct rvu_pfvf *pfvf;
330 int blkaddr, pf, type;
334 pf = rvu_get_pf(pcifunc);
335 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
336 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
339 pfvf = rvu_get_pfvf(rvu, pcifunc);
340 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
342 chan_base = pfvf->rx_chan_base + req->chan_base;
343 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
344 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
345 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
351 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
352 int type, int chan_id)
354 int bpid, blkaddr, lmac_chan_cnt;
355 struct rvu_hwinfo *hw = rvu->hw;
356 u16 cgx_bpid_cnt, lbk_bpid_cnt;
357 struct rvu_pfvf *pfvf;
361 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
362 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
363 lmac_chan_cnt = cfg & 0xFF;
365 cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
366 lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
368 pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
370 /* Backpressure IDs range division
371 * CGX channels are mapped to (0 - 191) BPIDs
372 * LBK channels are mapped to (192 - 255) BPIDs
373 * SDP channels are mapped to (256 - 511) BPIDs
375 * LMAC channels and BPIDs are mapped as follows
376 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
377 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
378 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
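*
* Worked example (assuming lmac_per_cgx = 4 and lmac_chan_cnt = 16, as
* the ranges above imply): cgx(1)_lmac(2)_chan(5) maps to
* bpid = (1 * 4 * 16) + (2 * 16) + 5 = 101.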
381 case NIX_INTF_TYPE_CGX:
382 if ((req->chan_base + req->chan_cnt) > 15)
384 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
385 /* Assign bpid based on cgx, lmac and chan id */
386 bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
387 (lmac_id * lmac_chan_cnt) + req->chan_base;
389 if (req->bpid_per_chan)
391 if (bpid > cgx_bpid_cnt)
395 case NIX_INTF_TYPE_LBK:
396 if ((req->chan_base + req->chan_cnt) > 63)
398 bpid = cgx_bpid_cnt + req->chan_base;
399 if (req->bpid_per_chan)
401 if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
410 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
411 struct nix_bp_cfg_req *req,
412 struct nix_bp_cfg_rsp *rsp)
414 int blkaddr, pf, type, chan_id = 0;
415 u16 pcifunc = req->hdr.pcifunc;
416 struct rvu_pfvf *pfvf;
421 pf = rvu_get_pf(pcifunc);
422 type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
424 /* Enable backpressure only for CGX mapped PFs and LBK interface */
425 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
428 pfvf = rvu_get_pfvf(rvu, pcifunc);
429 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
431 bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
432 chan_base = pfvf->rx_chan_base + req->chan_base;
435 for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
437 dev_warn(rvu->dev, "Failed to enable backpressure\n");
441 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
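/* Judging by the write below, BIT(16) turns on backpressure for the
* channel and the low byte of the register carries the assigned BPID. */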
442 rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
443 cfg | (bpid & 0xFF) | BIT_ULL(16));
445 bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
448 for (chan = 0; chan < req->chan_cnt; chan++) {
449 /* Map channel and the bpid assigned to it */
450 rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
452 if (req->bpid_per_chan)
455 rsp->chan_cnt = req->chan_cnt;
460 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
461 u64 format, bool v4, u64 *fidx)
463 struct nix_lso_format field = {0};
465 /* IP's Length field */
466 field.layer = NIX_TXLAYER_OL3;
467 /* In IPv4 the length field is at byte offset 2; for IPv6 it's at offset 4 */
468 field.offset = v4 ? 2 : 4;
469 field.sizem1 = 1; /* i.e 2 bytes */
470 field.alg = NIX_LSOALG_ADD_PAYLEN;
471 rvu_write64(rvu, blkaddr,
472 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
475 /* No ID field in IPv6 header */
480 field.layer = NIX_TXLAYER_OL3;
482 field.sizem1 = 1; /* i.e 2 bytes */
483 field.alg = NIX_LSOALG_ADD_SEGNUM;
484 rvu_write64(rvu, blkaddr,
485 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
489 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
490 u64 format, u64 *fidx)
492 struct nix_lso_format field = {0};
494 /* TCP's sequence number field */
495 field.layer = NIX_TXLAYER_OL4;
497 field.sizem1 = 3; /* i.e 4 bytes */
498 field.alg = NIX_LSOALG_ADD_OFFSET;
499 rvu_write64(rvu, blkaddr,
500 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
503 /* TCP's flags field */
504 field.layer = NIX_TXLAYER_OL4;
506 field.sizem1 = 1; /* 2 bytes */
507 field.alg = NIX_LSOALG_TCP_FLAGS;
508 rvu_write64(rvu, blkaddr,
509 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
513 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
515 u64 cfg, idx, fidx = 0;
517 /* Get max HW supported format indices */
518 cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
519 nix_hw->lso.total = cfg;
522 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
523 /* For TSO, set first and middle segment flags to
524 * mask out PSH, RST & FIN flags in TCP packet
526 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
527 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
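/* 0xFFF2 = 1111111111110010b: bits 0 (FIN), 2 (RST) and 3 (PSH) are
* cleared, i.e. masked out, while SYN and the remaining TCP flag bits
* are left untouched. */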
528 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
530 /* Setup default static LSO formats
532 * Configure format fields for TCPv4 segmentation offload
534 idx = NIX_LSO_FORMAT_IDX_TSOV4;
535 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
536 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
538 /* Set rest of the fields to NOP */
539 for (; fidx < 8; fidx++) {
540 rvu_write64(rvu, blkaddr,
541 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
543 nix_hw->lso.in_use++;
545 /* Configure format fields for TCPv6 segmentation offload */
546 idx = NIX_LSO_FORMAT_IDX_TSOV6;
548 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
549 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
551 /* Set rest of the fields to NOP */
552 for (; fidx < 8; fidx++) {
553 rvu_write64(rvu, blkaddr,
554 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
556 nix_hw->lso.in_use++;
559 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
561 kfree(pfvf->rq_bmap);
562 kfree(pfvf->sq_bmap);
563 kfree(pfvf->cq_bmap);
565 qmem_free(rvu->dev, pfvf->rq_ctx);
567 qmem_free(rvu->dev, pfvf->sq_ctx);
569 qmem_free(rvu->dev, pfvf->cq_ctx);
571 qmem_free(rvu->dev, pfvf->rss_ctx);
572 if (pfvf->nix_qints_ctx)
573 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
574 if (pfvf->cq_ints_ctx)
575 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
577 pfvf->rq_bmap = NULL;
578 pfvf->cq_bmap = NULL;
579 pfvf->sq_bmap = NULL;
583 pfvf->rss_ctx = NULL;
584 pfvf->nix_qints_ctx = NULL;
585 pfvf->cq_ints_ctx = NULL;
588 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
589 struct rvu_pfvf *pfvf, int nixlf,
590 int rss_sz, int rss_grps, int hwctx_size,
593 int err, grp, num_indices;
595 /* RSS is not requested for this NIXLF */
598 num_indices = rss_sz * rss_grps;
600 /* Alloc NIX RSS HW context memory and config the base */
601 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
605 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
606 (u64)pfvf->rss_ctx->iova);
608 /* Config full RSS table size, enable RSS and caching */
609 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
610 BIT_ULL(36) | BIT_ULL(4) |
611 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
613 /* Config RSS group offset and sizes */
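/* For example, with a hypothetical rss_sz of 256, group 1 would get
* offset 256 and a size field of ilog2(256) - 1 = 7. */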
614 for (grp = 0; grp < rss_grps; grp++)
615 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
616 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
620 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
621 struct nix_aq_inst_s *inst)
623 struct admin_queue *aq = block->aq;
624 struct nix_aq_res_s *result;
628 result = (struct nix_aq_res_s *)aq->res->base;
630 /* Get current head pointer where to append this instruction */
631 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
632 head = (reg >> 4) & AQ_PTR_MASK;
634 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
635 (void *)inst, aq->inst->entry_sz);
636 memset(result, 0, sizeof(*result));
637 /* sync into memory */
640 /* Ring the doorbell and wait for result */
641 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
642 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
650 if (result->compcode != NIX_AQ_COMP_GOOD)
651 /* TODO: Replace this with some error code */
657 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
658 struct nix_aq_enq_req *req,
659 struct nix_aq_enq_rsp *rsp)
661 struct rvu_hwinfo *hw = rvu->hw;
662 u16 pcifunc = req->hdr.pcifunc;
663 int nixlf, blkaddr, rc = 0;
664 struct nix_aq_inst_s inst;
665 struct rvu_block *block;
666 struct admin_queue *aq;
667 struct rvu_pfvf *pfvf;
672 blkaddr = nix_hw->blkaddr;
673 block = &hw->block[blkaddr];
676 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
677 return NIX_AF_ERR_AQ_ENQUEUE;
680 pfvf = rvu_get_pfvf(rvu, pcifunc);
681 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
683 /* Skip NIXLF check for broadcast MCE entry init */
684 if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
685 if (!pfvf->nixlf || nixlf < 0)
686 return NIX_AF_ERR_AF_LF_INVALID;
689 switch (req->ctype) {
690 case NIX_AQ_CTYPE_RQ:
691 /* Check if index exceeds max no of queues */
692 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
693 rc = NIX_AF_ERR_AQ_ENQUEUE;
695 case NIX_AQ_CTYPE_SQ:
696 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
697 rc = NIX_AF_ERR_AQ_ENQUEUE;
699 case NIX_AQ_CTYPE_CQ:
700 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
701 rc = NIX_AF_ERR_AQ_ENQUEUE;
703 case NIX_AQ_CTYPE_RSS:
704 /* Check if RSS is enabled and qidx is within range */
705 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
706 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
707 (req->qidx >= (256UL << (cfg & 0xF))))
708 rc = NIX_AF_ERR_AQ_ENQUEUE;
710 case NIX_AQ_CTYPE_MCE:
711 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
713 /* Check if index exceeds MCE list length */
714 if (!nix_hw->mcast.mce_ctx ||
715 (req->qidx >= (256UL << (cfg & 0xF))))
716 rc = NIX_AF_ERR_AQ_ENQUEUE;
718 /* Adding multicast lists for requests from PF/VFs is not
719 * yet supported, so ignore this.
722 rc = NIX_AF_ERR_AQ_ENQUEUE;
725 rc = NIX_AF_ERR_AQ_ENQUEUE;
731 /* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
732 if (req->ctype == NIX_AQ_CTYPE_SQ &&
733 ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
734 (req->op == NIX_AQ_INSTOP_WRITE &&
735 req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
736 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
737 pcifunc, req->sq.smq))
738 return NIX_AF_ERR_AQ_ENQUEUE;
741 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
743 inst.cindex = req->qidx;
744 inst.ctype = req->ctype;
746 /* Enqueuing multiple instructions is currently not supported,
747 * so always use the first entry in the result memory.
749 inst.res_addr = (u64)aq->res->iova;
751 /* Hardware uses the same aq->res->base for updating the result of the
752 * previous instruction, hence wait here till it is done.
754 spin_lock(&aq->lock);
756 /* Clean result + context memory */
757 memset(aq->res->base, 0, aq->res->entry_sz);
758 /* Context needs to be written at RES_ADDR + 128 */
759 ctx = aq->res->base + 128;
760 /* Mask needs to be written at RES_ADDR + 256 */
761 mask = aq->res->base + 256;
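/* i.e. the result memory is laid out as: result at offset 0, context
* at offset 128, context mask at offset 256. */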
764 case NIX_AQ_INSTOP_WRITE:
765 if (req->ctype == NIX_AQ_CTYPE_RQ)
766 memcpy(mask, &req->rq_mask,
767 sizeof(struct nix_rq_ctx_s));
768 else if (req->ctype == NIX_AQ_CTYPE_SQ)
769 memcpy(mask, &req->sq_mask,
770 sizeof(struct nix_sq_ctx_s));
771 else if (req->ctype == NIX_AQ_CTYPE_CQ)
772 memcpy(mask, &req->cq_mask,
773 sizeof(struct nix_cq_ctx_s));
774 else if (req->ctype == NIX_AQ_CTYPE_RSS)
775 memcpy(mask, &req->rss_mask,
776 sizeof(struct nix_rsse_s));
777 else if (req->ctype == NIX_AQ_CTYPE_MCE)
778 memcpy(mask, &req->mce_mask,
779 sizeof(struct nix_rx_mce_s));
781 case NIX_AQ_INSTOP_INIT:
782 if (req->ctype == NIX_AQ_CTYPE_RQ)
783 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
784 else if (req->ctype == NIX_AQ_CTYPE_SQ)
785 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
786 else if (req->ctype == NIX_AQ_CTYPE_CQ)
787 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
788 else if (req->ctype == NIX_AQ_CTYPE_RSS)
789 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
790 else if (req->ctype == NIX_AQ_CTYPE_MCE)
791 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
793 case NIX_AQ_INSTOP_NOP:
794 case NIX_AQ_INSTOP_READ:
795 case NIX_AQ_INSTOP_LOCK:
796 case NIX_AQ_INSTOP_UNLOCK:
799 rc = NIX_AF_ERR_AQ_ENQUEUE;
800 spin_unlock(&aq->lock);
804 /* Submit the instruction to AQ */
805 rc = nix_aq_enqueue_wait(rvu, block, &inst);
807 spin_unlock(&aq->lock);
811 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
812 if (req->op == NIX_AQ_INSTOP_INIT) {
813 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
814 __set_bit(req->qidx, pfvf->rq_bmap);
815 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
816 __set_bit(req->qidx, pfvf->sq_bmap);
817 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
818 __set_bit(req->qidx, pfvf->cq_bmap);
821 if (req->op == NIX_AQ_INSTOP_WRITE) {
822 if (req->ctype == NIX_AQ_CTYPE_RQ) {
823 ena = (req->rq.ena & req->rq_mask.ena) |
824 (test_bit(req->qidx, pfvf->rq_bmap) &
827 __set_bit(req->qidx, pfvf->rq_bmap);
829 __clear_bit(req->qidx, pfvf->rq_bmap);
831 if (req->ctype == NIX_AQ_CTYPE_SQ) {
832 ena = (req->sq.ena & req->sq_mask.ena) |
833 (test_bit(req->qidx, pfvf->sq_bmap) &
836 __set_bit(req->qidx, pfvf->sq_bmap);
838 __clear_bit(req->qidx, pfvf->sq_bmap);
840 if (req->ctype == NIX_AQ_CTYPE_CQ) {
841 ena = (req->cq.ena & req->cq_mask.ena) |
842 (test_bit(req->qidx, pfvf->cq_bmap) &
845 __set_bit(req->qidx, pfvf->cq_bmap);
847 __clear_bit(req->qidx, pfvf->cq_bmap);
852 /* Copy read context into mailbox */
853 if (req->op == NIX_AQ_INSTOP_READ) {
854 if (req->ctype == NIX_AQ_CTYPE_RQ)
855 memcpy(&rsp->rq, ctx,
856 sizeof(struct nix_rq_ctx_s));
857 else if (req->ctype == NIX_AQ_CTYPE_SQ)
858 memcpy(&rsp->sq, ctx,
859 sizeof(struct nix_sq_ctx_s));
860 else if (req->ctype == NIX_AQ_CTYPE_CQ)
861 memcpy(&rsp->cq, ctx,
862 sizeof(struct nix_cq_ctx_s));
863 else if (req->ctype == NIX_AQ_CTYPE_RSS)
864 memcpy(&rsp->rss, ctx,
865 sizeof(struct nix_rsse_s));
866 else if (req->ctype == NIX_AQ_CTYPE_MCE)
867 memcpy(&rsp->mce, ctx,
868 sizeof(struct nix_rx_mce_s));
872 spin_unlock(&aq->lock);
876 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
877 struct nix_aq_enq_rsp *rsp)
879 struct nix_hw *nix_hw;
882 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
884 return NIX_AF_ERR_AF_LF_INVALID;
886 nix_hw = get_nix_hw(rvu->hw, blkaddr);
890 return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
893 static const char *nix_get_ctx_name(int ctype)
896 case NIX_AQ_CTYPE_CQ:
898 case NIX_AQ_CTYPE_SQ:
900 case NIX_AQ_CTYPE_RQ:
902 case NIX_AQ_CTYPE_RSS:
908 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
910 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
911 struct nix_aq_enq_req aq_req;
916 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
917 return NIX_AF_ERR_AQ_ENQUEUE;
919 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
920 aq_req.hdr.pcifunc = req->hdr.pcifunc;
922 if (req->ctype == NIX_AQ_CTYPE_CQ) {
924 aq_req.cq_mask.ena = 1;
925 aq_req.cq.bp_ena = 0;
926 aq_req.cq_mask.bp_ena = 1;
927 q_cnt = pfvf->cq_ctx->qsize;
928 bmap = pfvf->cq_bmap;
930 if (req->ctype == NIX_AQ_CTYPE_SQ) {
932 aq_req.sq_mask.ena = 1;
933 q_cnt = pfvf->sq_ctx->qsize;
934 bmap = pfvf->sq_bmap;
936 if (req->ctype == NIX_AQ_CTYPE_RQ) {
938 aq_req.rq_mask.ena = 1;
939 q_cnt = pfvf->rq_ctx->qsize;
940 bmap = pfvf->rq_bmap;
943 aq_req.ctype = req->ctype;
944 aq_req.op = NIX_AQ_INSTOP_WRITE;
946 for (qidx = 0; qidx < q_cnt; qidx++) {
947 if (!test_bit(qidx, bmap))
950 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
953 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
954 nix_get_ctx_name(req->ctype), qidx);
961 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
962 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
964 struct nix_aq_enq_req lock_ctx_req;
967 if (req->op != NIX_AQ_INSTOP_INIT)
970 if (req->ctype == NIX_AQ_CTYPE_MCE ||
971 req->ctype == NIX_AQ_CTYPE_DYNO)
974 memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
975 lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
976 lock_ctx_req.ctype = req->ctype;
977 lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
978 lock_ctx_req.qidx = req->qidx;
979 err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
982 "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
984 nix_get_ctx_name(req->ctype), req->qidx);
988 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
989 struct nix_aq_enq_req *req,
990 struct nix_aq_enq_rsp *rsp)
994 err = rvu_nix_aq_enq_inst(rvu, req, rsp);
996 err = nix_lf_hwctx_lockdown(rvu, req);
1001 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1002 struct nix_aq_enq_req *req,
1003 struct nix_aq_enq_rsp *rsp)
1005 return rvu_nix_aq_enq_inst(rvu, req, rsp);
1008 /* CN10K mbox handler */
1009 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1010 struct nix_cn10k_aq_enq_req *req,
1011 struct nix_cn10k_aq_enq_rsp *rsp)
1013 return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1014 (struct nix_aq_enq_rsp *)rsp);
1017 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1018 struct hwctx_disable_req *req,
1019 struct msg_rsp *rsp)
1021 return nix_lf_hwctx_disable(rvu, req);
1024 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1025 struct nix_lf_alloc_req *req,
1026 struct nix_lf_alloc_rsp *rsp)
1028 int nixlf, qints, hwctx_size, intf, err, rc = 0;
1029 struct rvu_hwinfo *hw = rvu->hw;
1030 u16 pcifunc = req->hdr.pcifunc;
1031 struct rvu_block *block;
1032 struct rvu_pfvf *pfvf;
1036 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1037 return NIX_AF_ERR_PARAM;
1040 req->way_mask &= 0xFFFF;
1042 pfvf = rvu_get_pfvf(rvu, pcifunc);
1043 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1044 if (!pfvf->nixlf || blkaddr < 0)
1045 return NIX_AF_ERR_AF_LF_INVALID;
1047 block = &hw->block[blkaddr];
1048 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1050 return NIX_AF_ERR_AF_LF_INVALID;
1052 /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1053 if (req->npa_func) {
1054 /* If default, use 'this' NIXLF's PFFUNC */
1055 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1056 req->npa_func = pcifunc;
1057 if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1058 return NIX_AF_INVAL_NPA_PF_FUNC;
1061 /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1062 if (req->sso_func) {
1063 /* If default, use 'this' NIXLF's PFFUNC */
1064 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1065 req->sso_func = pcifunc;
1066 if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1067 return NIX_AF_INVAL_SSO_PF_FUNC;
1070 /* If RSS is being enabled, check if requested config is valid.
1071 * RSS table size should be a power of two, otherwise
1072 * RSS_GRP::OFFSET + adder might go beyond that group or
1073 * the entire table won't be usable.
1075 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1076 !is_power_of_2(req->rss_sz)))
1077 return NIX_AF_ERR_RSS_SIZE_INVALID;
1080 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1081 return NIX_AF_ERR_RSS_GRPS_INVALID;
1083 /* Reset this NIX LF */
1084 err = rvu_lf_reset(rvu, block, nixlf);
1086 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1087 block->addr - BLKADDR_NIX0, nixlf);
1088 return NIX_AF_ERR_LF_RESET;
1091 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1093 /* Alloc NIX RQ HW context memory and config the base */
1094 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1095 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1099 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1103 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1104 (u64)pfvf->rq_ctx->iova);
1106 /* Set caching and queue count in HW */
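/* In the config below, BIT(36) enables caching, the low bits hold
* (queue count - 1) and the NDC way mask sits at bit position 20; the
* SQ and CQ configs further down use the same layout. */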
1107 cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1108 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1110 /* Alloc NIX SQ HW context memory and config the base */
1111 hwctx_size = 1UL << (ctx_cfg & 0xF);
1112 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1116 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1120 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1121 (u64)pfvf->sq_ctx->iova);
1123 cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1124 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1126 /* Alloc NIX CQ HW context memory and config the base */
1127 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1128 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1132 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1136 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1137 (u64)pfvf->cq_ctx->iova);
1139 cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1140 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1142 /* Initialize receive side scaling (RSS) */
1143 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1144 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1145 req->rss_grps, hwctx_size, req->way_mask);
1149 /* Alloc memory for CQINT's HW contexts */
1150 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1151 qints = (cfg >> 24) & 0xFFF;
1152 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1153 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1157 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1158 (u64)pfvf->cq_ints_ctx->iova);
1160 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1161 BIT_ULL(36) | req->way_mask << 20);
1163 /* Alloc memory for QINT's HW contexts */
1164 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1165 qints = (cfg >> 12) & 0xFFF;
1166 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1167 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1171 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1172 (u64)pfvf->nix_qints_ctx->iova);
1173 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1174 BIT_ULL(36) | req->way_mask << 20);
1176 /* Set up VLANX TPIDs.
1177 * Use VLAN1 for 802.1Q
1178 * and VLAN0 for 802.1AD.
1180 cfg = (0x8100ULL << 16) | 0x88A8ULL;
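/* i.e. the VLAN1 TPID (0x8100, 802.1Q) lands in bits [31:16] and the
* VLAN0 TPID (0x88A8, 802.1AD) in bits [15:0] of the value written to
* NIX_AF_LFX_TX_CFG. */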
1181 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1183 /* Enable LMTST for this NIX LF */
1184 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1186 /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1188 cfg = req->npa_func;
1190 cfg |= (u64)req->sso_func << 16;
1192 cfg |= (u64)req->xqe_sz << 33;
1193 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1195 /* Config Rx pkt length, csum checks and apad enable / disable */
1196 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1198 /* Configure pkind for TX parse config */
1199 cfg = NPC_TX_DEF_PKIND;
1200 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1202 intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1203 err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1207 /* Disable NPC entries as NIXLF's contexts are not initialized yet */
1208 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1210 /* Configure RX VTAG Type 7 (strip) for vf vlan */
1211 rvu_write64(rvu, blkaddr,
1212 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1213 VTAGSIZE_T4 | VTAG_STRIP);
1218 nix_ctx_free(rvu, pfvf);
1222 /* Set macaddr of this PF/VF */
1223 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1225 /* set SQB size info */
1226 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1227 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1228 rsp->rx_chan_base = pfvf->rx_chan_base;
1229 rsp->tx_chan_base = pfvf->tx_chan_base;
1230 rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1231 rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1232 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1233 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1234 /* Get HW supported stat count */
1235 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1236 rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1237 rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1238 /* Get count of CQ IRQs and error IRQs supported per LF */
1239 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1240 rsp->qints = ((cfg >> 12) & 0xFFF);
1241 rsp->cints = ((cfg >> 24) & 0xFFF);
1242 rsp->cgx_links = hw->cgx_links;
1243 rsp->lbk_links = hw->lbk_links;
1244 rsp->sdp_links = hw->sdp_links;
1249 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1250 struct msg_rsp *rsp)
1252 struct rvu_hwinfo *hw = rvu->hw;
1253 u16 pcifunc = req->hdr.pcifunc;
1254 struct rvu_block *block;
1255 int blkaddr, nixlf, err;
1256 struct rvu_pfvf *pfvf;
1258 pfvf = rvu_get_pfvf(rvu, pcifunc);
1259 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1260 if (!pfvf->nixlf || blkaddr < 0)
1261 return NIX_AF_ERR_AF_LF_INVALID;
1263 block = &hw->block[blkaddr];
1264 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1266 return NIX_AF_ERR_AF_LF_INVALID;
1268 if (req->flags & NIX_LF_DISABLE_FLOWS)
1269 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1271 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1273 /* Free any tx vtag def entries used by this NIX LF */
1274 if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1275 nix_free_tx_vtag_entries(rvu, pcifunc);
1277 nix_interface_deinit(rvu, pcifunc, nixlf);
1279 /* Reset this NIX LF */
1280 err = rvu_lf_reset(rvu, block, nixlf);
1282 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1283 block->addr - BLKADDR_NIX0, nixlf);
1284 return NIX_AF_ERR_LF_RESET;
1287 nix_ctx_free(rvu, pfvf);
1292 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1293 struct nix_mark_format_cfg *req,
1294 struct nix_mark_format_cfg_rsp *rsp)
1296 u16 pcifunc = req->hdr.pcifunc;
1297 struct nix_hw *nix_hw;
1298 struct rvu_pfvf *pfvf;
1302 pfvf = rvu_get_pfvf(rvu, pcifunc);
1303 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1304 if (!pfvf->nixlf || blkaddr < 0)
1305 return NIX_AF_ERR_AF_LF_INVALID;
1307 nix_hw = get_nix_hw(rvu->hw, blkaddr);
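/* Per the shifts below, the mark format is packed as: offset in bits
* [18:16], y_mask in [15:12], y_val in [11:8], r_mask in [7:4] and
* r_val in [3:0]. */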
1311 cfg = (((u32)req->offset & 0x7) << 16) |
1312 (((u32)req->y_mask & 0xF) << 12) |
1313 (((u32)req->y_val & 0xF) << 8) |
1314 (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1316 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1318 dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1319 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1320 return NIX_AF_ERR_MARK_CFG_FAIL;
1323 rsp->mark_format_idx = rc;
1327 /* Disable shaping of pkts by a scheduler queue
1328 * at a given scheduler level.
1330 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1333 u64 cir_reg = 0, pir_reg = 0;
1337 case NIX_TXSCH_LVL_TL1:
1338 cir_reg = NIX_AF_TL1X_CIR(schq);
1339 pir_reg = 0; /* PIR not available at TL1 */
1341 case NIX_TXSCH_LVL_TL2:
1342 cir_reg = NIX_AF_TL2X_CIR(schq);
1343 pir_reg = NIX_AF_TL2X_PIR(schq);
1345 case NIX_TXSCH_LVL_TL3:
1346 cir_reg = NIX_AF_TL3X_CIR(schq);
1347 pir_reg = NIX_AF_TL3X_PIR(schq);
1349 case NIX_TXSCH_LVL_TL4:
1350 cir_reg = NIX_AF_TL4X_CIR(schq);
1351 pir_reg = NIX_AF_TL4X_PIR(schq);
1357 cfg = rvu_read64(rvu, blkaddr, cir_reg);
1358 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1362 cfg = rvu_read64(rvu, blkaddr, pir_reg);
1363 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1366 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1369 struct rvu_hwinfo *hw = rvu->hw;
1372 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1375 /* Reset TL4's SDP link config */
1376 if (lvl == NIX_TXSCH_LVL_TL4)
1377 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1379 if (lvl != NIX_TXSCH_LVL_TL2)
1382 /* Reset TL2's CGX or LBK link config */
1383 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1384 rvu_write64(rvu, blkaddr,
1385 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
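/* Transmit links are numbered with the CGX links first
* (cgx_id * lmac_per_cgx + lmac_id), followed by the LBK links (AF VFs
* use the first of these), and finally the SDP link. */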
1388 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1390 struct rvu_hwinfo *hw = rvu->hw;
1391 int pf = rvu_get_pf(pcifunc);
1392 u8 cgx_id = 0, lmac_id = 0;
1394 if (is_afvf(pcifunc)) { /* LBK links */
1395 return hw->cgx_links;
1396 } else if (is_pf_cgxmapped(rvu, pf)) {
1397 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1398 return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1402 return hw->cgx_links + hw->lbk_links;
1405 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1406 int link, int *start, int *end)
1408 struct rvu_hwinfo *hw = rvu->hw;
1409 int pf = rvu_get_pf(pcifunc);
1411 if (is_afvf(pcifunc)) { /* LBK links */
1412 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1413 *end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1414 } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1415 *start = hw->cap.nix_txsch_per_cgx_lmac * link;
1416 *end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1417 } else { /* SDP link */
1418 *start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1419 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1420 *end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1424 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1425 struct nix_hw *nix_hw,
1426 struct nix_txsch_alloc_req *req)
1428 struct rvu_hwinfo *hw = rvu->hw;
1429 int schq, req_schq, free_cnt;
1430 struct nix_txsch *txsch;
1431 int link, start, end;
1433 txsch = &nix_hw->txsch[lvl];
1434 req_schq = req->schq_contig[lvl] + req->schq[lvl];
1439 link = nix_get_tx_link(rvu, pcifunc);
1441 /* For traffic aggregating scheduler level, one queue is enough */
1442 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1444 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1448 /* Get free SCHQ count and check if request can be accommodated */
1449 if (hw->cap.nix_fixed_txschq_mapping) {
1450 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1451 schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1452 if (end <= txsch->schq.max && schq < end &&
1453 !test_bit(schq, txsch->schq.bmap))
1458 free_cnt = rvu_rsrc_free_count(&txsch->schq);
1461 if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1462 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1464 /* If contiguous queues are needed, check for availability */
1465 if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1466 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1467 return NIX_AF_ERR_TLX_ALLOC_FAIL;
1472 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1473 struct nix_txsch_alloc_rsp *rsp,
1474 int lvl, int start, int end)
1476 struct rvu_hwinfo *hw = rvu->hw;
1477 u16 pcifunc = rsp->hdr.pcifunc;
1480 /* For traffic aggregating levels, queue alloc is based
1481 * on the transmit link to which the PF_FUNC is mapped.
1483 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1484 /* A single TL queue is allocated */
1485 if (rsp->schq_contig[lvl]) {
1486 rsp->schq_contig[lvl] = 1;
1487 rsp->schq_contig_list[lvl][0] = start;
1490 /* Both contig and non-contig reqs don't make sense here */
1491 if (rsp->schq_contig[lvl])
1494 if (rsp->schq[lvl]) {
1496 rsp->schq_list[lvl][0] = start;
1501 /* Adjust the queue request count if HW supports
1502 * only one queue per level configuration.
1504 if (hw->cap.nix_fixed_txschq_mapping) {
1505 idx = pcifunc & RVU_PFVF_FUNC_MASK;
1507 if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1508 rsp->schq_contig[lvl] = 0;
1513 if (rsp->schq_contig[lvl]) {
1514 rsp->schq_contig[lvl] = 1;
1515 set_bit(schq, txsch->schq.bmap);
1516 rsp->schq_contig_list[lvl][0] = schq;
1518 } else if (rsp->schq[lvl]) {
1520 set_bit(schq, txsch->schq.bmap);
1521 rsp->schq_list[lvl][0] = schq;
1526 /* Allocate the requested contiguous queue indices first */
1527 if (rsp->schq_contig[lvl]) {
1528 schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1529 txsch->schq.max, start,
1530 rsp->schq_contig[lvl], 0);
1532 rsp->schq_contig[lvl] = 0;
1533 for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1534 set_bit(schq, txsch->schq.bmap);
1535 rsp->schq_contig_list[lvl][idx] = schq;
1540 /* Allocate non-contiguous queue indices */
1541 if (rsp->schq[lvl]) {
1543 for (schq = start; schq < end; schq++) {
1544 if (!test_bit(schq, txsch->schq.bmap)) {
1545 set_bit(schq, txsch->schq.bmap);
1546 rsp->schq_list[lvl][idx++] = schq;
1548 if (idx == rsp->schq[lvl])
1551 /* Update how many were allocated */
1552 rsp->schq[lvl] = idx;
1556 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1557 struct nix_txsch_alloc_req *req,
1558 struct nix_txsch_alloc_rsp *rsp)
1560 struct rvu_hwinfo *hw = rvu->hw;
1561 u16 pcifunc = req->hdr.pcifunc;
1562 int link, blkaddr, rc = 0;
1563 int lvl, idx, start, end;
1564 struct nix_txsch *txsch;
1565 struct rvu_pfvf *pfvf;
1566 struct nix_hw *nix_hw;
1570 pfvf = rvu_get_pfvf(rvu, pcifunc);
1571 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1572 if (!pfvf->nixlf || blkaddr < 0)
1573 return NIX_AF_ERR_AF_LF_INVALID;
1575 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1579 mutex_lock(&rvu->rsrc_lock);
1581 /* Check if request is valid as per HW capabilities
1582 * and can be accommodated.
1584 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1585 rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1590 /* Allocate requested Tx scheduler queues */
1591 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1592 txsch = &nix_hw->txsch[lvl];
1593 pfvf_map = txsch->pfvf_map;
1595 if (!req->schq[lvl] && !req->schq_contig[lvl])
1598 rsp->schq[lvl] = req->schq[lvl];
1599 rsp->schq_contig[lvl] = req->schq_contig[lvl];
1601 link = nix_get_tx_link(rvu, pcifunc);
1603 if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1606 } else if (hw->cap.nix_fixed_txschq_mapping) {
1607 nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1610 end = txsch->schq.max;
1613 nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1615 /* Reset queue config */
1616 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1617 schq = rsp->schq_contig_list[lvl][idx];
1618 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1619 NIX_TXSCHQ_CFG_DONE))
1620 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1621 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1622 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1625 for (idx = 0; idx < req->schq[lvl]; idx++) {
1626 schq = rsp->schq_list[lvl][idx];
1627 if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1628 NIX_TXSCHQ_CFG_DONE))
1629 pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1630 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1631 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1635 rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1636 rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1637 rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1638 NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1639 NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1642 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1644 mutex_unlock(&rvu->rsrc_lock);
1648 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1649 int smq, u16 pcifunc, int nixlf)
1651 int pf = rvu_get_pf(pcifunc);
1652 u8 cgx_id = 0, lmac_id = 0;
1653 int err, restore_tx_en = 0;
1656 /* enable cgx tx if disabled */
1657 if (is_pf_cgxmapped(rvu, pf)) {
1658 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1659 restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1663 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1664 /* Do SMQ flush and set enqueue xoff */
1665 cfg |= BIT_ULL(50) | BIT_ULL(49);
1666 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1668 /* Disable backpressure from physical link,
1669 * otherwise SMQ flush may stall.
1671 rvu_cgx_enadis_rx_bp(rvu, pf, false);
1673 /* Wait for flush to complete */
1674 err = rvu_poll_reg(rvu, blkaddr,
1675 NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1678 "NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1680 rvu_cgx_enadis_rx_bp(rvu, pf, true);
1681 /* restore cgx tx state */
1683 cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1686 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1688 int blkaddr, nixlf, lvl, schq, err;
1689 struct rvu_hwinfo *hw = rvu->hw;
1690 struct nix_txsch *txsch;
1691 struct nix_hw *nix_hw;
1693 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1695 return NIX_AF_ERR_AF_LF_INVALID;
1697 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1701 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1703 return NIX_AF_ERR_AF_LF_INVALID;
1705 /* Disable TL2/3 queue links before SMQ flush */
1706 mutex_lock(&rvu->rsrc_lock);
1707 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1708 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1711 txsch = &nix_hw->txsch[lvl];
1712 for (schq = 0; schq < txsch->schq.max; schq++) {
1713 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1715 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1720 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1721 for (schq = 0; schq < txsch->schq.max; schq++) {
1722 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1724 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1727 /* Now free scheduler queues to free pool */
1728 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1729 /* TLs above the aggregation level are shared across the PF
1730 * and its VFs, hence skip freeing them.
1732 if (lvl >= hw->cap.nix_tx_aggr_lvl)
1735 txsch = &nix_hw->txsch[lvl];
1736 for (schq = 0; schq < txsch->schq.max; schq++) {
1737 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1739 rvu_free_rsrc(&txsch->schq, schq);
1740 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1743 mutex_unlock(&rvu->rsrc_lock);
1745 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1746 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1747 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1749 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1754 static int nix_txschq_free_one(struct rvu *rvu,
1755 struct nix_txsch_free_req *req)
1757 struct rvu_hwinfo *hw = rvu->hw;
1758 u16 pcifunc = req->hdr.pcifunc;
1759 int lvl, schq, nixlf, blkaddr;
1760 struct nix_txsch *txsch;
1761 struct nix_hw *nix_hw;
1764 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1766 return NIX_AF_ERR_AF_LF_INVALID;
1768 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1772 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1774 return NIX_AF_ERR_AF_LF_INVALID;
1776 lvl = req->schq_lvl;
1778 txsch = &nix_hw->txsch[lvl];
1780 if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1783 pfvf_map = txsch->pfvf_map;
1784 mutex_lock(&rvu->rsrc_lock);
1786 if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1787 mutex_unlock(&rvu->rsrc_lock);
1791 /* Flush if it is an SMQ. The onus of disabling
1792 * TL2/3 queue links before the SMQ flush is on the user
1794 if (lvl == NIX_TXSCH_LVL_SMQ)
1795 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1797 /* Free the resource */
1798 rvu_free_rsrc(&txsch->schq, schq);
1799 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1800 mutex_unlock(&rvu->rsrc_lock);
1803 return NIX_AF_ERR_TLX_INVALID;
1806 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1807 struct nix_txsch_free_req *req,
1808 struct msg_rsp *rsp)
1810 if (req->flags & TXSCHQ_FREE_ALL)
1811 return nix_txschq_free(rvu, req->hdr.pcifunc);
1813 return nix_txschq_free_one(rvu, req);
1816 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1817 int lvl, u64 reg, u64 regval)
1819 u64 regbase = reg & 0xFFFF;
1822 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1825 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1826 /* Check if this schq belongs to this PF/VF or not */
1827 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1830 parent = (regval >> 16) & 0x1FF;
1831 /* Validate MDQ's TL4 parent */
1832 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1833 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1836 /* Validate TL4's TL3 parent */
1837 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1838 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1841 /* Validate TL3's TL2 parent */
1842 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1843 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1846 /* Validate TL2's TL1 parent */
1847 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1848 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1854 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1858 if (hw->cap.nix_shaping)
1861 /* If shaping and coloring are not supported, then
1862 * *_CIR and *_PIR registers should not be configured.
1864 regbase = reg & 0xFFFF;
1867 case NIX_TXSCH_LVL_TL1:
1868 if (regbase == NIX_AF_TL1X_CIR(0))
1871 case NIX_TXSCH_LVL_TL2:
1872 if (regbase == NIX_AF_TL2X_CIR(0) ||
1873 regbase == NIX_AF_TL2X_PIR(0))
1876 case NIX_TXSCH_LVL_TL3:
1877 if (regbase == NIX_AF_TL3X_CIR(0) ||
1878 regbase == NIX_AF_TL3X_PIR(0))
1881 case NIX_TXSCH_LVL_TL4:
1882 if (regbase == NIX_AF_TL4X_CIR(0) ||
1883 regbase == NIX_AF_TL4X_PIR(0))
1890 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1891 u16 pcifunc, int blkaddr)
1896 schq = nix_get_tx_link(rvu, pcifunc);
1897 pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1898 /* Skip if PF has already done the config */
1899 if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1901 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1902 (TXSCH_TL1_DFLT_RR_PRIO << 1));
1903 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1904 TXSCH_TL1_DFLT_RR_QTM);
1905 rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1906 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1909 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1910 struct nix_txschq_config *req,
1911 struct msg_rsp *rsp)
1913 struct rvu_hwinfo *hw = rvu->hw;
1914 u16 pcifunc = req->hdr.pcifunc;
1915 u64 reg, regval, schq_regbase;
1916 struct nix_txsch *txsch;
1917 struct nix_hw *nix_hw;
1918 int blkaddr, idx, err;
1922 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1923 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1924 return NIX_AF_INVAL_TXSCHQ_CFG;
1926 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1930 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1934 txsch = &nix_hw->txsch[req->lvl];
1935 pfvf_map = txsch->pfvf_map;
1937 if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1938 pcifunc & RVU_PFVF_FUNC_MASK) {
1939 mutex_lock(&rvu->rsrc_lock);
1940 if (req->lvl == NIX_TXSCH_LVL_TL1)
1941 nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1942 mutex_unlock(&rvu->rsrc_lock);
1946 for (idx = 0; idx < req->num_regs; idx++) {
1947 reg = req->reg[idx];
1948 regval = req->regval[idx];
1949 schq_regbase = reg & 0xFFFF;
1951 if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1952 txsch->lvl, reg, regval))
1953 return NIX_AF_INVAL_TXSCHQ_CFG;
1955 /* Check if shaping and coloring is supported */
1956 if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1959 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
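/* SMQX_CFG carries the NIXLF in bits [30:24] (0x7F mask at bit
* position 24), hence the masking below. */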
1960 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1961 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1963 regval &= ~(0x7FULL << 24);
1964 regval |= ((u64)nixlf << 24);
1967 /* Clear 'BP_ENA' config, if it's not allowed */
1968 if (!hw->cap.nix_tx_link_bp) {
1969 if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1970 (schq_regbase & 0xFF00) ==
1971 NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1972 regval &= ~BIT_ULL(13);
1975 /* Mark config as done for TL1 by PF */
1976 if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1977 schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1978 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1979 mutex_lock(&rvu->rsrc_lock);
1980 pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1981 NIX_TXSCHQ_CFG_DONE);
1982 mutex_unlock(&rvu->rsrc_lock);
1985 /* SMQ flush is special, hence split the register write such
1986 * that the flush is done first and the rest of the bits are written later.
1988 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1989 (regval & BIT_ULL(49))) {
1990 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1991 nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1992 regval &= ~BIT_ULL(49);
1994 rvu_write64(rvu, blkaddr, reg, regval);
2000 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2001 struct nix_vtag_config *req)
2003 u64 regval = req->vtag_size;
2005 if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2006 req->vtag_size > VTAGSIZE_T8)
2009 /* RX VTAG Type 7 reserved for vf vlan */
2010 if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2011 return NIX_AF_ERR_RX_VTAG_INUSE;
2013 if (req->rx.capture_vtag)
2014 regval |= BIT_ULL(5);
2015 if (req->rx.strip_vtag)
2016 regval |= BIT_ULL(4);
2018 rvu_write64(rvu, blkaddr,
2019 NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2023 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2024 u16 pcifunc, int index)
2026 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2027 struct nix_txvlan *vlan = &nix_hw->txvlan;
2029 if (vlan->entry2pfvf_map[index] != pcifunc)
2030 return NIX_AF_ERR_PARAM;
2032 rvu_write64(rvu, blkaddr,
2033 NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2034 rvu_write64(rvu, blkaddr,
2035 NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2037 vlan->entry2pfvf_map[index] = 0;
2038 rvu_free_rsrc(&vlan->rsrc, index);
2043 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2045 struct nix_txvlan *vlan;
2046 struct nix_hw *nix_hw;
2049 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2053 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2054 vlan = &nix_hw->txvlan;
2056 mutex_lock(&vlan->rsrc_lock);
2057 /* Scan all the entries and free the ones mapped to 'pcifunc' */
2058 for (index = 0; index < vlan->rsrc.max; index++) {
2059 if (vlan->entry2pfvf_map[index] == pcifunc)
2060 nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2062 mutex_unlock(&vlan->rsrc_lock);
2065 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2068 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2069 struct nix_txvlan *vlan = &nix_hw->txvlan;
2073 mutex_lock(&vlan->rsrc_lock);
2075 index = rvu_alloc_rsrc(&vlan->rsrc);
2077 mutex_unlock(&vlan->rsrc_lock);
2081 mutex_unlock(&vlan->rsrc_lock);
2083 regval = size ? vtag : vtag << 32;
2085 rvu_write64(rvu, blkaddr,
2086 NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2087 rvu_write64(rvu, blkaddr,
2088 NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2093 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2094 struct nix_vtag_config *req)
2096 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2097 struct nix_txvlan *vlan = &nix_hw->txvlan;
2098 u16 pcifunc = req->hdr.pcifunc;
2099 int idx0 = req->tx.vtag0_idx;
2100 int idx1 = req->tx.vtag1_idx;
2103 if (req->tx.free_vtag0 && req->tx.free_vtag1)
2104 if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2105 vlan->entry2pfvf_map[idx1] != pcifunc)
2106 return NIX_AF_ERR_PARAM;
2108 mutex_lock(&vlan->rsrc_lock);
2110 if (req->tx.free_vtag0) {
2111 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2116 if (req->tx.free_vtag1)
2117 err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2120 mutex_unlock(&vlan->rsrc_lock);
2124 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2125 struct nix_vtag_config *req,
2126 struct nix_vtag_config_rsp *rsp)
2128 struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2129 struct nix_txvlan *vlan = &nix_hw->txvlan;
2130 u16 pcifunc = req->hdr.pcifunc;
2132 if (req->tx.cfg_vtag0) {
2134 nix_tx_vtag_alloc(rvu, blkaddr,
2135 req->tx.vtag0, req->vtag_size);
2137 if (rsp->vtag0_idx < 0)
2138 return NIX_AF_ERR_TX_VTAG_NOSPC;
2140 vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2143 if (req->tx.cfg_vtag1) {
2145 nix_tx_vtag_alloc(rvu, blkaddr,
2146 req->tx.vtag1, req->vtag_size);
2148 if (rsp->vtag1_idx < 0)
2151 vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2157 if (req->tx.cfg_vtag0)
2158 nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2160 return NIX_AF_ERR_TX_VTAG_NOSPC;
2163 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2164 struct nix_vtag_config *req,
2165 struct nix_vtag_config_rsp *rsp)
2167 u16 pcifunc = req->hdr.pcifunc;
2168 int blkaddr, nixlf, err;
2170 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2174 if (req->cfg_type) {
2175 /* rx vtag configuration */
2176 err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2178 return NIX_AF_ERR_PARAM;
2180 /* tx vtag configuration */
2181 if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2182 (req->tx.free_vtag0 || req->tx.free_vtag1))
2183 return NIX_AF_ERR_PARAM;
2185 if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2186 return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2188 if (req->tx.free_vtag0 || req->tx.free_vtag1)
2189 return nix_tx_vtag_decfg(rvu, blkaddr, req);
2195 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2196 int mce, u8 op, u16 pcifunc, int next, bool eol)
2198 struct nix_aq_enq_req aq_req;
2201 aq_req.hdr.pcifunc = 0;
2202 aq_req.ctype = NIX_AQ_CTYPE_MCE;
2206 /* Forward bcast pkts to RQ0, RSS not needed */
2208 aq_req.mce.index = 0;
2209 aq_req.mce.eol = eol;
2210 aq_req.mce.pf_func = pcifunc;
2211 aq_req.mce.next = next;
2213 /* All fields valid */
2214 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
2216 err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2218 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2219 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2225 static int nix_update_mce_list(struct nix_mce_list *mce_list,
2226 u16 pcifunc, bool add)
2228 struct mce *mce, *tail = NULL;
2229 bool delete = false;
2231 /* Scan through the current list */
2232 hlist_for_each_entry(mce, &mce_list->head, node) {
2233 /* If already exists, then delete */
2234 if (mce->pcifunc == pcifunc && !add) {
2242 hlist_del(&mce->node);
2251 /* Add a new one to the list, at the tail */
2252 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2255 mce->pcifunc = pcifunc;
2257 hlist_add_head(&mce->node, &mce_list->head);
2259 hlist_add_behind(&mce->node, &tail->node);
2264 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2266 int err = 0, idx, next_idx, last_idx;
2267 struct nix_mce_list *mce_list;
2268 struct nix_mcast *mcast;
2269 struct nix_hw *nix_hw;
2270 struct rvu_pfvf *pfvf;
2274 /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2275 if (is_afvf(pcifunc))
2278 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2282 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2286 mcast = &nix_hw->mcast;
2288 /* Get this PF/VF func's MCE index */
2289 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2290 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2292 mce_list = &pfvf->bcast_mce_list;
2293 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2295 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2296 __func__, idx, mce_list->max,
2297 pcifunc >> RVU_PFVF_PF_SHIFT);
2301 mutex_lock(&mcast->mce_lock);
2303 err = nix_update_mce_list(mce_list, pcifunc, add);
2307 /* Disable MCAM entry in NPC */
2308 if (!mce_list->count) {
2309 rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
2313 /* Dump the updated list to HW */
2314 idx = pfvf->bcast_mce_idx;
2315 last_idx = idx + mce_list->count - 1;
2316 hlist_for_each_entry(mce, &mce_list->head, node) {
2321 /* EOL should be set in last MCE */
2322 err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2323 mce->pcifunc, next_idx,
2324 next_idx > last_idx);
2331 mutex_unlock(&mcast->mce_lock);
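/* Reserve a contiguous MCE range for each CGX mapped PF (one entry for
 * the PF plus one per VF) and pre-fill it with dummy entries, so that
 * later updates can always use AQ WRITE ops.
 */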
2335 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2337 struct nix_mcast *mcast = &nix_hw->mcast;
2338 int err, pf, numvfs, idx;
2339 struct rvu_pfvf *pfvf;
2343 /* Skip PF0 (i.e. the AF) */
2344 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2345 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2346 /* If PF is not enabled, nothing to do */
2347 if (!((cfg >> 20) & 0x01))
2349 /* Get numVFs attached to this PF */
2350 numvfs = (cfg >> 12) & 0xFF;
2352 pfvf = &rvu->pf[pf];
2354 /* Is this NIX0/1 block mapped to this PF? */
2355 if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2358 /* Save the start MCE */
2359 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2361 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2363 for (idx = 0; idx < (numvfs + 1); idx++) {
2364 /* idx-0 is for PF, followed by VFs */
2365 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2367 /* Add dummy entries now, so that we don't have to check
2368 * whether AQ_OP should be INIT/WRITE later on.
2369 * Will be updated when a NIXLF is attached/detached to
2372 err = nix_blk_setup_mce(rvu, nix_hw,
2373 pfvf->bcast_mce_idx + idx,
2383 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2385 struct nix_mcast *mcast = &nix_hw->mcast;
2386 struct rvu_hwinfo *hw = rvu->hw;
2389 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2390 size = (1ULL << size);
2392 /* Alloc memory for multicast/mirror replication entries */
2393 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2394 (256UL << MC_TBL_SIZE), size);
2398 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2399 (u64)mcast->mce_ctx->iova);
2401 /* Set max list length equal to max number of VFs per PF + the PF itself */
2402 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2403 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2405 /* Alloc memory for multicast replication buffers */
2406 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2407 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2408 (8UL << MC_BUF_CNT), size);
2412 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2413 (u64)mcast->mcast_buf->iova);
2415 /* Alloc pkind for NIX internal RX multicast/mirror replay */
2416 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2418 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2419 BIT_ULL(63) | (mcast->replay_pkind << 24) |
2420 BIT_ULL(20) | MC_BUF_CNT);
2422 mutex_init(&mcast->mce_lock);
2424 return nix_setup_bcast_tables(rvu, nix_hw);
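/* Set up TX VLAN tag insertion state for this NIX block: a bitmap of
 * NIX_TX_VTAG_DEF_MAX vtag definition entries, an entry-to-pcifunc
 * ownership map and the lock protecting both.
 */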
2427 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2429 struct nix_txvlan *vlan = &nix_hw->txvlan;
2432 /* Allocate resource bitmap for tx vtag def registers */
2433 vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2434 err = rvu_alloc_bitmap(&vlan->rsrc);
2438 /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2439 vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2440 sizeof(u16), GFP_KERNEL);
2441 if (!vlan->entry2pfvf_map)
2444 mutex_init(&vlan->rsrc_lock);
2448 kfree(vlan->rsrc.bmap);
2452 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2454 struct nix_txsch *txsch;
2458 /* Get scheduler queue count of each type and alloc
2459 * bitmap for each for alloc/free/attach operations.
2461 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2462 txsch = &nix_hw->txsch[lvl];
2465 case NIX_TXSCH_LVL_SMQ:
2466 reg = NIX_AF_MDQ_CONST;
2468 case NIX_TXSCH_LVL_TL4:
2469 reg = NIX_AF_TL4_CONST;
2471 case NIX_TXSCH_LVL_TL3:
2472 reg = NIX_AF_TL3_CONST;
2474 case NIX_TXSCH_LVL_TL2:
2475 reg = NIX_AF_TL2_CONST;
2477 case NIX_TXSCH_LVL_TL1:
2478 reg = NIX_AF_TL1_CONST;
2481 cfg = rvu_read64(rvu, blkaddr, reg);
2482 txsch->schq.max = cfg & 0xFFFF;
2483 err = rvu_alloc_bitmap(&txsch->schq);
2487 /* Allocate memory for scheduler queues to
2488 * PF/VF pcifunc mapping info.
2490 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2491 sizeof(u32), GFP_KERNEL);
2492 if (!txsch->pfvf_map)
2494 for (schq = 0; schq < txsch->schq.max; schq++)
2495 txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
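/* Reserve a packet marking format. If 'cfg' matches an already
 * programmed format, its index is reused; otherwise the next free
 * NIX_AF_MARK_FORMATX_CTL entry is programmed with 'cfg'.
 */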
2500 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2501 int blkaddr, u32 cfg)
2505 for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2506 if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2509 if (fmt_idx >= nix_hw->mark_format.total)
2512 rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2513 nix_hw->mark_format.cfg[fmt_idx] = cfg;
2514 nix_hw->mark_format.in_use++;
2518 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2522 [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
2523 [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
2524 [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
2525 [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
2526 [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
2527 [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
2528 [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
2529 [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
2530 [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2535 total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2536 nix_hw->mark_format.total = (u8)total;
2537 nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2539 if (!nix_hw->mark_format.cfg)
2541 for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2542 rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2544 dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2551 static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2553 /* CN10K supports LBK FIFO size 72 KB */
2554 if (rvu->hw->lbk_bufsize == 0x12000)
2555 *max_mtu = CN10K_LBK_LINK_MAX_FRS;
2557 *max_mtu = NIC_HW_MAX_FRS;
2560 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2562 /* RPM supports FIFO len 128 KB */
2563 if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2564 *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2566 *max_mtu = NIC_HW_MAX_FRS;
2569 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2570 struct nix_hw_info *rsp)
2572 u16 pcifunc = req->hdr.pcifunc;
2575 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2577 return NIX_AF_ERR_AF_LF_INVALID;
2579 if (is_afvf(pcifunc))
2580 rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2582 rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2584 rsp->min_mtu = NIC_HW_MIN_FRS;
2588 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2589 struct msg_rsp *rsp)
2591 u16 pcifunc = req->hdr.pcifunc;
2592 int i, nixlf, blkaddr, err;
2595 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2599 /* Get stats count supported by HW */
2600 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2602 /* Reset tx stats */
2603 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2604 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2606 /* Reset rx stats */
2607 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2608 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2613 /* Returns the ALG index to be set into NPC_RX_ACTION */
2614 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2619 /* Scan over existing algo entries to find a match */
2619 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2620 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2626 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2628 int idx, nr_field, key_off, field_marker, keyoff_marker;
2629 int max_key_off, max_bit_pos, group_member;
2630 struct nix_rx_flowkey_alg *field;
2631 struct nix_rx_flowkey_alg tmp;
2632 u32 key_type, valid_key;
2633 int l4_key_offset = 0;
2638 #define FIELDS_PER_ALG 5
2639 #define MAX_KEY_OFF 40
2640 /* Clear all fields */
2641 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2643 /* Each of the 32 possible flow key algorithm definitions should
2644 * fall into above incremental config (except ALG0). Otherwise a
2645 * single NPC MCAM entry is not sufficient for supporting RSS.
2647 * If a different definition or combination is needed then the NPC MCAM
2648 * has to be programmed to filter such pkts and its action should
2649 * point to this definition to calculate the flowtag or hash.
2651 * The `for loop` goes over _all_ protocol fields and the following
2652 * variables depict the state machine's forward progress logic.
2654 * keyoff_marker - Enabled when hash byte length needs to be accounted
2655 * in field->key_offset update.
2656 * field_marker - Enabled when a new field needs to be selected.
2657 * group_member - Enabled when protocol is part of a group.
2660 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2661 nr_field = 0; key_off = 0; field_marker = 1;
2662 field = &tmp; max_bit_pos = fls(flow_cfg);
2664 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2665 key_off < MAX_KEY_OFF; idx++) {
2666 key_type = BIT(idx);
2667 valid_key = flow_cfg & key_type;
2668 /* Found a field marker, reset the field values */
2670 memset(&tmp, 0, sizeof(tmp));
2672 field_marker = true;
2673 keyoff_marker = true;
2675 case NIX_FLOW_KEY_TYPE_PORT:
2676 field->sel_chan = true;
2677 /* This should be set to 1 when SEL_CHAN is set */
2680 case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2681 field->lid = NPC_LID_LC;
2682 field->hdr_offset = 9; /* offset */
2683 field->bytesm1 = 0; /* 1 byte */
2684 field->ltype_match = NPC_LT_LC_IP;
2685 field->ltype_mask = 0xF;
2687 case NIX_FLOW_KEY_TYPE_IPV4:
2688 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2689 field->lid = NPC_LID_LC;
2690 field->ltype_match = NPC_LT_LC_IP;
2691 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2692 field->lid = NPC_LID_LG;
2693 field->ltype_match = NPC_LT_LG_TU_IP;
2695 field->hdr_offset = 12; /* SIP offset */
2696 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2697 field->ltype_mask = 0xF; /* Match only IPv4 */
2698 keyoff_marker = false;
2700 case NIX_FLOW_KEY_TYPE_IPV6:
2701 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2702 field->lid = NPC_LID_LC;
2703 field->ltype_match = NPC_LT_LC_IP6;
2704 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2705 field->lid = NPC_LID_LG;
2706 field->ltype_match = NPC_LT_LG_TU_IP6;
2708 field->hdr_offset = 8; /* SIP offset */
2709 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2710 field->ltype_mask = 0xF; /* Match only IPv6 */
2712 case NIX_FLOW_KEY_TYPE_TCP:
2713 case NIX_FLOW_KEY_TYPE_UDP:
2714 case NIX_FLOW_KEY_TYPE_SCTP:
2715 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2716 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2717 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2718 field->lid = NPC_LID_LD;
2719 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2720 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2721 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2722 field->lid = NPC_LID_LH;
2723 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2725 /* Layer type enum values under NPC_LID_LD and NPC_LID_LH are the same,
2726 * so no need to change the ltype_match, just change
2727 * the lid for inner protocols
2729 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2730 (int)NPC_LT_LH_TU_TCP);
2731 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2732 (int)NPC_LT_LH_TU_UDP);
2733 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2734 (int)NPC_LT_LH_TU_SCTP);
2736 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2737 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2739 field->ltype_match |= NPC_LT_LD_TCP;
2740 group_member = true;
2741 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2742 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2744 field->ltype_match |= NPC_LT_LD_UDP;
2745 group_member = true;
2746 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2747 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2749 field->ltype_match |= NPC_LT_LD_SCTP;
2750 group_member = true;
2752 field->ltype_mask = ~field->ltype_match;
2753 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2754 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2755 /* Handle the case where any of the group items
2756 * is enabled but not the final one (SCTP)
2760 group_member = false;
2763 field_marker = false;
2764 keyoff_marker = false;
2767 /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2768 * remember the TCP key offset within the 40-byte hash key.
2770 if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2771 l4_key_offset = key_off;
2773 case NIX_FLOW_KEY_TYPE_NVGRE:
2774 field->lid = NPC_LID_LD;
2775 field->hdr_offset = 4; /* VSID offset */
2777 field->ltype_match = NPC_LT_LD_NVGRE;
2778 field->ltype_mask = 0xF;
2780 case NIX_FLOW_KEY_TYPE_VXLAN:
2781 case NIX_FLOW_KEY_TYPE_GENEVE:
2782 field->lid = NPC_LID_LE;
2784 field->hdr_offset = 4;
2785 field->ltype_mask = 0xF;
2786 field_marker = false;
2787 keyoff_marker = false;
2789 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2790 field->ltype_match |= NPC_LT_LE_VXLAN;
2791 group_member = true;
2794 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2795 field->ltype_match |= NPC_LT_LE_GENEVE;
2796 group_member = true;
2799 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2801 field->ltype_mask = ~field->ltype_match;
2802 field_marker = true;
2803 keyoff_marker = true;
2805 group_member = false;
2809 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2810 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2811 field->lid = NPC_LID_LA;
2812 field->ltype_match = NPC_LT_LA_ETHER;
2813 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2814 field->lid = NPC_LID_LF;
2815 field->ltype_match = NPC_LT_LF_TU_ETHER;
2817 field->hdr_offset = 0;
2818 field->bytesm1 = 5; /* DMAC 6 Byte */
2819 field->ltype_mask = 0xF;
2821 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2822 field->lid = NPC_LID_LC;
2823 field->hdr_offset = 40; /* IPV6 hdr */
2824 field->bytesm1 = 0; /* 1 Byte ext hdr*/
2825 field->ltype_match = NPC_LT_LC_IP6_EXT;
2826 field->ltype_mask = 0xF;
2828 case NIX_FLOW_KEY_TYPE_GTPU:
2829 field->lid = NPC_LID_LE;
2830 field->hdr_offset = 4;
2831 field->bytesm1 = 3; /* 4 bytes TID*/
2832 field->ltype_match = NPC_LT_LE_GTPU;
2833 field->ltype_mask = 0xF;
2835 case NIX_FLOW_KEY_TYPE_VLAN:
2836 field->lid = NPC_LID_LB;
2837 field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2838 field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2839 field->ltype_match = NPC_LT_LB_CTAG;
2840 field->ltype_mask = 0xF;
2841 field->fn_mask = 1; /* Mask out the first nibble */
2843 case NIX_FLOW_KEY_TYPE_AH:
2844 case NIX_FLOW_KEY_TYPE_ESP:
2845 field->hdr_offset = 0;
2846 field->bytesm1 = 7; /* SPI + sequence number */
2847 field->ltype_mask = 0xF;
2848 field->lid = NPC_LID_LE;
2849 field->ltype_match = NPC_LT_LE_ESP;
2850 if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2851 field->lid = NPC_LID_LD;
2852 field->ltype_match = NPC_LT_LD_AH;
2853 field->hdr_offset = 4;
2854 keyoff_marker = false;
2860 /* Found a valid flow key type */
2862 /* Use the key offset of TCP/UDP/SCTP fields
2863 * for ESP/AH fields.
2865 if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2866 key_type == NIX_FLOW_KEY_TYPE_AH)
2867 key_off = l4_key_offset;
2868 field->key_offset = key_off;
2869 memcpy(&alg[nr_field], field, sizeof(*field));
2870 max_key_off = max(max_key_off, field->bytesm1 + 1);
2872 /* Found a field marker, get the next field */
2877 /* Found a keyoff marker, update the new key_off */
2878 if (keyoff_marker) {
2879 key_off += max_key_off;
2883 /* Processed all the flow key types */
2884 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2887 return NIX_AF_ERR_RSS_NOSPC_FIELD;
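/* A rough sketch of the hash key layout the above produces for
 * flow_cfg = IPV4 | IPV6 | TCP, assuming the NIX_FLOW_KEY_TYPE_* bits
 * are ordered PORT, IPV4, IPV6, TCP, ... (assumption, not verified here):
 *   bytes  0-7  : IPv4 SIP + DIP (keyoff_marker stays false, key_off 0)
 *   bytes  0-31 : IPv6 SIP + DIP (overlays IPv4; a pkt matches only one)
 *   bytes 32-35 : TCP sport + dport
 * i.e. 36 bytes used out of the 40-byte key (MAX_KEY_OFF).
 */

/* Reserve the next free RSS flow key algorithm: generate field
 * descriptors for 'flow_cfg', program them via
 * NIX_AF_RX_FLOW_KEY_ALGX_FIELDX() and record 'flow_cfg' so identical
 * requests can reuse this index later.
 */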
2890 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2892 u64 field[FIELDS_PER_ALG];
2896 hw = get_nix_hw(rvu->hw, blkaddr);
2900 /* No room to add a new flow hash algorithm */
2901 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2902 return NIX_AF_ERR_RSS_NOSPC_ALGO;
2904 /* Generate algo fields for the given flow_cfg */
2905 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2909 /* Update ALGX_FIELDX register with generated fields */
2910 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2911 rvu_write64(rvu, blkaddr,
2912 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2915 /* Store the flow_cfg for further lookup */
2916 rc = hw->flowkey.in_use;
2917 hw->flowkey.flowkey[rc] = flow_cfg;
2918 hw->flowkey.in_use++;
2923 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2924 struct nix_rss_flowkey_cfg *req,
2925 struct nix_rss_flowkey_cfg_rsp *rsp)
2927 u16 pcifunc = req->hdr.pcifunc;
2928 int alg_idx, nixlf, blkaddr;
2929 struct nix_hw *nix_hw;
2932 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2936 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2940 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2941 /* Failed to get an algo index from the existing list, reserve a new one */
2943 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2948 rsp->alg_idx = alg_idx;
2949 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2950 alg_idx, req->mcam_index);
2954 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2956 u32 flowkey_cfg, minkey_cfg;
2959 /* Disable all flow key algx fieldx */
2960 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2961 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2962 rvu_write64(rvu, blkaddr,
2963 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2967 /* IPv4/IPv6 SIP/DIPs */
2968 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2969 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2973 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2974 minkey_cfg = flowkey_cfg;
2975 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2976 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2980 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2981 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2982 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2986 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2987 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2988 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2992 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2993 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2994 NIX_FLOW_KEY_TYPE_UDP;
2995 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2999 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3000 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3001 NIX_FLOW_KEY_TYPE_SCTP;
3002 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3006 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3007 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3008 NIX_FLOW_KEY_TYPE_SCTP;
3009 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3013 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3014 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3015 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3016 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3023 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3024 struct nix_set_mac_addr *req,
3025 struct msg_rsp *rsp)
3027 bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3028 u16 pcifunc = req->hdr.pcifunc;
3029 int blkaddr, nixlf, err;
3030 struct rvu_pfvf *pfvf;
3032 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3036 pfvf = rvu_get_pfvf(rvu, pcifunc);
3038 /* VF can't overwrite admin (PF) changes */
3039 if (from_vf && pfvf->pf_set_vf_cfg)
3042 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3044 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3045 pfvf->rx_chan_base, req->mac_addr);
3050 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3051 struct msg_req *req,
3052 struct nix_get_mac_addr_rsp *rsp)
3054 u16 pcifunc = req->hdr.pcifunc;
3055 struct rvu_pfvf *pfvf;
3057 if (!is_nixlf_attached(rvu, pcifunc))
3058 return NIX_AF_ERR_AF_LF_INVALID;
3060 pfvf = rvu_get_pfvf(rvu, pcifunc);
3062 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3067 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3068 struct msg_rsp *rsp)
3070 bool allmulti = false, disable_promisc = false;
3071 u16 pcifunc = req->hdr.pcifunc;
3072 int blkaddr, nixlf, err;
3073 struct rvu_pfvf *pfvf;
3075 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3079 pfvf = rvu_get_pfvf(rvu, pcifunc);
3081 if (req->mode & NIX_RX_MODE_PROMISC)
3083 else if (req->mode & NIX_RX_MODE_ALLMULTI)
3086 disable_promisc = true;
3088 if (disable_promisc)
3089 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
3091 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3093 pfvf->rx_chan_cnt, allmulti);
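/* Compute the frame sizes to program on the RX link: start from the
 * requester's values and widen them to cover the PF and all of its VFs,
 * since they share the same link.
 */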
3097 static void nix_find_link_frs(struct rvu *rvu,
3098 struct nix_frs_cfg *req, u16 pcifunc)
3100 int pf = rvu_get_pf(pcifunc);
3101 struct rvu_pfvf *pfvf;
3106 /* Update with requester's min/max lengths */
3107 pfvf = rvu_get_pfvf(rvu, pcifunc);
3108 pfvf->maxlen = req->maxlen;
3109 if (req->update_minlen)
3110 pfvf->minlen = req->minlen;
3112 maxlen = req->maxlen;
3113 minlen = req->update_minlen ? req->minlen : 0;
3115 /* Get this PF's numVFs and starting hwvf */
3116 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3118 /* For each VF, compare requested max/minlen */
3119 for (vf = 0; vf < numvfs; vf++) {
3120 pfvf = &rvu->hwvf[hwvf + vf];
3121 if (pfvf->maxlen > maxlen)
3122 maxlen = pfvf->maxlen;
3123 if (req->update_minlen &&
3124 pfvf->minlen && pfvf->minlen < minlen)
3125 minlen = pfvf->minlen;
3128 /* Compare requested max/minlen with PF's max/minlen */
3129 pfvf = &rvu->pf[pf];
3130 if (pfvf->maxlen > maxlen)
3131 maxlen = pfvf->maxlen;
3132 if (req->update_minlen &&
3133 pfvf->minlen && pfvf->minlen < minlen)
3134 minlen = pfvf->minlen;
3136 /* Update the request with the max/min across the PF and its VFs */
3137 req->maxlen = maxlen;
3138 if (req->update_minlen)
3139 req->minlen = minlen;
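/* Mbox handler for HW frame size config: validate the requested
 * maxlen/minlen against the link limits, optionally update the SMQs
 * owned by this PF/VF, then reprogram the RX link config and, for CGX
 * links, the TX credits.
 */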
3142 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3143 struct msg_rsp *rsp)
3145 struct rvu_hwinfo *hw = rvu->hw;
3146 u16 pcifunc = req->hdr.pcifunc;
3147 int pf = rvu_get_pf(pcifunc);
3148 int blkaddr, schq, link = -1;
3149 struct nix_txsch *txsch;
3150 u64 cfg, lmac_fifo_len;
3151 struct nix_hw *nix_hw;
3152 u8 cgx = 0, lmac = 0;
3155 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3157 return NIX_AF_ERR_AF_LF_INVALID;
3159 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3163 if (is_afvf(pcifunc))
3164 rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3166 rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3168 if (!req->sdp_link && req->maxlen > max_mtu)
3169 return NIX_AF_ERR_FRS_INVALID;
3171 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3172 return NIX_AF_ERR_FRS_INVALID;
3174 /* Check if requester wants to update SMQs */
3175 if (!req->update_smq)
3178 /* Update min/maxlen in each of the SMQ attached to this PF/VF */
3179 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3180 mutex_lock(&rvu->rsrc_lock);
3181 for (schq = 0; schq < txsch->schq.max; schq++) {
3182 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3184 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3185 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3186 if (req->update_minlen)
3187 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3188 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3190 mutex_unlock(&rvu->rsrc_lock);
3193 /* Check if config is for SDP link */
3194 if (req->sdp_link) {
3196 return NIX_AF_ERR_RX_LINK_INVALID;
3197 link = hw->cgx_links + hw->lbk_links;
3201 /* Check if the request is from CGX mapped RVU PF */
3202 if (is_pf_cgxmapped(rvu, pf)) {
3203 /* Get CGX and LMAC to which this PF is mapped and find link */
3204 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3205 link = (cgx * hw->lmac_per_cgx) + lmac;
3206 } else if (pf == 0) {
3207 /* For VFs of PF0, ingress is the LBK port, so configure the LBK link */
3208 link = hw->cgx_links;
3212 return NIX_AF_ERR_RX_LINK_INVALID;
3214 nix_find_link_frs(rvu, req, pcifunc);
3217 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3218 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3219 if (req->update_minlen)
3220 cfg = (cfg & ~0xFFFFULL) | req->minlen;
3221 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3223 if (req->sdp_link || pf == 0)
3226 /* Update transmit credits for CGX links */
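/* Credits are programmed in units of 16 bytes of the per-LMAC FIFO
 * share left after one maximum-sized frame. Rough illustration with
 * hypothetical numbers: a 49152-byte (48 KB) share per LMAC and
 * req->maxlen = 9212 gives a credit count of (49152 - 9212) / 16 = 2496.
 */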
3228 rvu_cgx_get_fifolen(rvu) /
3229 cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3230 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3231 cfg &= ~(0xFFFFFULL << 12);
3232 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
3233 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3237 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3238 struct msg_rsp *rsp)
3240 int nixlf, blkaddr, err;
3243 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3247 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3248 /* Set the interface configuration */
3249 if (req->len_verify & BIT(0))
3252 cfg &= ~BIT_ULL(41);
3254 if (req->len_verify & BIT(1))
3257 cfg &= ~BIT_ULL(40);
3259 if (req->csum_verify & BIT(0))
3262 cfg &= ~BIT_ULL(37);
3264 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3269 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3271 /* CN10k supports 72KB FIFO size and max packet size of 64k */
3272 if (rvu->hw->lbk_bufsize == 0x12000)
3273 return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3275 return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3278 static void nix_link_config(struct rvu *rvu, int blkaddr)
3280 struct rvu_hwinfo *hw = rvu->hw;
3281 int cgx, lmac_cnt, slink, link;
3282 u16 lbk_max_frs, lmac_max_frs;
3285 rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3286 rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3288 /* Set default min/max packet lengths allowed on NIX Rx links.
3290 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3291 * as undersize and report them to SW as error pkts, hence
3292 * setting it to 40 bytes.
3294 for (link = 0; link < hw->cgx_links; link++) {
3295 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3296 ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3299 for (link = hw->cgx_links; link < hw->lbk_links; link++) {
3300 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3301 ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3303 if (hw->sdp_links) {
3304 link = hw->cgx_links + hw->lbk_links;
3305 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3306 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3309 /* Set credits for Tx links assuming max packet length allowed.
3310 * This will be reconfigured based on MTU set for PF/VF.
3312 for (cgx = 0; cgx < hw->cgx; cgx++) {
3313 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3314 tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3316 /* Enable credits and set credit pkt count to max allowed */
3317 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3318 slink = cgx * hw->lmac_per_cgx;
3319 for (link = slink; link < (slink + lmac_cnt); link++) {
3320 rvu_write64(rvu, blkaddr,
3321 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3326 /* Set Tx credits for LBK link */
3327 slink = hw->cgx_links;
3328 for (link = slink; link < (slink + hw->lbk_links); link++) {
3329 tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3330 /* Enable credits and set credit pkt count to max allowed */
3331 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3332 rvu_write64(rvu, blkaddr,
3333 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3337 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3342 /* Start X2P bus calibration */
3343 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3344 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3345 /* Wait for calibration to complete */
3346 err = rvu_poll_reg(rvu, blkaddr,
3347 NIX_AF_STATUS, BIT_ULL(10), false);
3349 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3353 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3354 /* Check if CGX devices are ready */
3355 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3356 /* Skip when cgx port is not available */
3357 if (!rvu_cgx_pdata(idx, rvu) ||
3358 (status & (BIT_ULL(16 + idx))))
3361 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3365 /* Check if LBK is ready */
3366 if (!(status & BIT_ULL(19))) {
3368 "LBK didn't respond to NIX X2P calibration\n");
3372 /* Clear 'calibrate_x2p' bit */
3373 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3374 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3375 if (err || (status & 0x3FFULL))
3377 "NIX X2P calibration failed, status 0x%llx\n", status);
3383 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3388 /* Set admin queue endianness */
3389 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3392 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3395 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3398 /* Do not bypass NDC cache */
3399 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3401 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3402 /* Disable caching of SQB aka SQEs */
3405 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3407 /* Result structure can be followed by RQ/SQ/CQ context at
3408 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3409 * operation type. Alloc sufficient result memory for all operations.
3411 err = rvu_aq_alloc(rvu, &block->aq,
3412 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3413 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3417 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3418 rvu_write64(rvu, block->addr,
3419 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3423 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3425 const struct npc_lt_def_cfg *ltdefs;
3426 struct rvu_hwinfo *hw = rvu->hw;
3427 int blkaddr = nix_hw->blkaddr;
3428 struct rvu_block *block;
3432 block = &hw->block[blkaddr];
3434 if (is_rvu_96xx_B0(rvu)) {
3435 /* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3436 * internal state when conditional clocks are turned off.
3437 * Hence enable them.
3439 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3440 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3442 /* Set chan/link to backpressure TL3 instead of TL2 */
3443 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3445 /* Disable SQ manager's sticky mode operation (set TM6 = 0).
3446 * This sticky mode is known to cause SQ stalls when multiple
3447 * SQs are mapped to the same SMQ and are transmitting pkts at the same time.
3449 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3450 cfg &= ~BIT_ULL(15);
3451 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3454 ltdefs = rvu->kpu.lt_def;
3455 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3456 err = nix_calibrate_x2p(rvu, blkaddr);
3460 /* Initialize admin queue */
3461 err = nix_aq_init(rvu, block);
3465 /* Restore CINT timer delay to HW reset values */
3466 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3468 if (is_block_implemented(hw, blkaddr)) {
3469 err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3473 err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3477 err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3481 err = nix_setup_txvlan(rvu, nix_hw);
3485 /* Configure segmentation offload formats */
3486 nix_setup_lso(rvu, nix_hw, blkaddr);
3488 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3489 * This helps HW protocol checker to identify headers
3490 * and validate length and checksums.
3492 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3493 (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3494 ltdefs->rx_ol2.ltype_mask);
3495 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3496 (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3497 ltdefs->rx_oip4.ltype_mask);
3498 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3499 (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3500 ltdefs->rx_iip4.ltype_mask);
3501 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3502 (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3503 ltdefs->rx_oip6.ltype_mask);
3504 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3505 (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3506 ltdefs->rx_iip6.ltype_mask);
3507 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3508 (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3509 ltdefs->rx_otcp.ltype_mask);
3510 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3511 (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3512 ltdefs->rx_itcp.ltype_mask);
3513 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3514 (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3515 ltdefs->rx_oudp.ltype_mask);
3516 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3517 (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3518 ltdefs->rx_iudp.ltype_mask);
3519 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3520 (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3521 ltdefs->rx_osctp.ltype_mask);
3522 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3523 (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3524 ltdefs->rx_isctp.ltype_mask);
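/* All of the NIX_AF_RX_DEF_* writes above use the same
 * (lid << 8) | (ltype_match << 4) | ltype_mask encoding. A small helper
 * along these lines (hypothetical, not part of this driver) would remove
 * the repetition:
 *
 *	static void nix_cfg_rx_ltdef(struct rvu *rvu, int blkaddr, u64 reg,
 *				     u8 lid, u8 lt_match, u8 lt_mask)
 *	{
 *		rvu_write64(rvu, blkaddr, reg,
 *			    (lid << 8) | (lt_match << 4) | lt_mask);
 *	}
 */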
3526 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3530 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3531 nix_link_config(rvu, blkaddr);
3533 /* Enable Channel backpressure */
3534 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3539 int rvu_nix_init(struct rvu *rvu)
3541 struct rvu_hwinfo *hw = rvu->hw;
3542 struct nix_hw *nix_hw;
3543 int blkaddr = 0, err;
3546 hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3551 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3553 nix_hw = &hw->nix[i];
3555 nix_hw->blkaddr = blkaddr;
3556 err = rvu_nix_block_init(rvu, nix_hw);
3559 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3566 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3567 struct rvu_block *block)
3569 struct nix_txsch *txsch;
3570 struct nix_mcast *mcast;
3571 struct nix_txvlan *vlan;
3572 struct nix_hw *nix_hw;
3575 rvu_aq_free(rvu, block->aq);
3577 if (is_block_implemented(rvu->hw, blkaddr)) {
3578 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3582 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3583 txsch = &nix_hw->txsch[lvl];
3584 kfree(txsch->schq.bmap);
3587 vlan = &nix_hw->txvlan;
3588 kfree(vlan->rsrc.bmap);
3589 mutex_destroy(&vlan->rsrc_lock);
3590 devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3592 mcast = &nix_hw->mcast;
3593 qmem_free(rvu->dev, mcast->mce_ctx);
3594 qmem_free(rvu->dev, mcast->mcast_buf);
3595 mutex_destroy(&mcast->mce_lock);
3599 void rvu_nix_freemem(struct rvu *rvu)
3601 struct rvu_hwinfo *hw = rvu->hw;
3602 struct rvu_block *block;
3605 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3607 block = &hw->block[blkaddr];
3608 rvu_nix_block_freemem(rvu, blkaddr, block);
3609 blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3613 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3614 struct msg_rsp *rsp)
3616 u16 pcifunc = req->hdr.pcifunc;
3619 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3623 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3625 npc_mcam_enable_flows(rvu, pcifunc);
3627 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3630 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3631 struct msg_rsp *rsp)
3633 u16 pcifunc = req->hdr.pcifunc;
3636 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3640 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3642 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
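/* Tear down a NIX LF: disable and free its NPC MCAM entries, free its
 * TX schedulers, sync in-flight RX pkts, stop CGX Rx/Tx for the owner
 * and disable any enabled SQ/RQ/CQ contexts before freeing the LF's
 * context memory.
 */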
3645 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3647 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3648 struct hwctx_disable_req ctx_req;
3651 ctx_req.hdr.pcifunc = pcifunc;
3653 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3654 rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3655 rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3656 nix_interface_deinit(rvu, pcifunc, nixlf);
3657 nix_rx_sync(rvu, blkaddr);
3658 nix_txschq_free(rvu, pcifunc);
3660 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3663 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3664 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3666 dev_err(rvu->dev, "SQ ctx disable failed\n");
3670 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3671 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3673 dev_err(rvu->dev, "RQ ctx disable failed\n");
3677 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3678 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3680 dev_err(rvu->dev, "CQ ctx disable failed\n");
3683 nix_ctx_free(rvu, pfvf);
3686 #define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
3688 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3690 struct rvu_hwinfo *hw = rvu->hw;
3691 struct rvu_block *block;
3696 pf = rvu_get_pf(pcifunc);
3697 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3700 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3702 return NIX_AF_ERR_AF_LF_INVALID;
3704 block = &hw->block[blkaddr];
3705 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3707 return NIX_AF_ERR_AF_LF_INVALID;
3709 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3712 cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3714 cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3716 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3721 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3722 struct msg_rsp *rsp)
3724 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3727 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3728 struct msg_rsp *rsp)
3730 return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3733 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3734 struct nix_lso_format_cfg *req,
3735 struct nix_lso_format_cfg_rsp *rsp)
3737 u16 pcifunc = req->hdr.pcifunc;
3738 struct nix_hw *nix_hw;
3739 struct rvu_pfvf *pfvf;
3740 int blkaddr, idx, f;
3743 pfvf = rvu_get_pfvf(rvu, pcifunc);
3744 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3745 if (!pfvf->nixlf || blkaddr < 0)
3746 return NIX_AF_ERR_AF_LF_INVALID;
3748 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3752 /* Find existing matching LSO format, if any */
3753 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3754 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3755 reg = rvu_read64(rvu, blkaddr,
3756 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3757 if (req->fields[f] != (reg & req->field_mask))
3761 if (f == NIX_LSO_FIELD_MAX)
3765 if (idx < nix_hw->lso.in_use) {
3767 rsp->lso_format_idx = idx;
3771 if (nix_hw->lso.in_use == nix_hw->lso.total)
3772 return NIX_AF_ERR_LSO_CFG_FAIL;
3774 rsp->lso_format_idx = nix_hw->lso.in_use++;
3776 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3777 rvu_write64(rvu, blkaddr,
3778 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3784 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
3786 bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
3788 /* Overwrite VF MAC address with default_mac */
3790 ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);