// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
enum nix_mark_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128
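
/* Note: these are hardware encodings rather than raw counts; the MCE
 * context table is sized as (256UL << MC_TBL_SIZE) entries and the
 * replication buffer pool as (8UL << MC_BUF_CNT) buffers, see
 * nix_setup_mcast() below.
 */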
struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
{
	int i = 0;

	/* If blkaddr is 0, return the first NIX block address */
	if (blkaddr == 0)
		return rvu->nix_blkaddr[blkaddr];

	while (i + 1 < MAX_NIX_BLKS) {
		if (rvu->nix_blkaddr[i] == blkaddr)
			return rvu->nix_blkaddr[i + 1];
		i++;
	}

	return 0;
}
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}
int rvu_get_nixlf_count(struct rvu *rvu)
{
	int blkaddr = 0, max = 0;
	struct rvu_block *block;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		max += block->lf.max;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
	return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}
static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;

	return idx;
}
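
/* Note: this is a simple bump allocator; next_free_mce only ever grows and
 * entries are never returned. That is sufficient here because MCE ranges are
 * carved out once at init time, in nix_setup_bcast_tables().
 */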
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	int nix_blkaddr = 0, i = 0;
	struct rvu *rvu = hw->rvu;

	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
	while (nix_blkaddr) {
		if (blkaddr == nix_blkaddr && hw->nix)
			return &hw->nix[i];
		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
		i++;
	}
	return NULL;
}
static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "NIX RX software sync failed\n");
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pkind, pf, vf, lbkid;
	u8 cgx_id, lmac_id;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
					       lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* If NIX1 block is present on the silicon then NIXes are
		 * assigned alternatively for lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
		lbkid = 0;
		if (rvu->hw->lbk_links > 1)
			lbkid = vf & 0x1 ? 0 : 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs are
		 * enabled then the last VF remains with no pair.
		 */
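		/* For example (illustration only, with a single NIX block and
		 * lbkid 0): AF VF0 receives on LBK channel 0 and transmits on
		 * channel 1, while AF VF1 receives on channel 1 and transmits
		 * on channel 0, so each pair forms a bidirectional pipe.
		 */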
		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
		pfvf->tx_chan_base = vf & 0x1 ?
					NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
					NIX_CHAN_LBK_CHX(lbkid, vf + 1);
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, false);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM matching this NIX LF's attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}

	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);
	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;
	pfvf->rxvlan = false;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
	if (err)
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
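	/* Worked example (assuming 16 channels per LMAC and 4 LMACs per CGX,
	 * as the table above implies): cgx(1)_lmac(0)_chan(0) gets
	 * bpid = 1 * 4 * 16 + 0 * 16 + 0 = 64, matching bpid(64 - 79) above.
	 */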
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 15)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;

	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}
int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	s16 bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;

	/* Enable backpressure only for CGX mapped PFs and LBK interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & 0xFF) | BIT_ULL(16));
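		/* As used here, NIX_AF_RX_CHANX_CFG carries the bpid in its
		 * low bits and BIT(16) as the backpressure enable; the
		 * bp_disable handler above clears the same BIT(16).
		 */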
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
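
/* In the NIX_AF_LSO_FORMATX_FIELDX entries above, sizem1 encodes the field
 * width minus one: sizem1 = 1 selects a 2-byte field (IP length, TCP flags)
 * and sizem1 = 3 a 4-byte field (TCP sequence number).
 */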
static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
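	/* Worked example: the 0xFFF2 masks clear FIN (0x1), RST (0x4) and
	 * PSH (0x8) in the low TCP-flags nibble while leaving SYN, ACK and
	 * URG intact.
	 */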
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
		    way_mask << 20);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
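
/* Worked example for NIX_AF_LFX_RSS_GRPX above: with rss_sz = 64 and grp = 2
 * the write is ((ilog2(64) - 1) << 16) | 128, i.e. group 2 starts at
 * indirection-table offset 128 with a size code of 5.
 */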
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(2);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
				   struct nix_aq_enq_req *req,
				   struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = nix_hw->blkaddr;
	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry init */
	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}
	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);

		/* Check if index exceeds MCE list length */
		if (!nix_hw->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;
	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;
	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}
	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}
	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct nix_hw *nix_hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
}
static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;
	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;
	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);
	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
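	/* TPID layout written above (inferred from the comment): bits [15:0]
	 * hold the VLAN0 TPID (0x88A8, 802.1AD) and bits [31:16] the VLAN1
	 * TPID (0x8100, 802.1Q).
	 */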
	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = req->npa_func;
	cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	/* Configure pkind for TX parse config */
	cfg = NPC_TX_DEF_PKIND;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;
exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);
	rsp->cgx_links = hw->cgx_links;
	rsp->lbk_links = hw->lbk_links;
	rsp->sdp_links = hw->sdp_links;

	return rc;
}
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
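	/* Field packing used above: [18:16] = offset, [15:12] = y_mask,
	 * [11:8] = y_val, [7:4] = r_mask, [3:0] = r_val.
	 */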
	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}
/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	if (lvl != NIX_TXSCH_LVL_TL2)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			 (hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}
	/* Allocate contiguous queue indices requested first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}
	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}
static void nix_smq_flush(struct rvu *rvu, int blkaddr,
			  int smq, u16 pcifunc, int nixlf)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
						    lmac_id, true);
	}

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);

	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PF
		 * and its VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}
static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		mutex_unlock(&rvu->rsrc_lock);
		goto err;
	}

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on the user.
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ)
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	return NIX_AF_ERR_TLX_INVALID;
}
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);

	return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}
static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	}
	return true;
}
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
		    TXSCH_TL1_DFLT_RR_QTM);
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	u64 reg, regval, schq_regbase;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}
	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special hence split register writes such
		 * that flush first and write rest of the bits later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* TODO: handle tx vtag configuration */
		return 0;
	}

	return 0;
}
static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
			     int mce, u8 op, u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Forward bcast pkts to RQ0, RSS not needed */
	aq_req.mce.op = 0;
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}
static int nix_update_mce_list(struct nix_mce_list *mce_list,
			       u16 pcifunc, bool add)
{
	struct mce *mce, *tail = NULL;
	bool delete = false;

	/* Scan through the current list */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		/* If already exists, then delete */
		if (mce->pcifunc == pcifunc && !add) {
			delete = true;
			break;
		}
		tail = mce;
	}

	if (delete) {
		hlist_del(&mce->node);
		kfree(mce);
		mce_list->count--;
		return 0;
	}

	if (!add)
		return 0;

	/* Add a new one to the list, at the tail */
	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
	if (!mce)
		return -ENOMEM;
	mce->pcifunc = pcifunc;
	if (!tail)
		hlist_add_head(&mce->node, &mce_list->head);
	else
		hlist_add_behind(&mce->node, &tail->node);
	mce_list->count++;
	return 0;
}
int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
	int err = 0, idx, next_idx, last_idx;
	struct nix_mce_list *mce_list;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	struct mce *mce;
	int blkaddr;

	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
	if (is_afvf(pcifunc))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return 0;

	mcast = &nix_hw->mcast;

	/* Get this PF/VF func's MCE index */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);

	mce_list = &pfvf->bcast_mce_list;
	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
		dev_err(rvu->dev,
			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
			__func__, idx, mce_list->max,
			pcifunc >> RVU_PFVF_PF_SHIFT);
		return -EINVAL;
	}

	mutex_lock(&mcast->mce_lock);

	err = nix_update_mce_list(mce_list, pcifunc, add);
	if (err)
		goto end;

	/* Disable MCAM entry in NPC */
	if (!mce_list->count) {
		rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
		goto end;
	}

	/* Dump the updated list to HW */
	idx = pfvf->bcast_mce_idx;
	last_idx = idx + mce_list->count - 1;
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (idx > last_idx)
			break;

		next_idx = idx + 1;
		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					next_idx > last_idx);
		if (err)
			goto end;
		idx++;
	}

end:
	mutex_unlock(&mcast->mce_lock);
	return err;
}
static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	int err, pf, numvfs, idx;
	struct rvu_pfvf *pfvf;
	u16 pcifunc;
	u64 cfg;

	/* Skip PF0 (i.e. AF) */
	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;
		/* Get numVFs attached to this PF */
		numvfs = (cfg >> 12) & 0xFF;

		pfvf = &rvu->pf[pf];

		/* This NIX0/1 block mapped to PF ? */
		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
			continue;

		/* Save the start MCE */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);

		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs.
			 */
			err = nix_blk_setup_mce(rvu, nix_hw,
						pfvf->bcast_mce_idx + idx,
						NIX_AQ_INSTOP_INIT,
						pcifunc, 0, true);
			if (err)
				return err;
		}
	}
	return 0;
}
static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	struct rvu_hwinfo *hw = rvu->hw;
	int err, size;

	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
	size = (1ULL << size);

	/* Alloc memory for multicast/mirror replication entries */
	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
			 (256UL << MC_TBL_SIZE), size);
	if (err)
		return -ENOMEM;
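	/* E.g. with MC_TBL_SIZE = MC_TBL_SZ_512 this allocates 512 MCE
	 * entries, assuming the MC_TBL_SZ_512 encoding is 1 (the table size
	 * being computed as 256 << encoding).
	 */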
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
		    (u64)mcast->mce_ctx->iova);

	/* Set max list length equal to max no of VFs per PF + PF itself */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);

	/* Alloc memory for multicast replication buffers */
	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
			 (8UL << MC_BUF_CNT), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	return nix_setup_bcast_tables(rvu, nix_hw);
}
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	u64 cfg, reg;
	int err, lvl, schq;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
		for (schq = 0; schq < txsch->schq.max; schq++)
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	}
	return 0;
}
int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
{
	int fmt_idx;

	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
			return fmt_idx;
	}
	if (fmt_idx >= nix_hw->mark_format.total)
		return -ERANGE;

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
	nix_hw->mark_format.cfg[fmt_idx] = cfg;
	nix_hw->mark_format.in_use++;

	return fmt_idx;
}

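/* The scan above makes reservation idempotent: asking for an already
 * programmed cfg returns its existing index instead of consuming another of
 * the mark_format.total hardware slots.
 */
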
static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
				    int blkaddr)
{
	u64 cfgs[] = {
		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
	};
	int i, rc;
	u64 total;

	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
	nix_hw->mark_format.total = (u8)total;
	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
					       GFP_KERNEL);
	if (!nix_hw->mark_format.cfg)
		return -ENOMEM;
	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
		if (rc < 0)
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
				rc, i);
	}

	return 0;
}

int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr, err;
	u64 stats;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);

	return 0;
}

/* Returns the ALG index to be set into NPC_RX_ACTION */
static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
	int i;

	/* Scan over existing algo entries to find a match */
	for (i = 0; i < nix_hw->flowkey.in_use; i++)
		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
			return i;

	return -ERANGE;
}

static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
	int idx, nr_field, key_off, field_marker, keyoff_marker;
	int max_key_off, max_bit_pos, group_member;
	struct nix_rx_flowkey_alg *field;
	struct nix_rx_flowkey_alg tmp;
	u32 key_type, valid_key;

	if (!alg)
		return -EINVAL;

#define FIELDS_PER_ALG  5
#define MAX_KEY_OFF  40
	/* Clear all fields */
	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);

	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
	 *
	 * keyoff_marker - Enabled when hash byte length needs to be accounted
	 * in field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when protocol is part of a group.
	 */
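	/* Worked example (follows from the cases below): for flow_cfg =
	 * IPV4 | IPV6 | TCP | UDP the loop produces three fields:
	 *   alg[0]: IPv4 SIP+DIP,  8 bytes at key_offset 0
	 *   alg[1]: IPv6 SIP+DIP, 32 bytes at key_offset 0 (shares the
	 *           offset with IPv4 since a pkt parses as only one IP
	 *           version; IPv4 clears keyoff_marker for this reason)
	 *   alg[2]: L4 SPORT+DPORT, 4 bytes at key_offset 32, with
	 *           ltype_match OR-ing TCP and UDP via the group logic
	 */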
	keyoff_marker = 0; max_key_off = 0; group_member = 0;
	nr_field = 0; key_off = 0; field_marker = 1;
	field = &tmp; max_bit_pos = fls(flow_cfg);
	for (idx = 0;
	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
	     key_off < MAX_KEY_OFF; idx++) {
		key_type = BIT(idx);
		valid_key = flow_cfg & key_type;
		/* Found a field marker, reset the field values */
		if (field_marker)
			memset(&tmp, 0, sizeof(tmp));

		field_marker = true;
		keyoff_marker = true;
		switch (key_type) {
		case NIX_FLOW_KEY_TYPE_PORT:
			field->sel_chan = true;
			/* This should be set to 1, when SEL_CHAN is set */
			field->bytesm1 = 1;
			break;
		case NIX_FLOW_KEY_TYPE_IPV4:
		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP;
			}
			field->hdr_offset = 12; /* SIP offset */
			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
			field->ltype_mask = 0xF; /* Match only IPv4 */
			keyoff_marker = false;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6:
		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
			field->lid = NPC_LID_LC;
			field->ltype_match = NPC_LT_LC_IP6;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
				field->lid = NPC_LID_LG;
				field->ltype_match = NPC_LT_LG_TU_IP6;
			}
			field->hdr_offset = 8; /* SIP offset */
			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
			field->ltype_mask = 0xF; /* Match only IPv6 */
			break;
		case NIX_FLOW_KEY_TYPE_TCP:
		case NIX_FLOW_KEY_TYPE_UDP:
		case NIX_FLOW_KEY_TYPE_SCTP:
		case NIX_FLOW_KEY_TYPE_INNR_TCP:
		case NIX_FLOW_KEY_TYPE_INNR_UDP:
		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
			field->lid = NPC_LID_LD;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
				field->lid = NPC_LID_LH;
			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */

			/* Enum values of the outer and inner L4 layer types
			 * are the same (see the BUILD_BUG_ONs below), so
			 * there is no need to change ltype_match, just
			 * change the lid for inner protocols.
			 */
			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
				     (int)NPC_LT_LH_TU_TCP);
			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
				     (int)NPC_LT_LH_TU_UDP);
			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
				     (int)NPC_LT_LH_TU_SCTP);

			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
			    valid_key) {
				field->ltype_match |= NPC_LT_LD_TCP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_UDP;
				group_member = true;
			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
				   valid_key) {
				field->ltype_match |= NPC_LT_LD_SCTP;
				group_member = true;
			}
			field->ltype_mask = ~field->ltype_match;
			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any group member was
				 * enabled in the group but not the final one.
				 */
				if (group_member) {
					valid_key = true;
					group_member = false;
				}
			} else {
				field_marker = false;
				keyoff_marker = false;
			}
			break;
		case NIX_FLOW_KEY_TYPE_NVGRE:
			field->lid = NPC_LID_LD;
			field->hdr_offset = 4; /* VSID offset */
			field->bytesm1 = 2;
			field->ltype_match = NPC_LT_LD_NVGRE;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VXLAN:
		case NIX_FLOW_KEY_TYPE_GENEVE:
			field->lid = NPC_LID_LE;
			field->bytesm1 = 2;
			field->hdr_offset = 4;
			field->ltype_mask = 0xF;
			field_marker = false;
			keyoff_marker = false;

			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
				field->ltype_match |= NPC_LT_LE_VXLAN;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
				field->ltype_match |= NPC_LT_LE_GENEVE;
				group_member = true;
			}

			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
				if (group_member) {
					field->ltype_mask = ~field->ltype_match;
					field_marker = true;
					keyoff_marker = true;
					valid_key = true;
					group_member = false;
				}
			}
			break;
		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
			field->lid = NPC_LID_LA;
			field->ltype_match = NPC_LT_LA_ETHER;
			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
				field->lid = NPC_LID_LF;
				field->ltype_match = NPC_LT_LF_TU_ETHER;
			}
			field->hdr_offset = 0;
			field->bytesm1 = 5; /* DMAC 6 Byte */
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
			field->lid = NPC_LID_LC;
			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 Byte ext hdr */
			field->ltype_match = NPC_LT_LC_IP6_EXT;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_GTPU:
			field->lid = NPC_LID_LE;
			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
			field->ltype_match = NPC_LT_LE_GTPU;
			field->ltype_mask = 0xF;
			break;
		case NIX_FLOW_KEY_TYPE_VLAN:
			field->lid = NPC_LID_LB;
			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
			field->ltype_match = NPC_LT_LB_CTAG;
			field->ltype_mask = 0xF;
			field->fn_mask = 1; /* Mask out the first nibble */
			break;
		}
		field->ena = 1;

		/* Found a valid flow key type */
		if (valid_key) {
			field->key_offset = key_off;
			memcpy(&alg[nr_field], field, sizeof(*field));
			max_key_off = max(max_key_off, field->bytesm1 + 1);

			/* Found a field marker, get the next field */
			if (field_marker)
				nr_field++;
		}

		/* Found a keyoff marker, update the new key_off */
		if (keyoff_marker) {
			key_off += max_key_off;
			max_key_off = 0;
		}
	}
	/* Processed all the flow key types */
	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
		return 0;

	return NIX_AF_ERR_RSS_NOSPC_FIELD;
}

static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
	u64 field[FIELDS_PER_ALG];
	struct nix_hw *hw;
	int fid, rc;

	hw = get_nix_hw(rvu->hw, blkaddr);
	if (!hw)
		return -EINVAL;

	/* No room to add new flow hash algorithm */
	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
		return NIX_AF_ERR_RSS_NOSPC_ALGO;

	/* Generate algo fields for the given flow_cfg */
	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
	if (rc)
		return rc;

	/* Update ALGX_FIELDX register with generated fields */
	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
							   fid), field[fid]);

	/* Store the flow_cfg for further lookup */
	rc = hw->flowkey.in_use;
	hw->flowkey.flowkey[rc] = flow_cfg;
	hw->flowkey.in_use++;

	return rc;
}

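/* Each algorithm owns FIELDS_PER_ALG (i.e 5) NIX_AF_RX_FLOW_KEY_ALGX_FIELDX()
 * registers, one per extracted field, so a reservation consumes a whole ALG
 * slot even when the config needs fewer fields.
 */
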
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
					 struct nix_rss_flowkey_cfg *req,
					 struct nix_rss_flowkey_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int alg_idx, nixlf, blkaddr;
	struct nix_hw *nix_hw;
	int err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve new */
	if (alg_idx < 0) {
		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
						  req->flowkey_cfg);
		if (alg_idx < 0)
			return alg_idx;
	}
	rsp->alg_idx = alg_idx;
	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
				       alg_idx, req->mcam_index);
	return 0;
}

static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
{
	u32 flowkey_cfg, minkey_cfg;
	int alg, fid, rc;

	/* Disable all flow key algx fieldx */
	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
			rvu_write64(rvu, blkaddr,
				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
				    0);
	}

	/* IPv4/IPv6 SIP/DIPs */
	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	minkey_cfg = flowkey_cfg;
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
		      NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
	if (rc < 0)
		return rc;

	return 0;
}

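/* The eight reservations above pre-populate the algorithm table with every
 * TCP/UDP/SCTP combination on top of the IPv4/IPv6 2-tuple, so common RSS
 * configs resolve in get_flowkey_alg_idx() without reserving new entries
 * at mbox time.
 */
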
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
				      struct nix_set_mac_addr *req,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	ether_addr_copy(pfvf->mac_addr, req->mac_addr);

	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, req->mac_addr);

	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);

	return 0;
}

int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
				      struct msg_req *req,
				      struct nix_get_mac_addr_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;

	if (!is_nixlf_attached(rvu, pcifunc))
		return NIX_AF_ERR_AF_LF_INVALID;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	return 0;
}

int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
				     struct msg_rsp *rsp)
{
	bool allmulti = false, disable_promisc = false;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (req->mode & NIX_RX_MODE_PROMISC)
		allmulti = false;
	else if (req->mode & NIX_RX_MODE_ALLMULTI)
		allmulti = true;
	else
		disable_promisc = true;

	if (disable_promisc)
		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
	else
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, allmulti);

	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);

	return 0;
}

static void nix_find_link_frs(struct rvu *rvu,
			      struct nix_frs_cfg *req, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	struct rvu_pfvf *pfvf;
	int maxlen, minlen;
	int numvfs, hwvf;
	int vf;

	/* Update with requester's min/max lengths */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	pfvf->maxlen = req->maxlen;
	if (req->update_minlen)
		pfvf->minlen = req->minlen;

	maxlen = req->maxlen;
	minlen = req->update_minlen ? req->minlen : 0;

	/* Get this PF's numVFs and starting hwvf */
	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

	/* For each VF, compare requested max/minlen */
	for (vf = 0; vf < numvfs; vf++) {
		pfvf = &rvu->hwvf[hwvf + vf];
		if (pfvf->maxlen > maxlen)
			maxlen = pfvf->maxlen;
		if (req->update_minlen &&
		    pfvf->minlen && pfvf->minlen < minlen)
			minlen = pfvf->minlen;
	}

	/* Compare requested max/minlen with PF's max/minlen */
	pfvf = &rvu->pf[pf];
	if (pfvf->maxlen > maxlen)
		maxlen = pfvf->maxlen;
	if (req->update_minlen &&
	    pfvf->minlen && pfvf->minlen < minlen)
		minlen = pfvf->minlen;

	/* Update the request with the max/min of the PF and its VFs */
	req->maxlen = maxlen;
	if (req->update_minlen)
		req->minlen = minlen;
}

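/* A PF and its VFs ingress through the same RX link, so the link-level
 * maxlen must cover the largest requester; nix_find_link_frs() widens the
 * request accordingly before NIX_AF_RX_LINKX_CFG is rewritten.
 */
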
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	int blkaddr, schq, link = -1;
	struct nix_txsch *txsch;
	u64 cfg, lmac_fifo_len;
	struct nix_hw *nix_hw;
	u8 cgx = 0, lmac = 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
		return NIX_AF_ERR_FRS_INVALID;

	/* Check if requester wants to update SMQ's */
	if (!req->update_smq)
		goto rx_frscfg;

	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	mutex_lock(&rvu->rsrc_lock);
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
		if (req->update_minlen)
			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
	}
	mutex_unlock(&rvu->rsrc_lock);

rx_frscfg:
	/* Check if config is for SDP link */
	if (req->sdp_link) {
		if (!hw->sdp_links)
			return NIX_AF_ERR_RX_LINK_INVALID;
		link = hw->cgx_links + hw->lbk_links;
		goto linkcfg;
	}

	/* Check if the request is from CGX mapped RVU PF */
	if (is_pf_cgxmapped(rvu, pf)) {
		/* Get CGX and LMAC to which this PF is mapped and find link */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
		link = (cgx * hw->lmac_per_cgx) + lmac;
	} else if (pf == 0) {
		/* For VFs of PF0 ingress is LBK port, so config LBK link */
		link = hw->cgx_links;
	}

	if (link < 0)
		return NIX_AF_ERR_RX_LINK_INVALID;

	nix_find_link_frs(rvu, req, pcifunc);

linkcfg:
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
	if (req->update_minlen)
		cfg = (cfg & ~0xFFFFULL) | req->minlen;
	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);

	if (req->sdp_link || pf == 0)
		return 0;

	/* Update transmit credits for CGX links */
	lmac_fifo_len =
		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
	cfg &= ~(0xFFFFFULL << 12);
	cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
	return 0;
}

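/* Normalized TX credits are in 16-byte units. With assumed example numbers
 * (actual CGX_FIFO_LEN and LMAC count are silicon specific): a 64 KB FIFO
 * shared by 4 LMACs gives lmac_fifo_len = 16384, so maxlen 1518 programs
 * (16384 - 1518) / 16 = 929 credits into the field at bit 12.
 */
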
int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
				      struct msg_rsp *rsp)
{
	struct npc_mcam_alloc_entry_req alloc_req = { };
	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
	struct npc_mcam_free_entry_req free_req = { };
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	/* LBK VFs do not have separate MCAM UCAST entry hence
	 * skip allocating rxvlan for them
	 */
	if (is_afvf(pcifunc))
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (pfvf->rxvlan)
		return 0;

	/* alloc new mcam entry */
	alloc_req.hdr.pcifunc = pcifunc;
	alloc_req.count = 1;

	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
						    &alloc_rsp);
	if (err)
		return err;

	/* update entry to enable rxvlan offload */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		err = NIX_AF_ERR_AF_LF_INVALID;
		goto free_entry;
	}

	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0) {
		err = NIX_AF_ERR_AF_LF_INVALID;
		goto free_entry;
	}

	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
	/* all it means is that rxvlan_index is valid */
	pfvf->rxvlan = true;

	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
	if (err)
		goto free_entry;

	return 0;
free_entry:
	free_req.hdr.pcifunc = pcifunc;
	free_req.entry = alloc_rsp.entry_list[0];
	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
	pfvf->rxvlan = false;
	return err;
}

int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
				    struct msg_rsp *rsp)
{
	int nixlf, blkaddr, err;
	u64 cfg;

	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
	/* Set the interface configuration */
	if (req->len_verify & BIT(0))
		cfg |= BIT_ULL(41);
	else
		cfg &= ~BIT_ULL(41);

	if (req->len_verify & BIT(1))
		cfg |= BIT_ULL(40);
	else
		cfg &= ~BIT_ULL(40);

	if (req->csum_verify & BIT(0))
		cfg |= BIT_ULL(37);
	else
		cfg &= ~BIT_ULL(37);

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);

	return 0;
}

static void nix_link_config(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int cgx, lmac_cnt, slink, link;
	u64 tx_credits;

	/* Set default min/max packet lengths allowed on NIX Rx links.
	 *
	 * With the HW reset minlen value of 60 bytes, HW treats ARP pkts
	 * as undersized and reports them to SW as error pkts, hence
	 * setting it to 40 bytes.
	 */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
	}

	if (hw->sdp_links) {
		link = hw->cgx_links + hw->lbk_links;
		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
	}

	/* Set credits for Tx links assuming max packet length allowed.
	 * This will be reconfigured based on MTU set for PF/VF.
	 */
	for (cgx = 0; cgx < hw->cgx; cgx++) {
		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		slink = cgx * hw->lmac_per_cgx;
		for (link = slink; link < (slink + lmac_cnt); link++) {
			rvu_write64(rvu, blkaddr,
				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
				    tx_credits);
		}
	}

	/* Set Tx credits for LBK link */
	slink = hw->cgx_links;
	for (link = slink; link < (slink + hw->lbk_links); link++) {
		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
		/* Enable credits and set credit pkt count to max allowed */
		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
	}
}

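/* NIX_AF_TX_LINKX_NORM_CREDIT packing used above: byte credits (16-byte
 * units) from bit 12 upwards, the max packet-count credit 0x1FF in bits
 * [10:2] and the credit-enable at bit 1.
 */
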
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
		/* Skip when cgx port is not available */
		if (!rvu_cgx_pdata(idx, rvu) ||
		    (status & (BIT_ULL(16 + idx))))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);

	return err;
}

static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(8);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of SQB aka SQEs */
	cfg |= 0x04ULL;
#endif
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

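/* Result slot layout implied by the sizing above, assuming nix_aq_res_s is
 * at most 128 bytes: the result at offset 0, an optional context image at
 * +128 and an optional write mask at +256, i.e 384 bytes per AQ result.
 */
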
static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	const struct npc_lt_def_cfg *ltdefs;
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr = nix_hw->blkaddr;
	struct rvu_block *block;
	int err;
	u64 cfg;

	block = &hw->block[blkaddr];

	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
		 * internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);

		/* Set chan/link to backpressure TL3 instead of TL2 */
		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);

		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs are mapped to the same SMQ and transmitting pkts at a
		 * time.
		 */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
		cfg &= ~BIT_ULL(15);
		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
	}

	ltdefs = rvu->kpu.lt_def;
	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Set num of links of each type */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	hw->cgx = (cfg >> 12) & 0xF;
	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (cfg >> 24) & 0xF;
	hw->sdp_links = 1;

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	if (is_block_implemented(hw, blkaddr)) {
		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
		if (err)
			return err;

		/* Configure segmentation offload formats */
		nix_setup_lso(rvu, nix_hw, blkaddr);

		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps HW protocol checker to identify headers
		 * and validate length and checksums.
		 */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
			    ltdefs->rx_ol2.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
			    ltdefs->rx_oip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
			    ltdefs->rx_iip4.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
			    ltdefs->rx_oip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
			    ltdefs->rx_iip6.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
			    ltdefs->rx_otcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
			    ltdefs->rx_itcp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
			    ltdefs->rx_oudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
			    ltdefs->rx_iudp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
			    ltdefs->rx_osctp.ltype_mask);
		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
			    ltdefs->rx_isctp.ltype_mask);

		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
		if (err)
			return err;

		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
		nix_link_config(rvu, blkaddr);

		/* Enable Channel backpressure */
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
	}

	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}

int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}

void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	nix_ctx_free(rvu, pfvf);
}

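/* Teardown order matters: NPC entries and Tx scheduler queues are released
 * first so no new traffic references the LF, in-flight RX is synced to
 * memory, and only then are the SQ/RQ/CQ HW contexts disabled and freed.
 */
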
#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;
	int nixlf;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}

int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found, reuse this format */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}

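/* LSO format slots (nix_hw->lso.total) are a block-wide HW resource; the
 * match loop above lets many PF/VFs share one slot for identical formats
 * instead of failing early with NIX_AF_ERR_LSO_CFG_FAIL.
 */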