// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"

static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id);
enum nix_mark_fmt_indexes {
	NIX_MARK_CFG_IP_DSCP_RED,
	NIX_MARK_CFG_IP_DSCP_YELLOW,
	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
	NIX_MARK_CFG_IP_ECN_RED,
	NIX_MARK_CFG_IP_ECN_YELLOW,
	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
	NIX_MARK_CFG_VLAN_DEI_RED,
	NIX_MARK_CFG_VLAN_DEI_YELLOW,
	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
	NIX_MARK_CFG_MAX,
};
/* For now considering MC resources needed for broadcast
 * pkt replication only, i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128
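
/* Editor's note (illustrative, assuming MC_TBL_SZ_512 encodes a table of
 * 256 << 1 = 512 MCE entries, the convention used by nix_setup_mcast()
 * below): 512 entries comfortably cover the 256 HWVFs + 12 PFs above.
 */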

struct mce {
	struct hlist_node	node;
	u16			pcifunc;
};

bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return false;
	return true;
}

int rvu_get_nixlf_count(struct rvu *rvu)
{
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];
	return block->lf.max;
}

int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (*nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	if (nix_blkaddr)
		*nix_blkaddr = blkaddr;

	return 0;
}

static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
	INIT_HLIST_HEAD(&list->head);
	list->count = 0;
	list->max = max;
}

static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
{
	int idx;

	if (!mcast)
		return 0;

	idx = mcast->next_free_mce;
	mcast->next_free_mce += count;
	return idx;
}
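
/* Editor's note: nix_alloc_mce_list() above is a simple bump allocator;
 * indices are handed out sequentially and never returned to a free pool,
 * e.g. two calls with count = 4 return bases 0 and 4.
 */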

static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
		return hw->nix0;

	return NULL;
}

static void nix_rx_sync(struct rvu *rvu, int blkaddr)
{
	int err;

	/* Sync all in flight RX packets to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
	if (err)
		dev_err(rvu->dev, "NIX RX software sync failed\n");
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u16 map_func;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	mutex_lock(&rvu->rsrc_lock);
	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
	mutex_unlock(&rvu->rsrc_lock);

	/* TLs aggregating traffic are shared across PF and VFs */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
			return false;
		return true;
	}

	if (map_func != pcifunc)
		return false;

	return true;
}

static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u8 cgx_id, lmac_id;
	int pkind, pf, vf;
	int err;

	pf = rvu_get_pf(pcifunc);
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	switch (type) {
	case NIX_INTF_TYPE_CGX:
		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);

		pkind = rvu_npc_get_pkind(rvu, pf);
		if (pkind < 0) {
			dev_err(rvu->dev,
				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
			return -EINVAL;
		}
		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
		pfvf->tx_chan_base = pfvf->rx_chan_base;
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
		rvu_npc_set_pkind(rvu, pkind, pfvf);

		/* By default we enable pause frames */
		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
			cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
					       lmac_id, true, true);
		break;
	case NIX_INTF_TYPE_LBK:
		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;

		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore if an odd number of AF VFs is
		 * enabled, the last VF remains with no pair.
		 */
		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
						NIX_CHAN_LBK_CHX(0, vf + 1);
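
		/* Editor's example (from the pairing above): LBK VF0 (vf=0,
		 * even) receives on channel 0 and transmits on channel 1,
		 * while VF1 (vf=1, odd) receives on channel 1 and transmits
		 * on channel 0, forming a back-to-back link over channels
		 * 0/1.
		 */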
		pfvf->rx_chan_cnt = 1;
		pfvf->tx_chan_cnt = 1;
		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
					      pfvf->rx_chan_base, false);
		break;
	}

	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
	 * RVU PF/VF's MAC address.
	 */
	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
				    pfvf->rx_chan_base, pfvf->mac_addr);

	/* Add this PF_FUNC to bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to enable PF_FUNC 0x%x\n",
			pcifunc);
		return err;
	}

	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
					  nixlf, pfvf->rx_chan_base);
	pfvf->maxlen = NIC_HW_MIN_FRS;
	pfvf->minlen = NIC_HW_MIN_FRS;

	return 0;
}

static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int err;

	pfvf->maxlen = 0;
	pfvf->minlen = 0;
	pfvf->rxvlan = false;

	/* Remove this PF_FUNC from bcast pkt replication list */
	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
	if (err) {
		dev_err(rvu->dev,
			"Bcast list, failed to disable PF_FUNC 0x%x\n",
			pcifunc);
	}

	/* Free and disable any MCAM entries used by this NIX LF */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
}

int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
				    struct nix_bp_cfg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, pf, type;
	u16 chan_base, chan;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	chan_base = pfvf->rx_chan_base + req->chan_base;
	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg & ~BIT_ULL(16));
	}
	return 0;
}

static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
			    int type, int chan_id)
{
	int bpid, blkaddr, lmac_chan_cnt;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 cgx_bpid_cnt, lbk_bpid_cnt;
	struct rvu_pfvf *pfvf;
	u8 cgx_id, lmac_id;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	lmac_chan_cnt = cfg & 0xFF;

	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);

	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);

	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * Lmac channels and bpids mapped as follows
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
	switch (type) {
	case NIX_INTF_TYPE_CGX:
		if ((req->chan_base + req->chan_cnt) > 15)
			return -EINVAL;
		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
		/* Assign bpid based on cgx, lmac and chan id */
		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
			(lmac_id * lmac_chan_cnt) + req->chan_base;
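
		/* Editor's example (illustrative, assuming lmac_chan_cnt = 16
		 * and hw->lmac_per_cgx = 4): cgx_id = 1, lmac_id = 2 and
		 * chan_base = 3 give bpid = (1 * 4 * 16) + (2 * 16) + 3 = 99,
		 * consistent with the range map in the comment above.
		 */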

		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > cgx_bpid_cnt)
			return -EINVAL;
		break;
	case NIX_INTF_TYPE_LBK:
		if ((req->chan_base + req->chan_cnt) > 63)
			return -EINVAL;
		bpid = cgx_bpid_cnt + req->chan_base;
		if (req->bpid_per_chan)
			bpid += chan_id;
		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return bpid;
}

int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
				   struct nix_bp_cfg_req *req,
				   struct nix_bp_cfg_rsp *rsp)
{
	int blkaddr, pf, type, chan_id = 0;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	u16 chan_base, chan;
	int bpid, bpid_base;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;

	/* Enable backpressure only for CGX mapped PFs and LBK interface */
	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
		return 0;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);

	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
	chan_base = pfvf->rx_chan_base + req->chan_base;
	bpid = bpid_base;

	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
			return -EINVAL;
		}

		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
			    cfg | (bpid & 0xFF) | BIT_ULL(16));
		chan_id++;
		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
	}

	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel and bpid assigned to it */
		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
					(bpid_base & 0x3FF);
		if (req->bpid_per_chan)
			bpid_base++;
	}
	rsp->chan_cnt = req->chan_cnt;

	return 0;
}
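
/* Editor's note on the response encoding above (inferred from the shift
 * and masks): each rsp->chan_bpid entry carries the channel number in
 * bits [16:10] and the assigned BPID in bits [9:0]; e.g. chan 2 with
 * bpid 99 encodes as (2 << 10) | 99 = 0x863.
 */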

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 1; /* 2 bytes */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}
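
/* Editor's summary of the LSO field programming above (derived from the
 * field.alg values): for every generated segment the hardware rewrites
 * the IP total length with the segment payload length (ADD_PAYLEN), for
 * IPv4 adds the segment number to the IP ID (ADD_SEGNUM), adds the
 * running byte offset to the TCP sequence number (ADD_OFFSET), and
 * patches the TCP flags per NIX_AF_LSO_CFG (TCP_FLAGS).
 */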

static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Get max HW supported format indices */
	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
	nix_hw->lso.total = cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
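
	/* Editor's note on 0xFFF2: TCP flag bits are FIN=0x001, SYN=0x002,
	 * RST=0x004, PSH=0x008, ACK=0x010, URG=0x020. ANDing the flags with
	 * 0xFFF2 clears FIN, RST and PSH (bits 0, 2, 3) while keeping SYN,
	 * ACK and URG, i.e. the "mask out PSH, RST & FIN" behaviour above,
	 * applied to first/middle segments via bits [47:32] and [31:16].
	 */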

	/* Setup default static LSO formats
	 *
	 * Configure format fields for TCPv4 segmentation offload
	 */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
	nix_hw->lso.in_use++;
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size,
			      u64 way_mask)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
		    way_mask << 20);
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}
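
/* Editor's worked example (illustrative, assuming MAX_RSS_INDIR_TBL_SIZE
 * is 256): rss_sz = 256 and rss_grps = 8 give num_indices = 2048, so the
 * size field above is ilog2(2048 / 256) = 3 and the AQ bound check used
 * elsewhere (256UL << 3 = 2048) matches. Each group g is programmed with
 * offset 256 * g and size code ilog2(256) - 1 = 7.
 */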

static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);

	/* Skip NIXLF check for broadcast MCE entry init */
	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_MCE:
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
		/* Check if index exceeds MCE list length */
		if (!hw->nix0->mcast.mce_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;

		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so ignore this.
		 */
		if (rsp)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
	     (req->op == NIX_AQ_INSTOP_WRITE &&
	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;
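
	/* Editor's sketch of the shared result-memory layout implied by the
	 * offsets above (a single entry, reused for every instruction):
	 *   aq->res->base + 0   : struct nix_aq_res_s (completion result)
	 *   aq->res->base + 128 : context for INIT/WRITE, readback for READ
	 *   aq->res->base + 256 : write-enable bitmask for WRITE ops
	 */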

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(mask, &req->mce_mask,
			       sizeof(struct nix_rx_mce_s));
		fallthrough;
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		else if (req->ctype == NIX_AQ_CTYPE_MCE)
			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
			      (test_bit(req->qidx, pfvf->rq_bmap) &
			      ~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
			      (test_bit(req->qidx, pfvf->sq_bmap) &
			      ~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
			      (test_bit(req->qidx, pfvf->cq_bmap) &
			      ~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
			else if (req->ctype == NIX_AQ_CTYPE_MCE)
				memcpy(&rsp->mce, ctx,
				       sizeof(struct nix_rx_mce_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

static const char *nix_get_ctx_name(int ctype)
{
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		return "CQ";
	case NIX_AQ_CTYPE_SQ:
		return "SQ";
	case NIX_AQ_CTYPE_RQ:
		return "RQ";
	case NIX_AQ_CTYPE_RSS:
		return "RSS";
	}
	return "";
}

static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		aq_req.cq.bp_ena = 0;
		aq_req.cq_mask.bp_ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				nix_get_ctx_name(req->ctype), qidx);
		}
	}

	return err;
}

#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
	struct nix_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NIX_AQ_INSTOP_INIT)
		return 0;

	if (req->ctype == NIX_AQ_CTYPE_MCE ||
	    req->ctype == NIX_AQ_CTYPE_DYNO)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
	lock_ctx_req.qidx = req->qidx;
	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
			req->hdr.pcifunc,
			nix_get_ctx_name(req->ctype), req->qidx);
	return err;
}

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = nix_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, intf, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
	if (req->npa_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
			req->npa_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
			return NIX_AF_INVAL_NPA_PF_FUNC;
	}

	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
	if (req->sso_func) {
		/* If default, use 'this' NIXLF's PFFUNC */
		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
			req->sso_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
			return NIX_AF_INVAL_SSO_PF_FUNC;
	}

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
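
	/* Editor's note on the *_CFG encoding used here and for the SQ/CQ
	 * writes below (inferred from the shifts in this function): bit 36
	 * enables NDC caching, bits [35:20] carry way_mask and the low bits
	 * hold (queue count - 1); e.g. rq_cnt = 64, way_mask = 0 gives
	 * cfg = BIT(36) | 63.
	 */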

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);

	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);

	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
				 req->rss_grps, hwctx_size, req->way_mask);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
		    BIT_ULL(36) | req->way_mask << 20);

	/* Setup VLANX TPID's.
	 * Use VLAN1 for 802.1Q
	 * and VLAN0 for 802.1AD.
	 */
	cfg = (0x8100ULL << 16) | 0x88A8ULL;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
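
	/* Editor's note: per the layout above, TPID 0x8100 (802.1Q) lands in
	 * bits [31:16] (VLAN1) and TPID 0x88A8 (802.1AD) in bits [15:0]
	 * (VLAN0), i.e. cfg = 0x810088A8.
	 */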

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = req->npa_func;
	cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable / disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
	if (err)
		goto free_mem;

	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->rx_chan_base = pfvf->rx_chan_base;
	rsp->tx_chan_base = pfvf->tx_chan_base;
	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	/* Get HW supported stat count */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
	/* Get count of CQ IRQs and error IRQs supported per LF */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	rsp->qints = ((cfg >> 12) & 0xFFF);
	rsp->cints = ((cfg >> 24) & 0xFFF);

	return rc;
}

int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_interface_deinit(rvu, pcifunc, nixlf);

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
					 struct nix_mark_format_cfg *req,
					 struct nix_mark_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, rc;
	u32 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	cfg = (((u32)req->offset & 0x7) << 16) |
	      (((u32)req->y_mask & 0xF) << 12) |
	      (((u32)req->y_val & 0xF) << 8) |
	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);

	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
	if (rc < 0) {
		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return NIX_AF_ERR_MARK_CFG_FAIL;
	}

	rsp->mark_format_idx = rc;
	return 0;
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	u64 cir_reg = 0, pir_reg = 0;
	u64 cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	}

	if (cir_reg) {
		cfg = rvu_read64(rvu, blkaddr, cir_reg);
		rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
	}
	if (pir_reg) {
		cfg = rvu_read64(rvu, blkaddr, pir_reg);
		rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
	}
}

static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link;

	if (lvl >= hw->cap.nix_tx_aggr_lvl)
		return;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	if (lvl != NIX_TXSCH_LVL_TL2)
		return;

	/* Reset TL2's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;

	if (is_afvf(pcifunc)) { /* LBK links */
		return hw->cgx_links;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
	}

	/* SDP link */
	return hw->cgx_links + hw->lbk_links;
}

static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
				 int link, int *start, int *end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf = rvu_get_pf(pcifunc);

	if (is_afvf(pcifunc)) { /* LBK links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
	} else { /* SDP link */
		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
	}
}

static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
				      struct nix_hw *nix_hw,
				      struct nix_txsch_alloc_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int schq, req_schq, free_cnt;
	struct nix_txsch *txsch;
	int link, start, end;

	txsch = &nix_hw->txsch[lvl];
	req_schq = req->schq_contig[lvl] + req->schq[lvl];

	if (!req_schq)
		return 0;

	link = nix_get_tx_link(rvu, pcifunc);

	/* For traffic aggregating scheduler level, one queue is enough */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		if (req_schq != 1)
			return NIX_AF_ERR_TLX_ALLOC_FAIL;
		return 0;
	}

	/* Get free SCHQ count and check if request can be accommodated */
	if (hw->cap.nix_fixed_txschq_mapping) {
		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
		if (end <= txsch->schq.max && schq < end &&
		    !test_bit(schq, txsch->schq.bmap))
			free_cnt = 1;
		else
			free_cnt = 0;
	} else {
		free_cnt = rvu_rsrc_free_count(&txsch->schq);
	}

	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	/* If contiguous queues are needed, check for availability */
	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
		return NIX_AF_ERR_TLX_ALLOC_FAIL;

	return 0;
}

static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
			    struct nix_txsch_alloc_rsp *rsp,
			    int lvl, int start, int end)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = rsp->hdr.pcifunc;
	int idx, schq;

	/* For traffic aggregating levels, queue alloc is based
	 * on transmit link to which PF_FUNC is mapped to.
	 */
	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
		/* A single TL queue is allocated */
		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			rsp->schq_contig_list[lvl][0] = start;
		}

		/* Both contig and non-contig reqs don't make sense here */
		if (rsp->schq_contig[lvl])
			rsp->schq[lvl] = 0;

		if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			rsp->schq_list[lvl][0] = start;
		}
		return;
	}

	/* Adjust the queue request count if HW supports
	 * only one queue per level configuration.
	 */
	if (hw->cap.nix_fixed_txschq_mapping) {
		idx = pcifunc & RVU_PFVF_FUNC_MASK;
		schq = start + idx;
		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
			rsp->schq_contig[lvl] = 0;
			rsp->schq[lvl] = 0;
			return;
		}

		if (rsp->schq_contig[lvl]) {
			rsp->schq_contig[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][0] = schq;
			rsp->schq[lvl] = 0;
		} else if (rsp->schq[lvl]) {
			rsp->schq[lvl] = 1;
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_list[lvl][0] = schq;
		}
		return;
	}

	/* Allocate requested contiguous queue indices first */
	if (rsp->schq_contig[lvl]) {
		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
						  txsch->schq.max, start,
						  rsp->schq_contig[lvl], 0);
		if (schq >= end)
			rsp->schq_contig[lvl] = 0;
		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
			set_bit(schq, txsch->schq.bmap);
			rsp->schq_contig_list[lvl][idx] = schq;
			schq++;
		}
	}

	/* Allocate non-contiguous queue indices */
	if (rsp->schq[lvl]) {
		idx = 0;
		for (schq = start; schq < end; schq++) {
			if (!test_bit(schq, txsch->schq.bmap)) {
				set_bit(schq, txsch->schq.bmap);
				rsp->schq_list[lvl][idx++] = schq;
			}
			if (idx == rsp->schq[lvl])
				break;
		}
		/* Update how many were allocated */
		rsp->schq[lvl] = idx;
	}
}

int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int link, blkaddr, rc = 0;
	int lvl, idx, start, end;
	struct nix_txsch *txsch;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	mutex_lock(&rvu->rsrc_lock);

	/* Check if request is valid as per HW capabilities
	 * and can be accommodated.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
		if (rc)
			goto err;
	}

	/* Allocate requested Tx scheduler queues */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		pfvf_map = txsch->pfvf_map;

		if (!req->schq[lvl] && !req->schq_contig[lvl])
			continue;

		rsp->schq[lvl] = req->schq[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];

		link = nix_get_tx_link(rvu, pcifunc);

		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
			start = link;
			end = link;
		} else if (hw->cap.nix_fixed_txschq_mapping) {
			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
		} else {
			start = 0;
			end = txsch->schq.max;
		}

		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);

		/* Reset queue config */
		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
			schq = rsp->schq_contig_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}

		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rsp->schq_list[lvl][idx];
			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
			      NIX_TXSCHQ_CFG_DONE))
				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
		}
	}

	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return rc;
}

static void nix_smq_flush(struct rvu *rvu, int blkaddr,
			  int smq, u16 pcifunc, int nixlf)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id = 0, lmac_id = 0;
	int err, restore_tx_en = 0;
	u64 cfg;

	/* enable cgx tx if disabled */
	if (is_pf_cgxmapped(rvu, pf)) {
		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
						    lmac_id, true);
	}

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
	/* Do SMQ flush and set enqueue xoff */
	cfg |= BIT_ULL(50) | BIT_ULL(49);
	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);

	/* Disable backpressure from physical link,
	 * otherwise SMQ flush may stall.
	 */
	rvu_cgx_enadis_rx_bp(rvu, pf, false);

	/* Wait for flush to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
	if (err)
		dev_err(rvu->dev,
			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);

	rvu_cgx_enadis_rx_bp(rvu, pf, true);
	/* restore cgx tx state */
	if (restore_tx_en)
		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}
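
/* Editor's summary of nix_smq_flush() above: (1) ensure the CGX link is
 * transmitting so queued packets can drain, (2) set bits 50/49 to stop
 * new enqueues and start the flush, (3) drop link RX backpressure so the
 * drain cannot stall, (4) poll until hardware clears BIT(49), then
 * (5) restore backpressure and the original CGX TX state.
 */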

static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	mutex_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
			continue;
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs above aggregation level are shared across all PF
		 * and its VFs, hence skip freeing them.
		 */
		if (lvl >= hw->cap.nix_tx_aggr_lvl)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
				continue;
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
		}
	}
	mutex_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}

static int nix_txschq_free_one(struct rvu *rvu,
			       struct nix_txsch_free_req *req)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int lvl, schq, nixlf, blkaddr;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u32 *pfvf_map;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	lvl = req->schq_lvl;
	schq = req->schq;
	txsch = &nix_hw->txsch[lvl];

	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
		return 0;

	pfvf_map = txsch->pfvf_map;
	mutex_lock(&rvu->rsrc_lock);

	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
		mutex_unlock(&rvu->rsrc_lock);
		goto err;
	}

	/* Flush if it is a SMQ. Onus of disabling
	 * TL2/3 queue links before SMQ flush is on user
	 */
	if (lvl == NIX_TXSCH_LVL_SMQ)
		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);

	/* Free the resource */
	rvu_free_rsrc(&txsch->schq, schq);
	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	mutex_unlock(&rvu->rsrc_lock);
	return 0;
err:
	return NIX_AF_ERR_TLX_INVALID;
}

int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	if (req->flags & TXSCHQ_FREE_ALL)
		return nix_txschq_free(rvu, req->hdr.pcifunc);
	else
		return nix_txschq_free_one(rvu, req);
}

static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
				      int lvl, u64 reg, u64 regval)
{
	u64 regbase = reg & 0xFFFF;
	u16 schq, parent;

	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
		return false;

	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
	/* Check if this schq belongs to this PF/VF or not */
	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
		return false;

	parent = (regval >> 16) & 0x1FF;
	/* Validate MDQ's TL4 parent */
	if (regbase == NIX_AF_MDQX_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
		return false;

	/* Validate TL4's TL3 parent */
	if (regbase == NIX_AF_TL4X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
		return false;

	/* Validate TL3's TL2 parent */
	if (regbase == NIX_AF_TL3X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
		return false;

	/* Validate TL2's TL1 parent */
	if (regbase == NIX_AF_TL2X_PARENT(0) &&
	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
		return false;

	return true;
}

static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
	u64 regbase;

	if (hw->cap.nix_shaping)
		return true;

	/* If shaping and coloring is not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
	regbase = reg & 0xFFFF;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		if (regbase == NIX_AF_TL1X_CIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL2:
		if (regbase == NIX_AF_TL2X_CIR(0) ||
		    regbase == NIX_AF_TL2X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL3:
		if (regbase == NIX_AF_TL3X_CIR(0) ||
		    regbase == NIX_AF_TL3X_PIR(0))
			return false;
		break;
	case NIX_TXSCH_LVL_TL4:
		if (regbase == NIX_AF_TL4X_CIR(0) ||
		    regbase == NIX_AF_TL4X_PIR(0))
			return false;
		break;
	}
	return true;
}

static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
				u16 pcifunc, int blkaddr)
{
	u32 *pfvf_map;
	int schq;

	schq = nix_get_tx_link(rvu, pcifunc);
	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
	/* Skip if PF has already done the config */
	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
		return;
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
		    TXSCH_TL1_DFLT_RR_QTM);
	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}

int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
				    struct nix_txschq_config *req,
				    struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	u64 reg, regval, schq_regbase;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, idx, err;
	int nixlf, schq;
	u32 *pfvf_map;

	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
		return NIX_AF_INVAL_TXSCHQ_CFG;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	txsch = &nix_hw->txsch[req->lvl];
	pfvf_map = txsch->pfvf_map;

	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
	    pcifunc & RVU_PFVF_FUNC_MASK) {
		mutex_lock(&rvu->rsrc_lock);
		if (req->lvl == NIX_TXSCH_LVL_TL1)
			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
		mutex_unlock(&rvu->rsrc_lock);
		return 0;
	}

	for (idx = 0; idx < req->num_regs; idx++) {
		reg = req->reg[idx];
		regval = req->regval[idx];
		schq_regbase = reg & 0xFFFF;

		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
					       txsch->lvl, reg, regval))
			return NIX_AF_INVAL_TXSCHQ_CFG;

		/* Check if shaping and coloring is supported */
		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
			continue;

		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   pcifunc, 0);
			regval &= ~(0x7FULL << 24);
			regval |= ((u64)nixlf << 24);
		}

		/* Clear 'BP_ENA' config, if it's not allowed */
		if (!hw->cap.nix_tx_link_bp) {
			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
			    (schq_regbase & 0xFF00) ==
			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
				regval &= ~BIT_ULL(13);
		}

		/* Mark config as done for TL1 by PF */
		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			mutex_lock(&rvu->rsrc_lock);
			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
							NIX_TXSCHQ_CFG_DONE);
			mutex_unlock(&rvu->rsrc_lock);
		}

		/* SMQ flush is special hence split register writes such
		 * that flush first and write rest of the bits later.
		 */
		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
		    (regval & BIT_ULL(49))) {
			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
			regval &= ~BIT_ULL(49);
		}
		rvu_write64(rvu, blkaddr, reg, regval);
	}

	return 0;
}

static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
			   struct nix_vtag_config *req)
{
	u64 regval = req->vtag_size;

	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
		return -EINVAL;

	if (req->rx.capture_vtag)
		regval |= BIT_ULL(5);
	if (req->rx.strip_vtag)
		regval |= BIT_ULL(4);

	rvu_write64(rvu, blkaddr,
		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
	return 0;
}

int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
				  struct nix_vtag_config *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	if (req->cfg_type) {
		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
		if (err)
			return NIX_AF_ERR_PARAM;
	} else {
		/* TODO: handle tx vtag configuration */
		return 0;
	}

	return 0;
}

static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
			 u16 pcifunc, int next, bool eol)
{
	struct nix_aq_enq_req aq_req;
	int err;

	aq_req.hdr.pcifunc = 0;
	aq_req.ctype = NIX_AQ_CTYPE_MCE;
	aq_req.op = op;
	aq_req.qidx = mce;

	/* Forward bcast pkts to RQ0, RSS not needed */
	aq_req.mce.index = 0;
	aq_req.mce.eol = eol;
	aq_req.mce.pf_func = pcifunc;
	aq_req.mce.next = next;

	/* All fields valid */
	*(u64 *)(&aq_req.mce_mask) = ~0ULL;

	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
	if (err) {
		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
		return err;
	}
	return 0;
}

static int nix_update_mce_list(struct nix_mce_list *mce_list,
			       u16 pcifunc, bool add)
{
	struct mce *mce, *tail = NULL;
	bool delete = false;

	/* Scan through the current list */
	hlist_for_each_entry(mce, &mce_list->head, node) {
		/* If already exists, then delete */
		if (mce->pcifunc == pcifunc && !add) {
			delete = true;
			break;
		}
		tail = mce;
	}

	if (delete) {
		hlist_del(&mce->node);
		kfree(mce);
		mce_list->count--;
		return 0;
	}

	if (!add)
		return 0;

	/* Add a new one to the list, at the tail */
	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
	if (!mce)
		return -ENOMEM;
	mce->pcifunc = pcifunc;
	if (!tail)
		hlist_add_head(&mce->node, &mce_list->head);
	else
		hlist_add_behind(&mce->node, &tail->node);
	mce_list->count++;
	return 0;
}

static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
	int err = 0, idx, next_idx, last_idx;
	struct nix_mce_list *mce_list;
	struct nix_mcast *mcast;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	struct mce *mce;
	int blkaddr;

	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
	if (is_afvf(pcifunc))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return 0;

	mcast = &nix_hw->mcast;

	/* Get this PF/VF func's MCE index */
	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);

	mce_list = &pfvf->bcast_mce_list;
	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
		dev_err(rvu->dev,
			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
			__func__, idx, mce_list->max,
			pcifunc >> RVU_PFVF_PF_SHIFT);
		return -EINVAL;
	}

	mutex_lock(&mcast->mce_lock);

	err = nix_update_mce_list(mce_list, pcifunc, add);
	if (err)
		goto end;

	/* Disable MCAM entry in NPC */
	if (!mce_list->count) {
		rvu_npc_disable_bcast_entry(rvu, pcifunc);
		goto end;
	}

	/* Dump the updated list to HW */
	idx = pfvf->bcast_mce_idx;
	last_idx = idx + mce_list->count - 1;
	hlist_for_each_entry(mce, &mce_list->head, node) {
		if (idx > last_idx)
			break;

		next_idx = idx + 1;
		/* EOL should be set in last MCE */
		err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
				    mce->pcifunc, next_idx,
				    (next_idx > last_idx) ? true : false);
		if (err)
			goto end;
		idx++;
	}

end:
	mutex_unlock(&mcast->mce_lock);
	return err;
}
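
/* Editor's example of the HW list written above (illustrative): a PF
 * with two VFs and bcast_mce_idx = 8 occupies MCE entries 8, 9 and 10;
 * entry 8 points to 9, 9 points to 10, and only entry 10 has EOL set,
 * so one broadcast packet is replicated once per list member.
 */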

static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	int err, pf, numvfs, idx;
	struct rvu_pfvf *pfvf;
	u16 pcifunc;
	u64 cfg;

	/* Skip PF0 (i.e AF) */
	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;
		/* Get numVFs attached to this PF */
		numvfs = (cfg >> 12) & 0xFF;

		pfvf = &rvu->pf[pf];
		/* Save the start MCE */
		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);

		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);

		for (idx = 0; idx < (numvfs + 1); idx++) {
			/* idx-0 is for PF, followed by VFs */
			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * for whether AQ_OP should be INIT/WRITE later on.
			 * Will be updated when a NIXLF is attached/detached to
			 * these PF/VFs.
			 */
			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
					    NIX_AQ_INSTOP_INIT,
					    pcifunc, 0, true);
			if (err)
				return err;
		}
	}
	return 0;
}

static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_mcast *mcast = &nix_hw->mcast;
	struct rvu_hwinfo *hw = rvu->hw;
	int err, size;

	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
	size = (1ULL << size);

	/* Alloc memory for multicast/mirror replication entries */
	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
			 (256UL << MC_TBL_SIZE), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
		    (u64)mcast->mce_ctx->iova);

	/* Set max list length equal to max no of VFs per PF + PF itself */
	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
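
	/* Editor's note (inferred from the AQ bound check earlier in this
	 * file, req->qidx >= (256UL << (cfg & 0xF))): the low nibble of
	 * NIX_AF_RX_MCAST_CFG encodes the MCE table size as 256 << code,
	 * the max list length sits above it from bit 4, and bit 36 enables
	 * caching.
	 */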

	/* Alloc memory for multicast replication buffers */
	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
			 (8UL << MC_BUF_CNT), size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
		    (u64)mcast->mcast_buf->iova);

	/* Alloc pkind for NIX internal RX multicast/mirror replay */
	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);

	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
		    BIT_ULL(20) | MC_BUF_CNT);

	mutex_init(&mcast->mce_lock);

	return nix_setup_bcast_tables(rvu, nix_hw);
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	int err, lvl, schq;
	u64 cfg, reg;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u32), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
		for (schq = 0; schq < txsch->schq.max; schq++)
			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
	}
	return 0;
}

int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
				int blkaddr, u32 cfg)
{
	int fmt_idx;

	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
			return fmt_idx;
	}
	if (fmt_idx >= nix_hw->mark_format.total)
		return -ERANGE;

	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
	nix_hw->mark_format.cfg[fmt_idx] = cfg;
	nix_hw->mark_format.in_use++;
	return fmt_idx;
}

static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
				    int blkaddr)
{
	u32 cfgs[] = {
		[NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
		[NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
		[NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
		[NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
		[NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
		[NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
	};
	int i, rc;
	u64 total;

	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
	nix_hw->mark_format.total = (u8)total;
	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
					       GFP_KERNEL);
	if (!nix_hw->mark_format.cfg)
		return -ENOMEM;
	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
		if (rc < 0)
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
				rc, i);
	}

	return 0;
}

int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int i, nixlf, blkaddr, err;
	u64 stats;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
	if (err)
		return err;

	/* Get stats count supported by HW */
	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	/* Reset tx stats */
	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);

	/* Reset rx stats */
	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);

	return 0;
}
2304 /* Returns the ALG index to be set into NPC_RX_ACTION */
2305 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2309 /* Scan over existing algo entries to find a match */
2310 for (i = 0; i < nix_hw->flowkey.in_use; i++)
2311 if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2317 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2319 int idx, nr_field, key_off, field_marker, keyoff_marker;
2320 int max_key_off, max_bit_pos, group_member;
2321 struct nix_rx_flowkey_alg *field;
2322 struct nix_rx_flowkey_alg tmp;
2323 u32 key_type, valid_key;
2328 #define FIELDS_PER_ALG 5
2329 #define MAX_KEY_OFF 40
2330 /* Clear all fields */
2331 memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2333 /* Each of the 32 possible flow key algorithm definitions should
2334 * fall into the above incremental config (except ALG0). Otherwise a
2335 * single NPC MCAM entry is not sufficient for supporting RSS.
2337 * If a different definition or combination is needed then the NPC
2338 * MCAM has to be programmed to filter such pkts and its action
2339 * should point to this definition to calculate the flowtag or hash.
2341 * The `for loop` goes over _all_ protocol fields and the following
2342 * variables depict the state machine's forward progress logic.
2344 * keyoff_marker - Enabled when hash byte length needs to be accounted
2345 * in field->key_offset update.
2346 * field_marker - Enabled when a new field needs to be selected.
2347 * group_member - Enabled when protocol is part of a group.
2350 keyoff_marker = 0; max_key_off = 0; group_member = 0;
2351 nr_field = 0; key_off = 0; field_marker = 1;
2352 field = &tmp; max_bit_pos = fls(flow_cfg);
2353 for (idx = 0;
2354 idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2355 key_off < MAX_KEY_OFF; idx++) {
2356 key_type = BIT(idx);
2357 valid_key = flow_cfg & key_type;
2358 /* Found a field marker, reset the field values */
2360 memset(&tmp, 0, sizeof(tmp));
2362 field_marker = true;
2363 keyoff_marker = true;
2365 case NIX_FLOW_KEY_TYPE_PORT:
2366 field->sel_chan = true;
2367 /* This should be set to 1 when SEL_CHAN is set */
2370 case NIX_FLOW_KEY_TYPE_IPV4:
2371 case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2372 field->lid = NPC_LID_LC;
2373 field->ltype_match = NPC_LT_LC_IP;
2374 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2375 field->lid = NPC_LID_LG;
2376 field->ltype_match = NPC_LT_LG_TU_IP;
2378 field->hdr_offset = 12; /* SIP offset */
2379 field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2380 field->ltype_mask = 0xF; /* Match only IPv4 */
2381 keyoff_marker = false;
2383 case NIX_FLOW_KEY_TYPE_IPV6:
2384 case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2385 field->lid = NPC_LID_LC;
2386 field->ltype_match = NPC_LT_LC_IP6;
2387 if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2388 field->lid = NPC_LID_LG;
2389 field->ltype_match = NPC_LT_LG_TU_IP6;
2391 field->hdr_offset = 8; /* SIP offset */
2392 field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2393 field->ltype_mask = 0xF; /* Match only IPv6 */
2395 case NIX_FLOW_KEY_TYPE_TCP:
2396 case NIX_FLOW_KEY_TYPE_UDP:
2397 case NIX_FLOW_KEY_TYPE_SCTP:
2398 case NIX_FLOW_KEY_TYPE_INNR_TCP:
2399 case NIX_FLOW_KEY_TYPE_INNR_UDP:
2400 case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2401 field->lid = NPC_LID_LD;
2402 if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2403 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2404 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2405 field->lid = NPC_LID_LH;
2406 field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2408 /* Ltype enum values for NPC_LT_LD_* and NPC_LT_LH_TU_* are the
2409 * same (verified below), so no need to change ltype_match; just
2410 * change the lid for inner protocols.
2412 BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2413 (int)NPC_LT_LH_TU_TCP);
2414 BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2415 (int)NPC_LT_LH_TU_UDP);
2416 BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2417 (int)NPC_LT_LH_TU_SCTP);
2419 if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2420 key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2422 field->ltype_match |= NPC_LT_LD_TCP;
2423 group_member = true;
2424 } else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2425 key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2427 field->ltype_match |= NPC_LT_LD_UDP;
2428 group_member = true;
2429 } else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2430 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2432 field->ltype_match |= NPC_LT_LD_SCTP;
2433 group_member = true;
2435 field->ltype_mask = ~field->ltype_match;
2436 if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2437 key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2438 /* Handle the case where any item of the group is
2439 * enabled but not the final one
2443 group_member = false;
2446 field_marker = false;
2447 keyoff_marker = false;
2450 case NIX_FLOW_KEY_TYPE_NVGRE:
2451 field->lid = NPC_LID_LD;
2452 field->hdr_offset = 4; /* VSID offset */
2454 field->ltype_match = NPC_LT_LD_NVGRE;
2455 field->ltype_mask = 0xF;
2457 case NIX_FLOW_KEY_TYPE_VXLAN:
2458 case NIX_FLOW_KEY_TYPE_GENEVE:
2459 field->lid = NPC_LID_LE;
2461 field->hdr_offset = 4;
2462 field->ltype_mask = 0xF;
2463 field_marker = false;
2464 keyoff_marker = false;
2466 if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2467 field->ltype_match |= NPC_LT_LE_VXLAN;
2468 group_member = true;
2471 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2472 field->ltype_match |= NPC_LT_LE_GENEVE;
2473 group_member = true;
2476 if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2478 field->ltype_mask = ~field->ltype_match;
2479 field_marker = true;
2480 keyoff_marker = true;
2482 group_member = false;
2486 case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2487 case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2488 field->lid = NPC_LID_LA;
2489 field->ltype_match = NPC_LT_LA_ETHER;
2490 if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2491 field->lid = NPC_LID_LF;
2492 field->ltype_match = NPC_LT_LF_TU_ETHER;
2494 field->hdr_offset = 0;
2495 field->bytesm1 = 5; /* DMAC 6 Byte */
2496 field->ltype_mask = 0xF;
2498 case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2499 field->lid = NPC_LID_LC;
2500 field->hdr_offset = 40; /* IPV6 hdr */
2501 field->bytesm1 = 0; /* 1 byte ext hdr */
2502 field->ltype_match = NPC_LT_LC_IP6_EXT;
2503 field->ltype_mask = 0xF;
2505 case NIX_FLOW_KEY_TYPE_GTPU:
2506 field->lid = NPC_LID_LE;
2507 field->hdr_offset = 4;
2508 field->bytesm1 = 3; /* 4 bytes TID */
2509 field->ltype_match = NPC_LT_LE_GTPU;
2510 field->ltype_mask = 0xF;
2515 /* Found a valid flow key type */
2517 field->key_offset = key_off;
2518 memcpy(&alg[nr_field], field, sizeof(*field));
2519 max_key_off = max(max_key_off, field->bytesm1 + 1);
2521 /* Found a field marker, get the next field */
2526 /* Found a keyoff marker, update the new key_off */
2527 if (keyoff_marker) {
2528 key_off += max_key_off;
2532 /* Processed all the flow key types */
2533 if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2536 return NIX_AF_ERR_RSS_NOSPC_FIELD;
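/* Illustrative sketch (not part of the driver): composing a flow_cfg for
 * set_flowkey_fields() from the same NIX_FLOW_KEY_TYPE_* flags used below;
 * a typical TCP/UDP over IPv4/IPv6 4-tuple RSS configuration.
 */
static u32 example_4tuple_flow_cfg(void)
{
	return NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
	       NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP;
}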
2539 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2541 u64 field[FIELDS_PER_ALG];
2545 hw = get_nix_hw(rvu->hw, blkaddr);
2549 /* No room to add a new flow hash algorithm */
2550 if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2551 return NIX_AF_ERR_RSS_NOSPC_ALGO;
2553 /* Generate algo fields for the given flow_cfg */
2554 rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2558 /* Update ALGX_FIELDX register with generated fields */
2559 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2560 rvu_write64(rvu, blkaddr,
2561 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2564 /* Store the flow_cfg for further lookup */
2565 rc = hw->flowkey.in_use;
2566 hw->flowkey.flowkey[rc] = flow_cfg;
2567 hw->flowkey.in_use++;
2572 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2573 struct nix_rss_flowkey_cfg *req,
2574 struct nix_rss_flowkey_cfg_rsp *rsp)
2576 u16 pcifunc = req->hdr.pcifunc;
2577 int alg_idx, nixlf, blkaddr;
2578 struct nix_hw *nix_hw;
2581 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2585 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2589 alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2590 /* Failed to get algo index from the existing list, reserve a new one */
2592 alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2597 rsp->alg_idx = alg_idx;
2598 rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2599 alg_idx, req->mcam_index);
2603 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2605 u32 flowkey_cfg, minkey_cfg;
2608 /* Disable all flow key algx fieldx */
2609 for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2610 for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2611 rvu_write64(rvu, blkaddr,
2612 NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2616 /* IPv4/IPv6 SIP/DIPs */
2617 flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2618 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2622 /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2623 minkey_cfg = flowkey_cfg;
2624 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2625 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2629 /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2630 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2631 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2635 /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2636 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2637 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2641 /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2642 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2643 NIX_FLOW_KEY_TYPE_UDP;
2644 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2648 /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2649 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2650 NIX_FLOW_KEY_TYPE_SCTP;
2651 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2655 /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2656 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2657 NIX_FLOW_KEY_TYPE_SCTP;
2658 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2662 /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2663 flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2664 NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2665 rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2672 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2673 struct nix_set_mac_addr *req,
2674 struct msg_rsp *rsp)
2676 u16 pcifunc = req->hdr.pcifunc;
2677 int blkaddr, nixlf, err;
2678 struct rvu_pfvf *pfvf;
2680 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2684 pfvf = rvu_get_pfvf(rvu, pcifunc);
2686 ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2688 rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2689 pfvf->rx_chan_base, req->mac_addr);
2691 rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2696 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
2697 struct msg_req *req,
2698 struct nix_get_mac_addr_rsp *rsp)
2700 u16 pcifunc = req->hdr.pcifunc;
2701 struct rvu_pfvf *pfvf;
2703 if (!is_nixlf_attached(rvu, pcifunc))
2704 return NIX_AF_ERR_AF_LF_INVALID;
2706 pfvf = rvu_get_pfvf(rvu, pcifunc);
2708 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
2713 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2714 struct msg_rsp *rsp)
2716 bool allmulti = false, disable_promisc = false;
2717 u16 pcifunc = req->hdr.pcifunc;
2718 int blkaddr, nixlf, err;
2719 struct rvu_pfvf *pfvf;
2721 err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2725 pfvf = rvu_get_pfvf(rvu, pcifunc);
2727 if (req->mode & NIX_RX_MODE_PROMISC)
2729 else if (req->mode & NIX_RX_MODE_ALLMULTI)
2732 disable_promisc = true;
2734 if (disable_promisc)
2735 rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2737 rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2738 pfvf->rx_chan_base, allmulti);
2740 rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2745 static void nix_find_link_frs(struct rvu *rvu,
2746 struct nix_frs_cfg *req, u16 pcifunc)
2748 int pf = rvu_get_pf(pcifunc);
2749 struct rvu_pfvf *pfvf;
2754 /* Update with requester's min/max lengths */
2755 pfvf = rvu_get_pfvf(rvu, pcifunc);
2756 pfvf->maxlen = req->maxlen;
2757 if (req->update_minlen)
2758 pfvf->minlen = req->minlen;
2760 maxlen = req->maxlen;
2761 minlen = req->update_minlen ? req->minlen : 0;
2763 /* Get this PF's numVFs and starting hwvf */
2764 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2766 /* For each VF, compare requested max/minlen */
2767 for (vf = 0; vf < numvfs; vf++) {
2768 pfvf = &rvu->hwvf[hwvf + vf];
2769 if (pfvf->maxlen > maxlen)
2770 maxlen = pfvf->maxlen;
2771 if (req->update_minlen &&
2772 pfvf->minlen && pfvf->minlen < minlen)
2773 minlen = pfvf->minlen;
2776 /* Compare requested max/minlen with PF's max/minlen */
2777 pfvf = &rvu->pf[pf];
2778 if (pfvf->maxlen > maxlen)
2779 maxlen = pfvf->maxlen;
2780 if (req->update_minlen &&
2781 pfvf->minlen && pfvf->minlen < minlen)
2782 minlen = pfvf->minlen;
2784 /* Update the request with the max/min of the PF and its VFs */
2785 req->maxlen = maxlen;
2786 if (req->update_minlen)
2787 req->minlen = minlen;
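/* Illustrative note (not part of the driver): the aggregation above keeps
 * a link's limits wide enough for every user of that link. For instance,
 * if the PF requests maxlen 1518 while one of its VFs earlier requested
 * 9200, req->maxlen is raised to 9200; minlen is similarly lowered to the
 * smallest non-zero PF/VF minlen (figures here are illustrative only).
 */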
2790 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2791 struct msg_rsp *rsp)
2793 struct rvu_hwinfo *hw = rvu->hw;
2794 u16 pcifunc = req->hdr.pcifunc;
2795 int pf = rvu_get_pf(pcifunc);
2796 int blkaddr, schq, link = -1;
2797 struct nix_txsch *txsch;
2798 u64 cfg, lmac_fifo_len;
2799 struct nix_hw *nix_hw;
2800 u8 cgx = 0, lmac = 0;
2802 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2804 return NIX_AF_ERR_AF_LF_INVALID;
2806 nix_hw = get_nix_hw(rvu->hw, blkaddr);
2810 if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2811 return NIX_AF_ERR_FRS_INVALID;
2813 if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2814 return NIX_AF_ERR_FRS_INVALID;
2816 /* Check if the requester wants to update SMQs */
2817 if (!req->update_smq)
2820 /* Update min/maxlen in each SMQ attached to this PF/VF */
2821 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2822 mutex_lock(&rvu->rsrc_lock);
2823 for (schq = 0; schq < txsch->schq.max; schq++) {
2824 if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2826 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2827 cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2828 if (req->update_minlen)
2829 cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2830 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2832 mutex_unlock(&rvu->rsrc_lock);
2835 /* Check if config is for SDP link */
2836 if (req->sdp_link) {
2838 return NIX_AF_ERR_RX_LINK_INVALID;
2839 link = hw->cgx_links + hw->lbk_links;
2843 /* Check if the request is from CGX mapped RVU PF */
2844 if (is_pf_cgxmapped(rvu, pf)) {
2845 /* Get CGX and LMAC to which this PF is mapped and find link */
2846 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2847 link = (cgx * hw->lmac_per_cgx) + lmac;
2848 } else if (pf == 0) {
2849 /* For VFs of PF0, ingress is the LBK port, so configure the LBK link */
2850 link = hw->cgx_links;
2854 return NIX_AF_ERR_RX_LINK_INVALID;
2856 nix_find_link_frs(rvu, req, pcifunc);
2859 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2860 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2861 if (req->update_minlen)
2862 cfg = (cfg & ~0xFFFFULL) | req->minlen;
2863 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2865 if (req->sdp_link || pf == 0)
2868 /* Update transmit credits for CGX links */
2870 CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2871 cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2872 cfg &= ~(0xFFFFFULL << 12);
2873 cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
2874 rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
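/* Illustrative note (not part of the driver): credits above are in 16-byte
 * units, carved from the per-LMAC share of the CGX FIFO minus one max-sized
 * frame. E.g. with an assumed 12288-byte per-LMAC share and maxlen 1514,
 * the value programmed is (12288 - 1514) / 16 = 673 (numbers illustrative).
 */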
2878 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2879 struct msg_rsp *rsp)
2881 struct npc_mcam_alloc_entry_req alloc_req = { };
2882 struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2883 struct npc_mcam_free_entry_req free_req = { };
2884 u16 pcifunc = req->hdr.pcifunc;
2885 int blkaddr, nixlf, err;
2886 struct rvu_pfvf *pfvf;
2888 /* LBK VFs do not have a separate MCAM UCAST entry, hence
2889 * skip allocating rxvlan for them
2891 if (is_afvf(pcifunc))
2894 pfvf = rvu_get_pfvf(rvu, pcifunc);
2898 /* alloc new mcam entry */
2899 alloc_req.hdr.pcifunc = pcifunc;
2900 alloc_req.count = 1;
2902 err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2907 /* update entry to enable rxvlan offload */
2908 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2910 err = NIX_AF_ERR_AF_LF_INVALID;
2914 nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2916 err = NIX_AF_ERR_AF_LF_INVALID;
2920 pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2921 /* all it means is that rxvlan_index is valid */
2922 pfvf->rxvlan = true;
2924 err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2930 free_req.hdr.pcifunc = pcifunc;
2931 free_req.entry = alloc_rsp.entry_list[0];
2932 rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2933 pfvf->rxvlan = false;
2937 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2938 struct msg_rsp *rsp)
2940 int nixlf, blkaddr, err;
2943 err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
2947 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2948 /* Set the interface configuration */
2949 if (req->len_verify & BIT(0))
2952 cfg &= ~BIT_ULL(41);
2954 if (req->len_verify & BIT(1))
2957 cfg &= ~BIT_ULL(40);
2959 if (req->csum_verify & BIT(0))
2962 cfg &= ~BIT_ULL(37);
2964 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2969 static void nix_link_config(struct rvu *rvu, int blkaddr)
2971 struct rvu_hwinfo *hw = rvu->hw;
2972 int cgx, lmac_cnt, slink, link;
2975 /* Set default min/max packet lengths allowed on NIX Rx links.
2977 * With the HW-reset minlen value of 60 bytes, HW will treat ARP pkts
2978 * as undersized and report them to SW as error pkts; hence
2979 * set minlen to 40 bytes.
2981 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2982 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2983 NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2986 if (hw->sdp_links) {
2987 link = hw->cgx_links + hw->lbk_links;
2988 rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2989 SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2992 /* Set credits for Tx links assuming max packet length allowed.
2993 * This will be reconfigured based on MTU set for PF/VF.
2995 for (cgx = 0; cgx < hw->cgx; cgx++) {
2996 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2997 tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2998 /* Enable credits and set credit pkt count to max allowed */
2999 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3000 slink = cgx * hw->lmac_per_cgx;
3001 for (link = slink; link < (slink + lmac_cnt); link++) {
3002 rvu_write64(rvu, blkaddr,
3003 NIX_AF_TX_LINKX_NORM_CREDIT(link),
3008 /* Set Tx credits for LBK link */
3009 slink = hw->cgx_links;
3010 for (link = slink; link < (slink + hw->lbk_links); link++) {
3011 tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
3012 /* Enable credits and set credit pkt count to max allowed */
3013 tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3014 rvu_write64(rvu, blkaddr,
3015 NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
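/* Illustrative sketch (not part of the driver): a hypothetical helper
 * mirroring how the NORM_CREDIT values above are composed: byte credits
 * (16-byte units) from bit 12 up, packet-count credits in bits 10:2 (set
 * to the max, 0x1FF), and the enable bit at bit 1.
 */
static inline u64 example_norm_credit(u64 byte_credits)
{
	return (byte_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
}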
3019 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3024 /* Start X2P bus calibration */
3025 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3026 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3027 /* Wait for calibration to complete */
3028 err = rvu_poll_reg(rvu, blkaddr,
3029 NIX_AF_STATUS, BIT_ULL(10), false);
3031 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3035 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3036 /* Check if CGX devices are ready */
3037 for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3038 /* Skip when cgx port is not available */
3039 if (!rvu_cgx_pdata(idx, rvu) ||
3040 (status & (BIT_ULL(16 + idx))))
3043 "CGX%d didn't respond to NIX X2P calibration\n", idx);
3047 /* Check if LBK is ready */
3048 if (!(status & BIT_ULL(19))) {
3050 "LBK didn't respond to NIX X2P calibration\n");
3054 /* Clear 'calibrate_x2p' bit */
3055 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3056 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3057 if (err || (status & 0x3FFULL))
3059 "NIX X2P calibration failed, status 0x%llx\n", status);
3065 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3070 /* Set admin queue endianness */
3071 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3074 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3077 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3080 /* Do not bypass NDC cache */
3081 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3083 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3084 /* Disable caching of SQB aka SQEs */
3087 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3089 /* Result structure can be followed by RQ/SQ/CQ context at
3090 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3091 * operation type. Alloc sufficient result memory for all operations.
3093 err = rvu_aq_alloc(rvu, &block->aq,
3094 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3095 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3099 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3100 rvu_write64(rvu, block->addr,
3101 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
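/* Illustrative note (not part of the driver): per the sizing passed to
 * rvu_aq_alloc() above, each AQ result slot is laid out as:
 *
 *   offset 0   : struct nix_aq_res_s (128-byte aligned)
 *   offset 128 : RQ/SQ/CQ context, depending on operation type
 *   offset 256 : write mask, for WRITE operations
 */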
3105 int rvu_nix_init(struct rvu *rvu)
3107 struct rvu_hwinfo *hw = rvu->hw;
3108 struct rvu_block *block;
3112 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3115 block = &hw->block[blkaddr];
3117 if (is_rvu_96xx_B0(rvu)) {
3118 /* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3119 * internal state when conditional clocks are turned off.
3120 * Hence enable them.
3122 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3123 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3125 /* Set chan/link to backpressure TL3 instead of TL2 */
3126 rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3128 /* Disable SQ manager's sticky mode operation (set TM6 = 0).
3129 * This sticky mode is known to cause SQ stalls when multiple SQs
3130 * are mapped to the same SMQ and transmit pkts at the same time.
3132 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3133 cfg &= ~BIT_ULL(15);
3134 rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3137 /* Calibrate X2P bus to check if CGX/LBK links are fine */
3138 err = nix_calibrate_x2p(rvu, blkaddr);
3142 /* Set num of links of each type */
3143 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
3144 hw->cgx = (cfg >> 12) & 0xF;
3145 hw->lmac_per_cgx = (cfg >> 8) & 0xF;
3146 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
3150 /* Initialize admin queue */
3151 err = nix_aq_init(rvu, block);
3155 /* Restore CINT timer delay to HW reset values */
3156 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3158 if (blkaddr == BLKADDR_NIX0) {
3159 hw->nix0 = devm_kzalloc(rvu->dev,
3160 sizeof(struct nix_hw), GFP_KERNEL);
3164 err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3168 err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3172 err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3176 /* Configure segmentation offload formats */
3177 nix_setup_lso(rvu, hw->nix0, blkaddr);
3179 /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3180 * This helps the HW protocol checker identify headers
3181 * and validate lengths and checksums.
3183 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3184 (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
3185 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3186 (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
3187 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3188 (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
3189 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3190 (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
3191 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3192 (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
3193 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3194 (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
3195 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3196 (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
3197 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3198 (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
3199 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3200 (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
3201 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3202 (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
3203 rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3204 (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
3207 err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3211 /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3212 nix_link_config(rvu, blkaddr);
3214 /* Enable Channel backpressure */
3215 rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
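/* Illustrative sketch (not part of the driver): a hypothetical helper
 * mirroring the layer-info encoding written to the NIX_AF_RX_DEF_*
 * registers above: layer id in bits 15:8, ltype in bits 7:4 and the
 * ltype mask in bits 3:0.
 */
static inline u64 example_rx_def_layer_cfg(u8 lid, u8 ltype)
{
	return ((u64)lid << 8) | ((u64)ltype << 4) | 0x0F;
}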
3220 void rvu_nix_freemem(struct rvu *rvu)
3222 struct rvu_hwinfo *hw = rvu->hw;
3223 struct rvu_block *block;
3224 struct nix_txsch *txsch;
3225 struct nix_mcast *mcast;
3226 struct nix_hw *nix_hw;
3229 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3233 block = &hw->block[blkaddr];
3234 rvu_aq_free(rvu, block->aq);
3236 if (blkaddr == BLKADDR_NIX0) {
3237 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3241 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3242 txsch = &nix_hw->txsch[lvl];
3243 kfree(txsch->schq.bmap);
3246 mcast = &nix_hw->mcast;
3247 qmem_free(rvu->dev, mcast->mce_ctx);
3248 qmem_free(rvu->dev, mcast->mcast_buf);
3249 mutex_destroy(&mcast->mce_lock);
3253 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3254 struct msg_rsp *rsp)
3256 u16 pcifunc = req->hdr.pcifunc;
3259 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3263 rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3265 return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3268 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3269 struct msg_rsp *rsp)
3271 u16 pcifunc = req->hdr.pcifunc;
3274 err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3278 rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3280 return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3283 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3285 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3286 struct hwctx_disable_req ctx_req;
3289 ctx_req.hdr.pcifunc = pcifunc;
3291 /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3292 nix_interface_deinit(rvu, pcifunc, nixlf);
3293 nix_rx_sync(rvu, blkaddr);
3294 nix_txschq_free(rvu, pcifunc);
3296 rvu_cgx_start_stop_io(rvu, pcifunc, false);
3299 ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3300 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3302 dev_err(rvu->dev, "SQ ctx disable failed\n");
3306 ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3307 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3309 dev_err(rvu->dev, "RQ ctx disable failed\n");
3313 ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3314 err = nix_lf_hwctx_disable(rvu, &ctx_req);
3316 dev_err(rvu->dev, "CQ ctx disable failed\n");
3319 nix_ctx_free(rvu, pfvf);
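/* Illustrative note (not part of the driver): the teardown order above
 * matters: NPC MCAM entries are cleaned up first so no new traffic is
 * steered to the LF, in-flight RX is synced to memory, Tx scheduler
 * queues are freed, CGX Rx/Tx is stopped, then the SQ/RQ/CQ contexts
 * are disabled before the LF's context memory is finally freed.
 */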
3322 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3323 struct nix_lso_format_cfg *req,
3324 struct nix_lso_format_cfg_rsp *rsp)
3326 u16 pcifunc = req->hdr.pcifunc;
3327 struct nix_hw *nix_hw;
3328 struct rvu_pfvf *pfvf;
3329 int blkaddr, idx, f;
3332 pfvf = rvu_get_pfvf(rvu, pcifunc);
3333 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3334 if (!pfvf->nixlf || blkaddr < 0)
3335 return NIX_AF_ERR_AF_LF_INVALID;
3337 nix_hw = get_nix_hw(rvu->hw, blkaddr);
3341 /* Find existing matching LSO format, if any */
3342 for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3343 for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3344 reg = rvu_read64(rvu, blkaddr,
3345 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3346 if (req->fields[f] != (reg & req->field_mask))
3350 if (f == NIX_LSO_FIELD_MAX)
3354 if (idx < nix_hw->lso.in_use) {
3356 rsp->lso_format_idx = idx;
3360 if (nix_hw->lso.in_use == nix_hw->lso.total)
3361 return NIX_AF_ERR_LSO_CFG_FAIL;
3363 rsp->lso_format_idx = nix_hw->lso.in_use++;
3365 for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3366 rvu_write64(rvu, blkaddr,
3367 NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),