// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
19 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
/* For now considering MC resources needed for broadcast
 * pkt replication only. i.e. 256 HWVFs + 12 PFs.
 */
#define MC_TBL_SIZE	MC_TBL_SZ_512
#define MC_BUF_CNT	MC_BUF_CNT_128

struct mce {
	struct hlist_node node;
	u16 idx;
	u16 pcifunc;
};
57 static void nix_mce_list_init(struct nix_mce_list *list, int max)
59 INIT_HLIST_HEAD(&list->head);
64 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
71 idx = mcast->next_free_mce;
72 mcast->next_free_mce += count;
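/* MCE list slots are handed out with a simple bump allocator; there is no
 * corresponding free in this file, since broadcast replication lists are
 * only carved out once at NIX block init (see nix_setup_bcast_tables()).
 */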
76 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
78 if (blkaddr == BLKADDR_NIX0 && hw->nix0)
84 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
85 int lvl, u16 pcifunc, u16 schq)
87 struct nix_txsch *txsch;
88 struct nix_hw *nix_hw;
90 nix_hw = get_nix_hw(rvu->hw, blkaddr);
94 txsch = &nix_hw->txsch[lvl];
95 /* Check out of bounds */
96 if (schq >= txsch->schq.max)
99 spin_lock(&rvu->rsrc_lock);
100 if (txsch->pfvf_map[schq] != pcifunc) {
101 spin_unlock(&rvu->rsrc_lock);
104 spin_unlock(&rvu->rsrc_lock);
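/* Map a NIX LF to its physical interface (CGX LMAC or LBK channel), set up
 * its packet parsing kind, and add it to the broadcast packet replication
 * list so broadcast traffic arriving on the link reaches this PF/VF.
 */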
108 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
110 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
115 pf = rvu_get_pf(pcifunc);
116 if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
120 case NIX_INTF_TYPE_CGX:
121 pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
122 rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
124 pkind = rvu_npc_get_pkind(rvu, pf);
"PF_FUNC 0x%x: Invalid pkind\n", pcifunc);
130 cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
131 rvu_npc_set_pkind(rvu, pkind, pfvf);
133 case NIX_INTF_TYPE_LBK:
137 /* Add this PF_FUNC to bcast pkt replication list */
138 err = nix_update_bcast_mce_list(rvu, pcifunc, true);
"Failed to enable PF_FUNC 0x%x in bcast list\n",
147 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
151 /* Remove this PF_FUNC from bcast pkt replication list */
152 err = nix_update_bcast_mce_list(rvu, pcifunc, false);
"Failed to disable PF_FUNC 0x%x in bcast list\n",
160 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
161 u64 format, bool v4, u64 *fidx)
163 struct nix_lso_format field = {0};
165 /* IP's Length field */
166 field.layer = NIX_TXLAYER_OL3;
167 /* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
168 field.offset = v4 ? 2 : 4;
169 field.sizem1 = 1; /* i.e 2 bytes */
170 field.alg = NIX_LSOALG_ADD_PAYLEN;
171 rvu_write64(rvu, blkaddr,
172 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
175 /* No ID field in IPv6 header */
180 field.layer = NIX_TXLAYER_OL3;
182 field.sizem1 = 1; /* i.e 2 bytes */
183 field.alg = NIX_LSOALG_ADD_SEGNUM;
184 rvu_write64(rvu, blkaddr,
185 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
189 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
190 u64 format, u64 *fidx)
192 struct nix_lso_format field = {0};
194 /* TCP's sequence number field */
195 field.layer = NIX_TXLAYER_OL4;
197 field.sizem1 = 3; /* i.e 4 bytes */
198 field.alg = NIX_LSOALG_ADD_OFFSET;
199 rvu_write64(rvu, blkaddr,
200 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
203 /* TCP's flags field */
204 field.layer = NIX_TXLAYER_OL4;
206 field.sizem1 = 0; /* not needed */
207 field.alg = NIX_LSOALG_TCP_FLAGS;
208 rvu_write64(rvu, blkaddr,
209 NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
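/* Program the LSO formats used for TSO. Each format is a list of up to
 * eight field operations; the L3 helper fixes up the IP length/ID fields
 * and the L4 helper fixes up the TCP sequence number and flags per segment.
 */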
213 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
215 u64 cfg, idx, fidx = 0;
218 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
219 /* For TSO, set first and middle segment flags to
220 * mask out PSH, RST & FIN flags in TCP packet
222 cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
223 cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
224 rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
226 /* Configure format fields for TCPv4 segmentation offload */
227 idx = NIX_LSO_FORMAT_IDX_TSOV4;
228 nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
229 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
231 /* Set rest of the fields to NOP */
232 for (; fidx < 8; fidx++) {
233 rvu_write64(rvu, blkaddr,
234 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
237 /* Configure format fields for TCPv6 segmentation offload */
238 idx = NIX_LSO_FORMAT_IDX_TSOV6;
240 nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
241 nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
243 /* Set rest of the fields to NOP */
244 for (; fidx < 8; fidx++) {
245 rvu_write64(rvu, blkaddr,
246 NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
250 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
252 kfree(pfvf->rq_bmap);
253 kfree(pfvf->sq_bmap);
254 kfree(pfvf->cq_bmap);
256 qmem_free(rvu->dev, pfvf->rq_ctx);
258 qmem_free(rvu->dev, pfvf->sq_ctx);
260 qmem_free(rvu->dev, pfvf->cq_ctx);
262 qmem_free(rvu->dev, pfvf->rss_ctx);
263 if (pfvf->nix_qints_ctx)
264 qmem_free(rvu->dev, pfvf->nix_qints_ctx);
265 if (pfvf->cq_ints_ctx)
266 qmem_free(rvu->dev, pfvf->cq_ints_ctx);
268 pfvf->rq_bmap = NULL;
269 pfvf->cq_bmap = NULL;
270 pfvf->sq_bmap = NULL;
274 pfvf->rss_ctx = NULL;
275 pfvf->nix_qints_ctx = NULL;
276 pfvf->cq_ints_ctx = NULL;
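/* Allocate the RSS indirection table contexts (rss_sz entries per group)
 * and point the LF's RSS base at them; with an rss_sz of zero RSS is simply
 * left disabled for this LF.
 */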
279 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
280 struct rvu_pfvf *pfvf, int nixlf,
281 int rss_sz, int rss_grps, int hwctx_size)
283 int err, grp, num_indices;
285 /* RSS is not requested for this NIXLF */
288 num_indices = rss_sz * rss_grps;
290 /* Alloc NIX RSS HW context memory and config the base */
291 err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
295 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
296 (u64)pfvf->rss_ctx->iova);
298 /* Config full RSS table size, enable RSS and caching */
299 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
300 BIT_ULL(36) | BIT_ULL(4) |
301 ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
302 /* Config RSS group offset and sizes */
303 for (grp = 0; grp < rss_grps; grp++)
304 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
305 ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
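/* Submit a single instruction to the NIX admin queue and poll the result's
 * completion code before returning.
 */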
309 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
310 struct nix_aq_inst_s *inst)
312 struct admin_queue *aq = block->aq;
313 struct nix_aq_res_s *result;
317 result = (struct nix_aq_res_s *)aq->res->base;
319 /* Get current head pointer where to append this instruction */
320 reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
321 head = (reg >> 4) & AQ_PTR_MASK;
323 memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
324 (void *)inst, aq->inst->entry_sz);
325 memset(result, 0, sizeof(*result));
326 /* sync into memory */
329 /* Ring the doorbell and wait for result */
330 rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
331 while (result->compcode == NIX_AQ_COMP_NOTDONE) {
339 if (result->compcode != NIX_AQ_COMP_GOOD)
340 /* TODO: Replace this with some error code */
346 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
347 struct nix_aq_enq_rsp *rsp)
349 struct rvu_hwinfo *hw = rvu->hw;
350 u16 pcifunc = req->hdr.pcifunc;
351 int nixlf, blkaddr, rc = 0;
352 struct nix_aq_inst_s inst;
353 struct rvu_block *block;
354 struct admin_queue *aq;
355 struct rvu_pfvf *pfvf;
360 pfvf = rvu_get_pfvf(rvu, pcifunc);
361 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
362 if (!pfvf->nixlf || blkaddr < 0)
363 return NIX_AF_ERR_AF_LF_INVALID;
365 block = &hw->block[blkaddr];
368 dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
369 return NIX_AF_ERR_AQ_ENQUEUE;
372 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
374 return NIX_AF_ERR_AF_LF_INVALID;
376 switch (req->ctype) {
377 case NIX_AQ_CTYPE_RQ:
378 /* Check if index exceeds max no of queues */
379 if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
380 rc = NIX_AF_ERR_AQ_ENQUEUE;
382 case NIX_AQ_CTYPE_SQ:
383 if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
384 rc = NIX_AF_ERR_AQ_ENQUEUE;
386 case NIX_AQ_CTYPE_CQ:
387 if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
388 rc = NIX_AF_ERR_AQ_ENQUEUE;
390 case NIX_AQ_CTYPE_RSS:
391 /* Check if RSS is enabled and qidx is within range */
392 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
393 if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
394 (req->qidx >= (256UL << (cfg & 0xF))))
395 rc = NIX_AF_ERR_AQ_ENQUEUE;
397 case NIX_AQ_CTYPE_MCE:
398 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
399 /* Check if index exceeds MCE list length */
400 if (!hw->nix0->mcast.mce_ctx ||
401 (req->qidx >= (256UL << (cfg & 0xF))))
402 rc = NIX_AF_ERR_AQ_ENQUEUE;
404 /* Adding multicast lists for requests from PF/VFs is not
405 * yet supported, so ignore this.
408 rc = NIX_AF_ERR_AQ_ENQUEUE;
411 rc = NIX_AF_ERR_AQ_ENQUEUE;
/* Check whether the SMQ pointed to by this SQ belongs to this PF/VF */
418 if (req->ctype == NIX_AQ_CTYPE_SQ &&
419 req->op != NIX_AQ_INSTOP_WRITE) {
420 if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
421 pcifunc, req->sq.smq))
422 return NIX_AF_ERR_AQ_ENQUEUE;
425 memset(&inst, 0, sizeof(struct nix_aq_inst_s));
427 inst.cindex = req->qidx;
428 inst.ctype = req->ctype;
430 /* Currently we are not supporting enqueuing multiple instructions,
431 * so always choose first entry in result memory.
433 inst.res_addr = (u64)aq->res->iova;
435 /* Clean result + context memory */
436 memset(aq->res->base, 0, aq->res->entry_sz);
437 /* Context needs to be written at RES_ADDR + 128 */
438 ctx = aq->res->base + 128;
439 /* Mask needs to be written at RES_ADDR + 256 */
440 mask = aq->res->base + 256;
443 case NIX_AQ_INSTOP_WRITE:
444 if (req->ctype == NIX_AQ_CTYPE_RQ)
445 memcpy(mask, &req->rq_mask,
446 sizeof(struct nix_rq_ctx_s));
447 else if (req->ctype == NIX_AQ_CTYPE_SQ)
448 memcpy(mask, &req->sq_mask,
449 sizeof(struct nix_sq_ctx_s));
450 else if (req->ctype == NIX_AQ_CTYPE_CQ)
451 memcpy(mask, &req->cq_mask,
452 sizeof(struct nix_cq_ctx_s));
453 else if (req->ctype == NIX_AQ_CTYPE_RSS)
454 memcpy(mask, &req->rss_mask,
455 sizeof(struct nix_rsse_s));
456 else if (req->ctype == NIX_AQ_CTYPE_MCE)
457 memcpy(mask, &req->mce_mask,
458 sizeof(struct nix_rx_mce_s));
460 case NIX_AQ_INSTOP_INIT:
461 if (req->ctype == NIX_AQ_CTYPE_RQ)
462 memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
463 else if (req->ctype == NIX_AQ_CTYPE_SQ)
464 memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
465 else if (req->ctype == NIX_AQ_CTYPE_CQ)
466 memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
467 else if (req->ctype == NIX_AQ_CTYPE_RSS)
468 memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
469 else if (req->ctype == NIX_AQ_CTYPE_MCE)
470 memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
472 case NIX_AQ_INSTOP_NOP:
473 case NIX_AQ_INSTOP_READ:
474 case NIX_AQ_INSTOP_LOCK:
475 case NIX_AQ_INSTOP_UNLOCK:
478 rc = NIX_AF_ERR_AQ_ENQUEUE;
482 spin_lock(&aq->lock);
484 /* Submit the instruction to AQ */
485 rc = nix_aq_enqueue_wait(rvu, block, &inst);
487 spin_unlock(&aq->lock);
491 /* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
492 if (req->op == NIX_AQ_INSTOP_INIT) {
493 if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
494 __set_bit(req->qidx, pfvf->rq_bmap);
495 if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
496 __set_bit(req->qidx, pfvf->sq_bmap);
497 if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
498 __set_bit(req->qidx, pfvf->cq_bmap);
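	/* For WRITE ops the ENA bit may or may not be touched; derive the
	 * resulting enable state from the new value, its mask and the current
	 * bitmap, then update the bitmap to match.
	 */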
501 if (req->op == NIX_AQ_INSTOP_WRITE) {
502 if (req->ctype == NIX_AQ_CTYPE_RQ) {
503 ena = (req->rq.ena & req->rq_mask.ena) |
504 (test_bit(req->qidx, pfvf->rq_bmap) &
507 __set_bit(req->qidx, pfvf->rq_bmap);
509 __clear_bit(req->qidx, pfvf->rq_bmap);
511 if (req->ctype == NIX_AQ_CTYPE_SQ) {
ena = (req->sq.ena & req->sq_mask.ena) |
513 (test_bit(req->qidx, pfvf->sq_bmap) &
516 __set_bit(req->qidx, pfvf->sq_bmap);
518 __clear_bit(req->qidx, pfvf->sq_bmap);
520 if (req->ctype == NIX_AQ_CTYPE_CQ) {
ena = (req->cq.ena & req->cq_mask.ena) |
522 (test_bit(req->qidx, pfvf->cq_bmap) &
525 __set_bit(req->qidx, pfvf->cq_bmap);
527 __clear_bit(req->qidx, pfvf->cq_bmap);
532 /* Copy read context into mailbox */
533 if (req->op == NIX_AQ_INSTOP_READ) {
534 if (req->ctype == NIX_AQ_CTYPE_RQ)
535 memcpy(&rsp->rq, ctx,
536 sizeof(struct nix_rq_ctx_s));
537 else if (req->ctype == NIX_AQ_CTYPE_SQ)
538 memcpy(&rsp->sq, ctx,
539 sizeof(struct nix_sq_ctx_s));
540 else if (req->ctype == NIX_AQ_CTYPE_CQ)
541 memcpy(&rsp->cq, ctx,
542 sizeof(struct nix_cq_ctx_s));
543 else if (req->ctype == NIX_AQ_CTYPE_RSS)
544 memcpy(&rsp->rss, ctx,
sizeof(struct nix_rsse_s));
546 else if (req->ctype == NIX_AQ_CTYPE_MCE)
547 memcpy(&rsp->mce, ctx,
548 sizeof(struct nix_rx_mce_s));
552 spin_unlock(&aq->lock);
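/* Disable (clear ENA on) every enabled RQ/SQ/CQ context of an LF by issuing
 * masked AQ WRITEs for each queue set in the corresponding bitmap.
 */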
556 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
558 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
559 struct nix_aq_enq_req aq_req;
564 if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
565 return NIX_AF_ERR_AQ_ENQUEUE;
567 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
568 aq_req.hdr.pcifunc = req->hdr.pcifunc;
570 if (req->ctype == NIX_AQ_CTYPE_CQ) {
572 aq_req.cq_mask.ena = 1;
573 q_cnt = pfvf->cq_ctx->qsize;
574 bmap = pfvf->cq_bmap;
576 if (req->ctype == NIX_AQ_CTYPE_SQ) {
578 aq_req.sq_mask.ena = 1;
579 q_cnt = pfvf->sq_ctx->qsize;
580 bmap = pfvf->sq_bmap;
582 if (req->ctype == NIX_AQ_CTYPE_RQ) {
584 aq_req.rq_mask.ena = 1;
585 q_cnt = pfvf->rq_ctx->qsize;
586 bmap = pfvf->rq_bmap;
589 aq_req.ctype = req->ctype;
590 aq_req.op = NIX_AQ_INSTOP_WRITE;
592 for (qidx = 0; qidx < q_cnt; qidx++) {
593 if (!test_bit(qidx, bmap))
596 rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
599 dev_err(rvu->dev, "Failed to disable %s:%d context\n",
600 (req->ctype == NIX_AQ_CTYPE_CQ) ?
601 "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
609 int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
610 struct nix_aq_enq_req *req,
611 struct nix_aq_enq_rsp *rsp)
613 return rvu_nix_aq_enq_inst(rvu, req, rsp);
616 int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
617 struct hwctx_disable_req *req,
620 return nix_lf_hwctx_disable(rvu, req);
623 int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
624 struct nix_lf_alloc_req *req,
625 struct nix_lf_alloc_rsp *rsp)
627 int nixlf, qints, hwctx_size, err, rc = 0;
628 struct rvu_hwinfo *hw = rvu->hw;
629 u16 pcifunc = req->hdr.pcifunc;
630 struct rvu_block *block;
631 struct rvu_pfvf *pfvf;
635 if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
636 return NIX_AF_ERR_PARAM;
638 pfvf = rvu_get_pfvf(rvu, pcifunc);
639 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
640 if (!pfvf->nixlf || blkaddr < 0)
641 return NIX_AF_ERR_AF_LF_INVALID;
643 block = &hw->block[blkaddr];
644 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
646 return NIX_AF_ERR_AF_LF_INVALID;
648 /* If RSS is being enabled, check if requested config is valid.
649 * RSS table size should be power of two, otherwise
650 * RSS_GRP::OFFSET + adder might go beyond that group or
651 * won't be able to use entire table.
653 if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
654 !is_power_of_2(req->rss_sz)))
655 return NIX_AF_ERR_RSS_SIZE_INVALID;
658 (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
659 return NIX_AF_ERR_RSS_GRPS_INVALID;
661 /* Reset this NIX LF */
662 err = rvu_lf_reset(rvu, block, nixlf);
664 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
665 block->addr - BLKADDR_NIX0, nixlf);
666 return NIX_AF_ERR_LF_RESET;
669 ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
671 /* Alloc NIX RQ HW context memory and config the base */
672 hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
673 err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
677 pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
681 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
682 (u64)pfvf->rq_ctx->iova);
684 /* Set caching and queue count in HW */
685 cfg = BIT_ULL(36) | (req->rq_cnt - 1);
686 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
688 /* Alloc NIX SQ HW context memory and config the base */
689 hwctx_size = 1UL << (ctx_cfg & 0xF);
690 err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
694 pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
698 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
699 (u64)pfvf->sq_ctx->iova);
700 cfg = BIT_ULL(36) | (req->sq_cnt - 1);
701 rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
703 /* Alloc NIX CQ HW context memory and config the base */
704 hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
705 err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
709 pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
713 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
714 (u64)pfvf->cq_ctx->iova);
715 cfg = BIT_ULL(36) | (req->cq_cnt - 1);
716 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
718 /* Initialize receive side scaling (RSS) */
719 hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
720 err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
721 req->rss_sz, req->rss_grps, hwctx_size);
725 /* Alloc memory for CQINT's HW contexts */
726 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
727 qints = (cfg >> 24) & 0xFFF;
728 hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
729 err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
733 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
734 (u64)pfvf->cq_ints_ctx->iova);
735 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
737 /* Alloc memory for QINT's HW contexts */
738 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
739 qints = (cfg >> 12) & 0xFFF;
740 hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
741 err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
745 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
746 (u64)pfvf->nix_qints_ctx->iova);
747 rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
749 /* Enable LMTST for this NIX LF */
750 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
 * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
 * own pcifunc for those.
 */
756 if (req->npa_func == RVU_DEFAULT_PF_FUNC)
761 if (req->sso_func == RVU_DEFAULT_PF_FUNC)
762 cfg |= (u64)pcifunc << 16;
764 cfg |= (u64)req->sso_func << 16;
766 cfg |= (u64)req->xqe_sz << 33;
767 rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
769 /* Config Rx pkt length, csum checks and apad enable / disable */
770 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
772 err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
779 nix_ctx_free(rvu, pfvf);
783 /* Set macaddr of this PF/VF */
784 ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
786 /* set SQB size info */
787 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
788 rsp->sqb_size = (cfg >> 34) & 0xFFFF;
789 rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
790 rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
794 int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
797 struct rvu_hwinfo *hw = rvu->hw;
798 u16 pcifunc = req->hdr.pcifunc;
799 struct rvu_block *block;
800 int blkaddr, nixlf, err;
801 struct rvu_pfvf *pfvf;
803 pfvf = rvu_get_pfvf(rvu, pcifunc);
804 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
805 if (!pfvf->nixlf || blkaddr < 0)
806 return NIX_AF_ERR_AF_LF_INVALID;
808 block = &hw->block[blkaddr];
809 nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
811 return NIX_AF_ERR_AF_LF_INVALID;
813 nix_interface_deinit(rvu, pcifunc, nixlf);
815 /* Reset this NIX LF */
816 err = rvu_lf_reset(rvu, block, nixlf);
818 dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
819 block->addr - BLKADDR_NIX0, nixlf);
820 return NIX_AF_ERR_LF_RESET;
823 nix_ctx_free(rvu, pfvf);
828 /* Disable shaping of pkts by a scheduler queue
829 * at a given scheduler level.
831 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
834 u64 cir_reg = 0, pir_reg = 0;
838 case NIX_TXSCH_LVL_TL1:
839 cir_reg = NIX_AF_TL1X_CIR(schq);
840 pir_reg = 0; /* PIR not available at TL1 */
842 case NIX_TXSCH_LVL_TL2:
843 cir_reg = NIX_AF_TL2X_CIR(schq);
844 pir_reg = NIX_AF_TL2X_PIR(schq);
846 case NIX_TXSCH_LVL_TL3:
847 cir_reg = NIX_AF_TL3X_CIR(schq);
848 pir_reg = NIX_AF_TL3X_PIR(schq);
850 case NIX_TXSCH_LVL_TL4:
851 cir_reg = NIX_AF_TL4X_CIR(schq);
852 pir_reg = NIX_AF_TL4X_PIR(schq);
858 cfg = rvu_read64(rvu, blkaddr, cir_reg);
859 rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
863 cfg = rvu_read64(rvu, blkaddr, pir_reg);
864 rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
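/* Clear a scheduler queue's link configuration so a freshly allocated
 * TL2/TL4 queue does not carry over a previous owner's CGX/LBK/SDP mapping.
 */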
867 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
870 struct rvu_hwinfo *hw = rvu->hw;
873 /* Reset TL4's SDP link config */
874 if (lvl == NIX_TXSCH_LVL_TL4)
875 rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
877 if (lvl != NIX_TXSCH_LVL_TL2)
880 /* Reset TL2's CGX or LBK link config */
881 for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
882 rvu_write64(rvu, blkaddr,
883 NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
886 int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
887 struct nix_txsch_alloc_req *req,
888 struct nix_txsch_alloc_rsp *rsp)
890 u16 pcifunc = req->hdr.pcifunc;
891 struct nix_txsch *txsch;
892 int lvl, idx, req_schq;
893 struct rvu_pfvf *pfvf;
894 struct nix_hw *nix_hw;
898 pfvf = rvu_get_pfvf(rvu, pcifunc);
899 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
900 if (!pfvf->nixlf || blkaddr < 0)
901 return NIX_AF_ERR_AF_LF_INVALID;
903 nix_hw = get_nix_hw(rvu->hw, blkaddr);
907 spin_lock(&rvu->rsrc_lock);
908 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
909 txsch = &nix_hw->txsch[lvl];
910 req_schq = req->schq_contig[lvl] + req->schq[lvl];
912 /* There are only 28 TL1s */
913 if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
916 /* Check if request is valid */
917 if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
920 /* If contiguous queues are needed, check for availability */
921 if (req->schq_contig[lvl] &&
922 !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
925 /* Check if full request can be accommodated */
926 if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
930 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
931 txsch = &nix_hw->txsch[lvl];
932 rsp->schq_contig[lvl] = req->schq_contig[lvl];
933 rsp->schq[lvl] = req->schq[lvl];
936 /* Alloc contiguous queues first */
937 if (req->schq_contig[lvl]) {
938 schq = rvu_alloc_rsrc_contig(&txsch->schq,
939 req->schq_contig[lvl]);
941 for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
942 txsch->pfvf_map[schq] = pcifunc;
943 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
944 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
945 rsp->schq_contig_list[lvl][idx] = schq;
950 /* Alloc non-contiguous queues */
951 for (idx = 0; idx < req->schq[lvl]; idx++) {
952 schq = rvu_alloc_rsrc(&txsch->schq);
953 txsch->pfvf_map[schq] = pcifunc;
954 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
955 nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
956 rsp->schq_list[lvl][idx] = schq;
961 rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
963 spin_unlock(&rvu->rsrc_lock);
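/* Free all transmit scheduler queues owned by a PF/VF: detach TL2/TL4 link
 * config, flush SMQs with enqueue XOFF set, then return the queues to the
 * free pool and sync NDC-TX.
 */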
967 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
969 int blkaddr, nixlf, lvl, schq, err;
970 struct rvu_hwinfo *hw = rvu->hw;
971 struct nix_txsch *txsch;
972 struct nix_hw *nix_hw;
975 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
977 return NIX_AF_ERR_AF_LF_INVALID;
979 nix_hw = get_nix_hw(rvu->hw, blkaddr);
983 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
985 return NIX_AF_ERR_AF_LF_INVALID;
/* Disable TL2/3 queue links before SMQ flush */
988 spin_lock(&rvu->rsrc_lock);
989 for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
990 if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
993 txsch = &nix_hw->txsch[lvl];
994 for (schq = 0; schq < txsch->schq.max; schq++) {
995 if (txsch->pfvf_map[schq] != pcifunc)
997 nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1002 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1003 for (schq = 0; schq < txsch->schq.max; schq++) {
1004 if (txsch->pfvf_map[schq] != pcifunc)
1006 cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1007 /* Do SMQ flush and set enqueue xoff */
1008 cfg |= BIT_ULL(50) | BIT_ULL(49);
1009 rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1011 /* Wait for flush to complete */
1012 err = rvu_poll_reg(rvu, blkaddr,
1013 NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1016 "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1020 /* Now free scheduler queues to free pool */
1021 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1022 txsch = &nix_hw->txsch[lvl];
1023 for (schq = 0; schq < txsch->schq.max; schq++) {
1024 if (txsch->pfvf_map[schq] != pcifunc)
1026 rvu_free_rsrc(&txsch->schq, schq);
1027 txsch->pfvf_map[schq] = 0;
1030 spin_unlock(&rvu->rsrc_lock);
1032 /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1033 rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1034 err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1036 dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1041 int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
1042 struct nix_txsch_free_req *req,
1043 struct msg_rsp *rsp)
1045 return nix_txschq_free(rvu, req->hdr.pcifunc);
1048 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1049 int lvl, u64 reg, u64 regval)
1051 u64 regbase = reg & 0xFFFF;
1054 if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1057 schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1058 /* Check if this schq belongs to this PF/VF or not */
1059 if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1062 parent = (regval >> 16) & 0x1FF;
1063 /* Validate MDQ's TL4 parent */
1064 if (regbase == NIX_AF_MDQX_PARENT(0) &&
1065 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1068 /* Validate TL4's TL3 parent */
1069 if (regbase == NIX_AF_TL4X_PARENT(0) &&
1070 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1073 /* Validate TL3's TL2 parent */
1074 if (regbase == NIX_AF_TL3X_PARENT(0) &&
1075 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1078 /* Validate TL2's TL1 parent */
1079 if (regbase == NIX_AF_TL2X_PARENT(0) &&
1080 !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1086 int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
1087 struct nix_txschq_config *req,
1088 struct msg_rsp *rsp)
1090 struct rvu_hwinfo *hw = rvu->hw;
1091 u16 pcifunc = req->hdr.pcifunc;
1092 u64 reg, regval, schq_regbase;
1093 struct nix_txsch *txsch;
1094 struct nix_hw *nix_hw;
1095 int blkaddr, idx, err;
1098 if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1099 req->num_regs > MAX_REGS_PER_MBOX_MSG)
1100 return NIX_AF_INVAL_TXSCHQ_CFG;
1102 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1104 return NIX_AF_ERR_AF_LF_INVALID;
1106 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1110 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1112 return NIX_AF_ERR_AF_LF_INVALID;
1114 txsch = &nix_hw->txsch[req->lvl];
1115 for (idx = 0; idx < req->num_regs; idx++) {
1116 reg = req->reg[idx];
1117 regval = req->regval[idx];
1118 schq_regbase = reg & 0xFFFF;
1120 if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1121 txsch->lvl, reg, regval))
1122 return NIX_AF_INVAL_TXSCHQ_CFG;
1124 /* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1125 if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1126 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1128 regval &= ~(0x7FULL << 24);
1129 regval |= ((u64)nixlf << 24);
1132 rvu_write64(rvu, blkaddr, reg, regval);
1134 /* Check for SMQ flush, if so, poll for its completion */
1135 if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1136 (regval & BIT_ULL(49))) {
1137 err = rvu_poll_reg(rvu, blkaddr,
1138 reg, BIT_ULL(49), true);
1140 return NIX_AF_SMQ_FLUSH_FAILED;
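/* Build and submit one AQ request to INIT or WRITE a single MCE entry of a
 * broadcast replication list.
 */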
1146 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1147 u16 pcifunc, int next, bool eol)
1149 struct nix_aq_enq_req aq_req;
1152 aq_req.hdr.pcifunc = pcifunc;
1153 aq_req.ctype = NIX_AQ_CTYPE_MCE;
1157 /* Forward bcast pkts to RQ0, RSS not needed */
1159 aq_req.mce.index = 0;
1160 aq_req.mce.eol = eol;
1161 aq_req.mce.pf_func = pcifunc;
1162 aq_req.mce.next = next;
1164 /* All fields valid */
1165 *(u64 *)(&aq_req.mce_mask) = ~0ULL;
1167 err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1169 dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1170 rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1176 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1177 u16 pcifunc, int idx, bool add)
1179 struct mce *mce, *tail = NULL;
1180 bool delete = false;
1182 /* Scan through the current list */
1183 hlist_for_each_entry(mce, &mce_list->head, node) {
1184 /* If already exists, then delete */
1185 if (mce->pcifunc == pcifunc && !add) {
1193 hlist_del(&mce->node);
1202 /* Add a new one to the list, at the tail */
1203 mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1207 mce->pcifunc = pcifunc;
1209 hlist_add_head(&mce->node, &mce_list->head);
1211 hlist_add_behind(&mce->node, &tail->node);
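/* Add or remove a PF/VF in its PF's broadcast MCE list and rewrite the whole
 * chain to hardware, setting EOL only on the last entry.
 */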
1216 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1218 int err = 0, idx, next_idx, count;
1219 struct nix_mce_list *mce_list;
1220 struct mce *mce, *next_mce;
1221 struct nix_mcast *mcast;
1222 struct nix_hw *nix_hw;
1223 struct rvu_pfvf *pfvf;
1226 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1230 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1234 mcast = &nix_hw->mcast;
1236 /* Get this PF/VF func's MCE index */
1237 pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1238 idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1240 mce_list = &pfvf->bcast_mce_list;
1241 if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1243 "%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1244 __func__, idx, mce_list->max,
1245 pcifunc >> RVU_PFVF_PF_SHIFT);
1249 spin_lock(&mcast->mce_lock);
1251 err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1255 /* Disable MCAM entry in NPC */
1257 if (!mce_list->count)
1259 count = mce_list->count;
1261 /* Dump the updated list to HW */
1262 hlist_for_each_entry(mce, &mce_list->head, node) {
1266 next_mce = hlist_entry(mce->node.next,
1268 next_idx = next_mce->idx;
1270 /* EOL should be set in last MCE */
1271 err = nix_setup_mce(rvu, mce->idx,
1272 NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1273 next_idx, count ? false : true);
1279 spin_unlock(&mcast->mce_lock);
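/* Reserve one MCE list per enabled PF, sized for the PF plus all of its VFs,
 * and pre-populate it with placeholder entries.
 */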
1283 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1285 struct nix_mcast *mcast = &nix_hw->mcast;
1286 int err, pf, numvfs, idx;
1287 struct rvu_pfvf *pfvf;
1291 /* Skip PF0 (i.e AF) */
1292 for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1293 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1294 /* If PF is not enabled, nothing to do */
1295 if (!((cfg >> 20) & 0x01))
1297 /* Get numVFs attached to this PF */
1298 numvfs = (cfg >> 12) & 0xFF;
1300 pfvf = &rvu->pf[pf];
1301 /* Save the start MCE */
1302 pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1304 nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1306 for (idx = 0; idx < (numvfs + 1); idx++) {
1307 /* idx-0 is for PF, followed by VFs */
1308 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
/* Add dummy entries now, so that we don't have to check
 * for whether AQ_OP should be INIT/WRITE later on.
 * Will be updated when a NIXLF is attached/detached to
 * these PF/VFs later.
 */
1315 err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1325 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1327 struct nix_mcast *mcast = &nix_hw->mcast;
1328 struct rvu_hwinfo *hw = rvu->hw;
1331 size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1332 size = (1ULL << size);
1334 /* Alloc memory for multicast/mirror replication entries */
1335 err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1336 (256UL << MC_TBL_SIZE), size);
1340 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1341 (u64)mcast->mce_ctx->iova);
1343 /* Set max list length equal to max no of VFs per PF + PF itself */
1344 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1345 BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1347 /* Alloc memory for multicast replication buffers */
1348 size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1349 err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1350 (8UL << MC_BUF_CNT), size);
1354 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1355 (u64)mcast->mcast_buf->iova);
1357 /* Alloc pkind for NIX internal RX multicast/mirror replay */
1358 mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1360 rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1361 BIT_ULL(63) | (mcast->replay_pkind << 24) |
1362 BIT_ULL(20) | MC_BUF_CNT);
1364 spin_lock_init(&mcast->mce_lock);
1366 return nix_setup_bcast_tables(rvu, nix_hw);
1369 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1371 struct nix_txsch *txsch;
/* Get the scheduler queue count of each type and allocate a bitmap
 * per level for alloc/free/attach operations.
 */
1378 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1379 txsch = &nix_hw->txsch[lvl];
1382 case NIX_TXSCH_LVL_SMQ:
1383 reg = NIX_AF_MDQ_CONST;
1385 case NIX_TXSCH_LVL_TL4:
1386 reg = NIX_AF_TL4_CONST;
1388 case NIX_TXSCH_LVL_TL3:
1389 reg = NIX_AF_TL3_CONST;
1391 case NIX_TXSCH_LVL_TL2:
1392 reg = NIX_AF_TL2_CONST;
1394 case NIX_TXSCH_LVL_TL1:
1395 reg = NIX_AF_TL1_CONST;
1398 cfg = rvu_read64(rvu, blkaddr, reg);
1399 txsch->schq.max = cfg & 0xFFFF;
1400 err = rvu_alloc_bitmap(&txsch->schq);
1404 /* Allocate memory for scheduler queues to
1405 * PF/VF pcifunc mapping info.
1407 txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1408 sizeof(u16), GFP_KERNEL);
1409 if (!txsch->pfvf_map)
1415 int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
1416 struct msg_rsp *rsp)
1418 struct rvu_hwinfo *hw = rvu->hw;
1419 u16 pcifunc = req->hdr.pcifunc;
1420 int i, nixlf, blkaddr;
1423 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1425 return NIX_AF_ERR_AF_LF_INVALID;
1427 nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1429 return NIX_AF_ERR_AF_LF_INVALID;
1431 /* Get stats count supported by HW */
1432 stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1434 /* Reset tx stats */
1435 for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1436 rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1438 /* Reset rx stats */
1439 for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1440 rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1445 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
1450 /* Start X2P bus calibration */
1451 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1452 rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
1453 /* Wait for calibration to complete */
1454 err = rvu_poll_reg(rvu, blkaddr,
1455 NIX_AF_STATUS, BIT_ULL(10), false);
1457 dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
1461 status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
1462 /* Check if CGX devices are ready */
1463 for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
1464 if (status & (BIT_ULL(16 + idx)))
1467 "CGX%d didn't respond to NIX X2P calibration\n", idx);
1471 /* Check if LBK is ready */
1472 if (!(status & BIT_ULL(19))) {
1474 "LBK didn't respond to NIX X2P calibration\n");
1478 /* Clear 'calibrate_x2p' bit */
1479 rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1480 rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
1481 if (err || (status & 0x3FFULL))
1483 "NIX X2P calibration failed, status 0x%llx\n", status);
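/* Initialize the NIX admin queue: set AF endianness, keep NDC caching
 * enabled and allocate instruction and result queue memory.
 */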
1489 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
1494 /* Set admin queue endianness */
1495 cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
1498 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1501 rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1504 /* Do not bypass NDC cache */
1505 cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
1507 rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
1509 /* Result structure can be followed by RQ/SQ/CQ context at
* RES + 128 bytes and a write mask at RES + 256 bytes, depending on
1511 * operation type. Alloc sufficient result memory for all operations.
1513 err = rvu_aq_alloc(rvu, &block->aq,
1514 Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
1515 ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
1519 rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
1520 rvu_write64(rvu, block->addr,
1521 NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
1525 int rvu_nix_init(struct rvu *rvu)
1527 struct rvu_hwinfo *hw = rvu->hw;
1528 struct rvu_block *block;
1532 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1535 block = &hw->block[blkaddr];
1537 /* Calibrate X2P bus to check if CGX/LBK links are fine */
1538 err = nix_calibrate_x2p(rvu, blkaddr);
1542 /* Set num of links of each type */
1543 cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
1544 hw->cgx = (cfg >> 12) & 0xF;
1545 hw->lmac_per_cgx = (cfg >> 8) & 0xF;
1546 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
1550 /* Initialize admin queue */
1551 err = nix_aq_init(rvu, block);
1555 /* Restore CINT timer delay to HW reset values */
1556 rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
1558 /* Configure segmentation offload formats */
1559 nix_setup_lso(rvu, blkaddr);
1561 if (blkaddr == BLKADDR_NIX0) {
1562 hw->nix0 = devm_kzalloc(rvu->dev,
1563 sizeof(struct nix_hw), GFP_KERNEL);
1567 err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
1571 err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
1578 void rvu_nix_freemem(struct rvu *rvu)
1580 struct rvu_hwinfo *hw = rvu->hw;
1581 struct rvu_block *block;
1582 struct nix_txsch *txsch;
1583 struct nix_mcast *mcast;
1584 struct nix_hw *nix_hw;
1587 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1591 block = &hw->block[blkaddr];
1592 rvu_aq_free(rvu, block->aq);
1594 if (blkaddr == BLKADDR_NIX0) {
1595 nix_hw = get_nix_hw(rvu->hw, blkaddr);
1599 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1600 txsch = &nix_hw->txsch[lvl];
1601 kfree(txsch->schq.bmap);
1604 mcast = &nix_hw->mcast;
1605 qmem_free(rvu->dev, mcast->mce_ctx);
1606 qmem_free(rvu->dev, mcast->mcast_buf);