// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "rvu_trace.h"
#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, } /* end of table */
};
MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
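/* Usage sketch (hypothetical profile names - the strings must match profiles
 * the firmware/driver can actually locate on the board):
 *
 *	modprobe rvu_af mkex_profile=my_mkex kpu_profile=my_kpu
 */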
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->cap.nix_shaper_toggle_wait = false;

	if (is_rvu_pre_96xx_C0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}
	if (!is_rvu_pre_96xx_C0(rvu))
		hw->cap.nix_shaper_toggle_wait = true;

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset' until the bits specified by 'mask'
 * read as zero (if 'zero' is true) or nonzero (if 'zero' is false).
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled in such that
	 * jiffies are beyond timeout value, then check again if HW is
	 * done with the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}
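/* Typical caller pattern (see rvu_lf_reset() below): trigger an operation by
 * setting a bit, then poll the same register until HW clears it, e.g.
 *
 *	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
 *	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg,
 *			   BIT_ULL(12), true);
 */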
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}
static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return false;

	return !test_bit(id, rsrc->bmap);
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}
void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
	kfree(rsrc->bmap);
}
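/* The rsrc_bmap helpers above form a small ID allocator. A minimal usage
 * sketch (error handling elided):
 *
 *	struct rsrc_bmap rsrc = { .max = 16 };
 *	int id;
 *
 *	rvu_alloc_bitmap(&rsrc);	// kcalloc the backing storage
 *	id = rvu_alloc_rsrc(&rsrc);	// take one free ID
 *	...
 *	rvu_free_rsrc(&rsrc, id);	// return it
 *	rvu_free_bitmap(&rsrc);
 */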
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == 0) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			slot--;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc, if LFs are attached from multiple blocks of the same type,
 * then the blkaddr of the first encountered block is returned.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
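/* Note: rvu_get_blkaddr() returns a valid blkaddr only if the block is both
 * implemented on this silicon and (for NIX/CPT with a nonzero pcifunc) has an
 * LF provisioned to that function; otherwise it returns -ENODEV.
 */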
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				int lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e. %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}
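/* pcifunc layout assumed by the helpers above (see the RVU_PFVF_* masks in
 * rvu.h for the authoritative definitions):
 *
 *	bits [15:10] - PF number
 *	bits [9:0]   - function; 0 means the PF itself, else VF index + 1
 */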
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}
static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}
static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];
	int err;

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
	if (err) {
		dev_err(rvu->dev, "HW block:%d reset timeout, retrying again\n", blkaddr);
		while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
			;
	}
}
static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
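/* Scanning at probe time preserves any LFs the low level firmware already
 * provisioned before the AF driver loaded: such LFs are marked used in the
 * bitmap and their MSIX offsets derived rather than being reset.
 */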
static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
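/* Sizing note: RVU_PRIV_CONST[19:0] is the total number of entries in the
 * MSIX table, so the IOMMU mapping created above spans
 * max_msix * PCI_MSIX_ENTRY_SIZE (16) bytes.
 */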
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}
static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (rvu->msix_base_iova) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
		max_msix = cfg & 0xFFFFF;
		dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
				   max_msix * PCI_MSIX_ENTRY_SIZE,
				   DMA_BIDIRECTIONAL, 0);
		rvu_reset_msix(rvu);
	}

	mutex_destroy(&rvu->rsrc_lock);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0(AF), assign MAC address only to VFs (LBKVFs) */
		if (!pf)
			goto lbkvf;
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct between kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;
	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}
static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;
	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* Cache the LBK FIFO size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;
	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NPA LF bitmap\n", __func__);
		return err;
	}

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
		return err;
	}

	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
		return err;
	}

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSO LF bitmap\n", __func__);
		return err;
	}

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;
	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
		return err;
	}

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;
	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate TIM LF bitmap\n", __func__);
		return err;
	}

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
		return err;
	}
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
		return err;
	}

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to setup MSIX resources\n", __func__);
		return err;
	}

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
		goto npc_err;
	}

	err = rvu_cgx_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
		goto cgx_err;
	}

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
		goto npa_err;
	}

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
		goto nix_err;
	}

	err = rvu_sdp_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
		goto nix_err;
	}

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}
int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}
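/* Example caller (a sketch modelled on the NPA AQ setup; the exact queue
 * count and entry sizes come from the block's context structure definitions):
 *
 *	err = rvu_aq_alloc(rvu, &block->aq, Q_COUNT(AQ_SIZE),
 *			   sizeof(struct npa_aq_inst_s),
 *			   sizeof(struct npa_aq_res_s));
 */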
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}
/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}
/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}

	return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has a LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);

	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int numlfs, total_lfs = 0, nr_blocks = 0;
	int i, num_blkaddr[BLK_COUNT] = { 0 };
	struct rvu_block *block;
	int blkaddr;
	u16 start_slot;

	if (!is_blktype_attached(pfvf, blktype))
		return -ENODEV;

	/* Get all the block addresses from which LFs are attached to
	 * the given pcifunc in num_blkaddr[].
	 */
	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
		block = &rvu->hw->block[blkaddr];
		if (block->type != blktype)
			continue;
		if (!is_block_implemented(rvu->hw, blkaddr))
			continue;

		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
		if (numlfs) {
			total_lfs += numlfs;
			num_blkaddr[nr_blocks] = blkaddr;
			nr_blocks++;
		}
	}

	if (global_slot >= total_lfs)
		return -ENODEV;

	/* Based on the given global slot number retrieve the
	 * correct block address out of all attached block
	 * addresses and slot number in that block.
	 */
	total_lfs = 0;
	blkaddr = -ENODEV;
	for (i = 0; i < nr_blocks; i++) {
		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
		total_lfs += numlfs;
		if (global_slot < total_lfs) {
			blkaddr = num_blkaddr[i];
			start_slot = total_lfs - numlfs;
			*slot_in_block = global_slot - start_slot;
			break;
		}
	}

	return blkaddr;
}
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}
int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered gets NIX1
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* if SDP1 then the blkaddr is NIX1 */
	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
		blkaddr = BLKADDR_NIX1;

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}
static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
			  BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}
static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees them as slots 0, 1, 2. So for a
		 * 'modify' request, simply detach all existing attached
		 * LFs/slots and attach afresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (!pfvf->msix.bmap)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
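/* Per-LF MSIX config register layout relied on above: bits [19:12] hold the
 * LF's vector count and bits [10:0] the starting offset within the PF/VF's
 * MSIX table; set/clear only ever rewrite the offset field (cfg & ~0x7FFULL).
 */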
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
				   struct free_rsrcs_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	mutex_lock(&rvu->rsrc_lock);

	block = &hw->block[BLKADDR_NPA];
	rsp->npa = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX0];
	rsp->nix = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX1];
	rsp->nix1 = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSO];
	rsp->sso = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSOW];
	rsp->ssow = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_TIM];
	rsp->tim = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT0];
	rsp->cpt = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT1];
	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);

	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
		/* NIX1 */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
	} else {
		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);

		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;

		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);
	}

	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
out:
	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;

	return 0;
}
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only PF can add VF permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* disable multicast and promisc entries */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
	int err;

	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
	case _id: { \
		struct _rsp_type *rsp; \
		int err; \
 \
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
			mbox, devid, \
			sizeof(struct _rsp_type)); \
		/* some handlers should complete even if reply */ \
		/* could not be allocated */ \
		if (!rsp && \
		    _id != MBOX_MSG_DETACH_RESOURCES && \
		    _id != MBOX_MSG_NIX_TXSCH_FREE && \
		    _id != MBOX_MSG_VF_FLR) \
			return -ENOMEM; \
		if (rsp) { \
			rsp->hdr.id = _id; \
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
			rsp->hdr.pcifunc = req->pcifunc; \
			rsp->hdr.rc = 0; \
		} \
 \
		err = rvu_mbox_handler_ ## _fn_name(rvu, \
						    (struct _req_type *)req, \
						    rsp); \
		if (rsp && err) \
			rsp->hdr.rc = err; \
 \
		trace_otx2_msg_process(mbox->pdev, _id, err); \
		return rsp ? err : -ENOMEM; \
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
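/* The M() macro above is expanded once per mailbox message through the
 * MBOX_MESSAGES X-macro list (see mbox.h), so an entry such as
 *
 *	M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp)
 *
 * becomes a 'case 0x002:' that dispatches to
 * rvu_mbox_handler_attach_resources().
 */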
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc));
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}
static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follow after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
				       MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	int err = -EINVAL, i, dir, dir_up;
	void __iomem *reg_base;
	struct rvu_work *mwork;
	void **mbox_regions;
	const char *name;

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions)
		return -ENOMEM;

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
		if (err)
			goto free_regions;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
		if (err)
			goto free_regions;
		break;
	default:
		goto free_regions;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq) {
		err = -ENOMEM;
		goto unmap_regions;
	}

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
				     reg_base, dir, num);
	if (err)
		goto exit;

	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
				     reg_base, dir_up, num);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}
	kfree(mbox_regions);
	return 0;

exit:
	destroy_workqueue(mw->mbox_wq);
unmap_regions:
	while (num--)
		iounmap((void __iomem *)mbox_regions[num]);
free_regions:
	kfree(mbox_regions);
	return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	struct otx2_mbox *mbox = &mw->mbox;
	struct otx2_mbox_dev *mdev;
	int devid;

	if (mw->mbox_wq) {
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	for (devid = 0; devid < mbox->ndevs; devid++) {
		mdev = &mbox->dev[devid];
		if (mdev->hwbase)
			iounmap((void __iomem *)mdev->hwbase);
	}

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}
static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
		 * and pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs = 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}
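/* VF interrupts are banked in 64-bit registers: INTX(0) covers VFs 0-63 and
 * INTX(1) VFs 64-127, which is why the handlers above and in
 * rvu_flr_intr_handler() below split the work queuing at the 64-VF boundary.
 */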
static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}
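/* INTR_MASK(n) builds an n-bit wide mask, one bit per PF; clearing bit 0 with
 * '& ~1ULL' keeps PF0 (the AF itself) from interrupting itself.
 */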
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);
		else if ((block->addr == BLKADDR_CPT0) ||
			 (block->addr == BLKADDR_CPT1))
			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
					    slot);

		err = rvu_lf_reset(rvu, block, lf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
		}
	}
}
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_reset_lmt_map_tbl(rvu, pcifunc);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}
static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
	int reg = 0;

	/* pcifunc = 0(PF0) | (vf + 1) */
	__rvu_flr_handler(rvu, vf + 1);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}

	/* Signal FLR finish and enable IRQ */
	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}
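
/* Workqueue handler for FLRs; one work item exists per PF and, beyond
 * total_pfs, per VF of the AF. A PF FLR also resets all of that PF's VFs.
 */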
static void rvu_flr_handler(struct work_struct *work)
{
	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
	struct rvu *rvu = flrwork->rvu;
	u16 pcifunc, numvfs, vf;
	u64 cfg;
	int pf;

	pf = flrwork - rvu->flr_wrk;
	if (pf >= rvu->hw->total_pfs) {
		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
		return;
	}

	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	numvfs = (cfg >> 12) & 0xFF;
	pcifunc = pf << RVU_PFVF_PF_SHIFT;

	/* FLR the VFs of this PF first */
	for (vf = 0; vf < numvfs; vf++)
		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

	__rvu_flr_handler(rvu, pcifunc);

	/* Signal FLR finish */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

	/* Enable interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}
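
/* Scan pending FLR interrupts of AF's VFs in the range
 * [start_vf, start_vf + numvfs) and queue their FLR work; each bit is
 * cleared and masked here and re-enabled by rvu_afvf_flr_handler().
 */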
static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
	int dev, vf, reg = 0;
	u64 intr;

	if (start_vf >= 64)
		reg = 1;

	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
	if (!intr)
		return;

	for (vf = 0; vf < numvfs; vf++) {
		if (!(intr & BIT_ULL(vf)))
			continue;
		/* Clear and disable the interrupt */
		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));

		dev = vf + start_vf + rvu->hw->total_pfs;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
	}
}
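
/* IRQ handler for PF FLRs; it masks the per-PF interrupt and queues the
 * actual cleanup work, then checks for FLRs of AF's VFs as well.
 */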
static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
	if (!intr)
		goto afvf_flr;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
				    BIT_ULL(pf));
			/* Disable the interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
				    BIT_ULL(pf));
			/* PF is already dead, do only AF related operations */
			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
		}
	}

afvf_flr:
	rvu_afvf_queue_flr_work(rvu, 0, 64);
	if (rvu->vfs > 64)
		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

	return IRQ_HANDLED;
}
static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
	int vf;

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (vf = 0; vf < 64; vf++) {
		if (intr & (1ULL << vf)) {
			/* clear the trpend due to ME(master enable) */
			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
			/* clear interrupt */
			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
		}
	}
}
/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfset;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	for (vfset = 0; vfset <= 1; vfset++) {
		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
		if (intr)
			rvu_me_handle_vfset(rvu, vfset, intr);
	}

	return IRQ_HANDLED;
}
/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	u64 intr;
	u8 pf;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

	/* Nothing to be done here other than clearing the
	 * TRPEND bit.
	 */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (intr & (1ULL << pf)) {
			/* clear the trpend due to ME(master enable) */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
				    BIT_ULL(pf));
			/* clear interrupt */
			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
				    BIT_ULL(pf));
		}
	}

	return IRQ_HANDLED;
}
static void rvu_unregister_interrupts(struct rvu *rvu)
{
	int irq;

	rvu_cpt_unregister_interrupts(rvu);

	/* Disable the Mbox interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF FLR interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Disable the PF ME interrupt */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	for (irq = 0; irq < rvu->num_vec; irq++) {
		if (rvu->irq_allocated[irq]) {
			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
			rvu->irq_allocated[irq] = false;
		}
	}

	pci_free_irq_vectors(rvu->pdev);
	rvu->num_vec = 0;
}
static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
	struct rvu_pfvf *pfvf = &rvu->pf[0];
	int offset;

	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Make sure there are enough MSIX vectors configured so that
	 * VF interrupts can be handled. Offset equal to zero means
	 * that PF vectors are not configured and overlapping AF vectors.
	 */
	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
	       offset;
}
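
/* Allocate this device's MSI-X vectors and register handlers for the AF
 * mailbox, FLR and ME vectors; vectors for AF's VFs are registered only
 * when rvu_afvf_msix_vectors_num_ok() says enough are configured.
 */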
static int rvu_register_interrupts(struct rvu *rvu)
{
	int ret, offset, pf_vec_start;

	rvu->num_vec = pci_msix_vec_count(rvu->pdev);

	rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
					   NAME_SIZE, GFP_KERNEL);
	if (!rvu->irq_name)
		return -ENOMEM;

	rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
					  sizeof(bool), GFP_KERNEL);
	if (!rvu->irq_allocated)
		return -ENOMEM;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
				    rvu->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(rvu->dev,
			"RVUAF: Request for %d msix vectors failed, ret %d\n",
			rvu->num_vec, ret);
		return ret;
	}

	/* Register mailbox interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for mbox irq\n");
		goto fail;
	}

	rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

	/* Enable mailbox interrupts from all PFs */
	rvu_enable_mbox_intr(rvu);

	/* Register FLR interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
		"RVUAF FLR");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for FLR\n");
		goto fail;
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

	/* Enable FLR interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	/* Register ME interrupt handler */
	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
		"RVUAF ME");
	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
			  rvu_me_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
			  rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for ME\n");
	}
	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

	/* Clear TRPEND bit for all PF */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
	/* Enable ME interrupt for all PFs */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	if (!rvu_afvf_msix_vectors_num_ok(rvu))
		return 0;

	/* Get PF MSIX vectors offset. */
	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

	/* Register MBOX0 interrupt. */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox0\n");

	rvu->irq_allocated[offset] = true;

	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
	 * simply increment current offset by 1.
	 */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_mbox_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE],
			  rvu);
	if (ret)
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for Mbox1\n");

	rvu->irq_allocated[offset] = true;

	/* Register FLR interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_flr_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	/* Register ME interrupt handler for AF's VFs */
	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
			  rvu_me_vf_intr_handler, 0,
			  &rvu->irq_name[offset * NAME_SIZE], rvu);
	if (ret) {
		dev_err(rvu->dev,
			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
		goto fail;
	}
	rvu->irq_allocated[offset] = true;

	ret = rvu_cpt_register_interrupts(rvu);
	if (ret)
		goto fail;

	return 0;

fail:
	rvu_unregister_interrupts(rvu);
	return ret;
}
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
	if (rvu->flr_wq) {
		destroy_workqueue(rvu->flr_wq);
		rvu->flr_wq = NULL;
	}
}
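
/* Enable FLR for all PFs and set up the FLR workqueue plus one work item
 * per PF and per AF VF for use by the FLR interrupt handlers.
 */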
static int rvu_flr_init(struct rvu *rvu)
{
	int dev, num_devs;
	u64 cfg;
	int pf;

	/* Enable FLR for all PFs */
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
			    cfg | BIT_ULL(22));
	}

	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      1);
	if (!rvu->flr_wq)
		return -ENOMEM;

	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!rvu->flr_wrk) {
		destroy_workqueue(rvu->flr_wq);
		return -ENOMEM;
	}

	for (dev = 0; dev < num_devs; dev++) {
		rvu->flr_wrk[dev].rvu = rvu;
		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
	}

	mutex_init(&rvu->flr_lock);

	return 0;
}
static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}
static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR and ME */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
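
/* Read the number of available LBK channels from the LBK(0)_CONST
 * register of the loopback PCI device; returns -EIO if the device is
 * absent or its BAR cannot be mapped.
 */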
int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}
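
/* Enable SRIOV for AF's VFs. The VF count is bounded by the available
 * LBK channels, and VF mailboxes and interrupts are brought up before
 * pci_enable_sriov() so that early VF activity is not lost.
 */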
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit LBK pairs till 62 only.
	 */
	if (vfs > 62)
		vfs = 62;

	/* Save VFs number for reference in VF interrupts handlers.
	 * Since interrupts might start arriving during SRIOV enablement
	 * ordinary API cannot be used to get number of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}
static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}
static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
	strscpy(rvu->kpu_pfl_name,
		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
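
/* Device probe: map the AF/PF CSR BARs, discover implemented blocks and
 * HW resources, then bring up mailboxes, FLR handling, interrupts,
 * devlink and, if possible, SRIOV for AF's VFs.
 */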
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err) {
		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
		goto err_hwsetup;
	}

	err = rvu_flr_init(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
		goto err_mbox;
	}

	err = rvu_register_interrupts(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
		goto err_flr;
	}

	err = rvu_register_dl(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register devlink\n", __func__);
		goto err_irq;
	}

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
		goto err_dl;
	}

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	mutex_init(&rvu->rswitch.switch_lock);

	if (rvu->fwdata)
		ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
			  rvu->fwdata->ptp_ext_tstamp);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}
static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};
static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}
static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}
module_init(rvu_init_module);
module_exit(rvu_cleanup_module);