// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"

#include "rvu_trace.h"

#define DRV_NAME	"rvu_af"
#define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *));
enum {
	TYPE_AFVF,
	TYPE_AFPF,
};
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
	{ 0, } /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
	hw->cap.nix_fixed_txschq_mapping = false;
	hw->cap.nix_shaping = true;
	hw->cap.nix_tx_link_bp = true;
	hw->cap.nix_rx_multicast = true;
	hw->cap.nix_shaper_toggle_wait = false;

	if (is_rvu_pre_96xx_C0(rvu)) {
		hw->cap.nix_fixed_txschq_mapping = true;
		hw->cap.nix_txsch_per_cgx_lmac = 4;
		hw->cap.nix_txsch_per_lbk_lmac = 132;
		hw->cap.nix_txsch_per_sdp_lmac = 76;
		hw->cap.nix_shaping = false;
		hw->cap.nix_tx_link_bp = false;
		if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
			hw->cap.nix_rx_multicast = false;
	}
	if (!is_rvu_pre_96xx_C0(rvu))
		hw->cap.nix_shaper_toggle_wait = true;

	if (!is_rvu_otx2(rvu))
		hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
 * or 'nonzero' at bits specified by 'mask'
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	bool twice = false;
	void __iomem *reg;
	u64 reg_val;

	reg = rvu->afreg_base + ((block << 28) | offset);
again:
	reg_val = readq(reg);
	if (zero && !(reg_val & mask))
		return 0;
	if (!zero && (reg_val & mask))
		return 0;
	if (time_before(jiffies, timeout)) {
		usleep_range(1, 5);
		goto again;
	}
	/* In scenarios where CPU is scheduled out before checking
	 * 'time_before' (above) and gets scheduled in such that
	 * jiffies are beyond timeout value, then check again if HW is
	 * done with the operation in the meantime.
	 */
	if (!twice) {
		twice = true;
		goto again;
	}
	return -EBUSY;
}
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
	int id;

	if (!rsrc->bmap)
		return -EINVAL;

	id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (id >= rsrc->max)
		return -ENOSPC;

	__set_bit(id, rsrc->bmap);

	return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return -EINVAL;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, start, nrsrc);
	return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
	if (!rsrc->bmap)
		return;
	if (start >= rsrc->max)
		return;

	bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
	int start;

	if (!rsrc->bmap)
		return false;

	start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
	if (start >= rsrc->max)
		return false;

	return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return;

	__clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
	int used;

	if (!rsrc->bmap)
		return 0;

	used = bitmap_weight(rsrc->bmap, rsrc->max);
	return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
	if (!rsrc->bmap)
		return false;

	return !test_bit(id, rsrc->bmap);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
	rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
			     sizeof(long), GFP_KERNEL);
	if (!rsrc->bmap)
		return -ENOMEM;
	return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
	kfree(rsrc->bmap);
}
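/* Usage sketch for the rsrc_bmap helpers above (hypothetical caller, not
 * part of this driver): the caller fills in 'max' first, then allocates
 * the backing bitmap and hands out IDs with the alloc/free pairs.
 *
 *	struct rsrc_bmap rsrc = { .max = 128 };
 *	int id;
 *
 *	if (rvu_alloc_bitmap(&rsrc))
 *		return -ENOMEM;
 *	id = rvu_alloc_rsrc(&rsrc);	// lowest free ID, e.g. 0
 *	...
 *	rvu_free_rsrc(&rsrc, id);
 *	rvu_free_bitmap(&rsrc);
 */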
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
	u16 match = 0;
	int lf;

	mutex_lock(&rvu->rsrc_lock);
	for (lf = 0; lf < block->lf.max; lf++) {
		if (block->fn_map[lf] == pcifunc) {
			if (slot == match) {
				mutex_unlock(&rvu->rsrc_lock);
				return lf;
			}
			match++;
		}
	}
	mutex_unlock(&rvu->rsrc_lock);
	return -ENODEV;
}
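/* Note: 'slot' is the index as seen by the PF/VF, while the returned 'lf'
 * is the HW index within the block. E.g. if LFs 5 and 9 of a multislot
 * block are attached to a pcifunc, that func sees them as slots 0 and 1,
 * and rvu_get_lf(rvu, block, pcifunc, 1) returns 9.
 */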
/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * For a pcifunc if LFs are attached from multiple blocks of same type, then
 * return blkaddr of first encountered block.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
	int devnum, blkaddr = -ENODEV;
	u64 cfg, reg;
	bool is_pf;

	switch (blktype) {
	case BLKTYPE_NPC:
		blkaddr = BLKADDR_NPC;
		goto exit;
	case BLKTYPE_NPA:
		blkaddr = BLKADDR_NPA;
		goto exit;
	case BLKTYPE_NIX:
		/* For now assume NIX0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}
		break;
	case BLKTYPE_SSO:
		blkaddr = BLKADDR_SSO;
		goto exit;
	case BLKTYPE_SSOW:
		blkaddr = BLKADDR_SSOW;
		goto exit;
	case BLKTYPE_TIM:
		blkaddr = BLKADDR_TIM;
		goto exit;
	case BLKTYPE_CPT:
		/* For now assume CPT0 */
		if (!pcifunc) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}
		break;
	}

	/* Check if this is a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	/* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
	 * 'BLKADDR_NIX1'.
	 */
	if (blktype == BLKTYPE_NIX) {
		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
			RVU_PRIV_HWVFX_NIXX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_NIX0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
			RVU_PRIV_HWVFX_NIXX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_NIX1;
	}

	if (blktype == BLKTYPE_CPT) {
		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
			RVU_PRIV_HWVFX_CPTX_CFG(0);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg) {
			blkaddr = BLKADDR_CPT0;
			goto exit;
		}

		reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
			RVU_PRIV_HWVFX_CPTX_CFG(1);
		cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
		if (cfg)
			blkaddr = BLKADDR_CPT1;
	}

exit:
	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;
	return -ENODEV;
}
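/* Example: for a pcifunc whose only NIX LF came from NIX1, the lookup
 * above finds RVU_PRIV_*_NIXX_CFG(0) clear and NIXX_CFG(1) nonzero, so it
 * returns BLKADDR_NIX1; with pcifunc == 0 (no LF attached yet) the switch
 * simply defaults to the first block of the requested type.
 */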
static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, u16 pcifunc,
				u16 lf, bool attach)
{
	int devnum, num_lfs = 0;
	bool is_pf;
	u64 reg;

	if (lf >= block->lf.max) {
		dev_err(&rvu->pdev->dev,
			"%s: FATAL: LF %d is >= %s's max lfs i.e %d\n",
			__func__, lf, block->name, block->lf.max);
		return;
	}

	/* Check if this is for a RVU PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK) {
		is_pf = false;
		devnum = rvu_get_hwvf(rvu, pcifunc);
	} else {
		is_pf = true;
		devnum = rvu_get_pf(pcifunc);
	}

	block->fn_map[lf] = attach ? pcifunc : 0;

	switch (block->addr) {
	case BLKADDR_NPA:
		pfvf->npalf = attach ? true : false;
		num_lfs = pfvf->npalf;
		break;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		pfvf->nixlf = attach ? true : false;
		num_lfs = pfvf->nixlf;
		break;
	case BLKADDR_SSO:
		attach ? pfvf->sso++ : pfvf->sso--;
		num_lfs = pfvf->sso;
		break;
	case BLKADDR_SSOW:
		attach ? pfvf->ssow++ : pfvf->ssow--;
		num_lfs = pfvf->ssow;
		break;
	case BLKADDR_TIM:
		attach ? pfvf->timlfs++ : pfvf->timlfs--;
		num_lfs = pfvf->timlfs;
		break;
	case BLKADDR_CPT0:
		attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
		num_lfs = pfvf->cptlfs;
		break;
	case BLKADDR_CPT1:
		attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
		num_lfs = pfvf->cpt1_lfs;
		break;
	}

	reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
	rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}
inline int rvu_get_pf(u16 pcifunc)
{
	return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
	u64 cfg;

	/* Get numVFs attached to this PF and first HWVF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	if (numvfs)
		*numvfs = (cfg >> 12) & 0xFF;
	if (hwvf)
		*hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
	int pf, func;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	func = pcifunc & RVU_PFVF_FUNC_MASK;

	/* Get first HWVF attached to this PF */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

	return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
	/* Check if it is a PF or VF */
	if (pcifunc & RVU_PFVF_FUNC_MASK)
		return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
	else
		return &rvu->pf[rvu_get_pf(pcifunc)];
}
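/* pcifunc layout, as exercised by the helpers above: the PF number sits
 * at RVU_PFVF_PF_SHIFT under RVU_PFVF_PF_MASK, and the low function bits
 * (RVU_PFVF_FUNC_MASK) are 0 for the PF itself or 'n + 1' for VFn.
 *
 * Worked example: PF2's VF0 is ((2 << RVU_PFVF_PF_SHIFT) | 1). If
 * RVU_PRIV_PFX_CFG(2) reports first HWVF = 8, rvu_get_hwvf() yields
 * (8 + 1 - 1) = HWVF 8, so rvu_get_pfvf() returns &rvu->hwvf[8].
 */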
static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
	int pf, vf, nvfs;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (pf >= rvu->hw->total_pfs)
		return false;

	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
		return true;

	/* Check if VF is within number of VFs attached to this PF */
	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
	nvfs = (cfg >> 12) & 0xFF;
	if (vf >= nvfs)
		return false;

	return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
	struct rvu_block *block;

	if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
		return false;

	block = &hw->block[blkaddr];
	return block->implemented;
}
static void rvu_check_block_implemented(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* For each block check if 'implemented' bit is set */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
		if (cfg & BIT_ULL(11))
			block->implemented = true;
	}
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
		    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
	int err;

	if (!block->implemented)
		return 0;

	rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
	err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
			   true);
	return err;
}
static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
	struct rvu_block *block = &rvu->hw->block[blkaddr];
	int err;

	if (!block->implemented)
		return;

	rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
	err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
	if (err)
		dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
	/* Do a HW reset of all RVU blocks */
	rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
	rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
	struct rvu_pfvf *pfvf;
	u64 cfg;
	int lf;

	for (lf = 0; lf < block->lf.max; lf++) {
		cfg = rvu_read64(rvu, block->addr,
				 block->lfcfg_reg | (lf << block->lfshift));
		if (!(cfg & BIT_ULL(63)))
			continue;

		/* Set this resource as being used */
		__set_bit(lf, block->lf.bmap);

		/* Get, to whom this LF is attached */
		pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    (cfg >> 8) & 0xFFFF, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
	int min_vecs;

	if (!vf)
		goto check_pf;

	if (!nvecs) {
		dev_warn(rvu->dev,
			 "PF%d:VF%d is configured with zero msix vectors, %d\n",
			 pf, vf - 1, nvecs);
	}
	return;

check_pf:
	if (pf == 0)
		min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
	else
		min_vecs = RVU_PF_INT_VEC_CNT;

	if (!(nvecs < min_vecs))
		return;
	dev_warn(rvu->dev,
		 "PF%d is configured with too few vectors, %d, min is %d\n",
		 pf, nvecs, min_vecs);
}
static int rvu_setup_msix_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf, err;
	int nvecs, offset, max_msix;
	struct rvu_pfvf *pfvf;
	u64 cfg, phy_addr;
	dma_addr_t iova;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		/* If PF is not enabled, nothing to do */
		if (!((cfg >> 20) & 0x01))
			continue;

		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

		pfvf = &rvu->pf[pf];
		/* Get num of MSIX vectors attached to this PF */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
		pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
		rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

		/* Alloc msix bitmap for this PF */
		err = rvu_alloc_bitmap(&pfvf->msix);
		if (err)
			return err;

		/* Allocate memory for MSIX vector to RVU block LF mapping */
		pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
						sizeof(u16), GFP_KERNEL);
		if (!pfvf->msix_lfmap)
			return -ENOMEM;

		/* For PF0 (AF) firmware will set msix vector offsets for
		 * AF, block AF and PF0_INT vectors, so jump to VFs.
		 */
		if (!pf)
			goto setup_vfmsix;

		/* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
		 * These are allocated on driver init and never freed,
		 * so no need to set 'msix_lfmap' for these.
		 */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
		nvecs = (cfg >> 12) & 0xFF;
		cfg &= ~0x7FFULL;
		offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
		rvu_write64(rvu, BLKADDR_RVUM,
			    RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
		/* Alloc msix bitmap for VFs */
		for (vf = 0; vf < numvfs; vf++) {
			pfvf = &rvu->hwvf[hwvf + vf];
			/* Get num of MSIX vectors attached to this VF */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_PFX_MSIX_CFG(pf));
			pfvf->msix.max = (cfg & 0xFFF) + 1;
			rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

			/* Alloc msix bitmap for this VF */
			err = rvu_alloc_bitmap(&pfvf->msix);
			if (err)
				return err;

			pfvf->msix_lfmap =
				devm_kcalloc(rvu->dev, pfvf->msix.max,
					     sizeof(u16), GFP_KERNEL);
			if (!pfvf->msix_lfmap)
				return -ENOMEM;

			/* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
			 * These are allocated on driver init and never freed,
			 * so no need to set 'msix_lfmap' for these.
			 */
			cfg = rvu_read64(rvu, BLKADDR_RVUM,
					 RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
			nvecs = (cfg >> 12) & 0xFF;
			cfg &= ~0x7FFULL;
			offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
				    cfg | offset);
		}
	}

	/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
	 * create an IOMMU mapping for the physical address configured by
	 * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
	 */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	if (rvu->fwdata && rvu->fwdata->msixtr_base)
		phy_addr = rvu->fwdata->msixtr_base;
	else
		phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

	iova = dma_map_resource(rvu->dev, phy_addr,
				max_msix * PCI_MSIX_ENTRY_SIZE,
				DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(rvu->dev, iova))
		return -ENOMEM;

	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
	rvu->msix_base_iova = iova;
	rvu->msixtr_base_phy = phy_addr;

	return 0;
}
static void rvu_reset_msix(struct rvu *rvu)
{
	/* Restore msixtr base register */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
		    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int id, max_msix;
	u64 cfg;

	rvu_npa_freemem(rvu);
	rvu_npc_freemem(rvu);
	rvu_nix_freemem(rvu);

	/* Free block LF bitmaps */
	for (id = 0; id < BLK_COUNT; id++) {
		block = &hw->block[id];
		kfree(block->lf.bmap);
	}

	/* Free MSIX bitmaps */
	for (id = 0; id < hw->total_pfs; id++) {
		pfvf = &rvu->pf[id];
		kfree(pfvf->msix.bmap);
	}

	for (id = 0; id < hw->total_vfs; id++) {
		pfvf = &rvu->hwvf[id];
		kfree(pfvf->msix.bmap);
	}

	/* Unmap MSIX vector base IOVA mapping */
	if (!rvu->msix_base_iova)
		return;
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	max_msix = cfg & 0xFFFFF;
	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
			   max_msix * PCI_MSIX_ENTRY_SIZE,
			   DMA_BIDIRECTIONAL, 0);

	rvu_reset_msix(rvu);
	mutex_destroy(&rvu->rsrc_lock);
}
static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int pf, vf, numvfs, hwvf;
	struct rvu_pfvf *pfvf;
	u64 *mac;

	for (pf = 0; pf < hw->total_pfs; pf++) {
		/* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */
		if (!pf)
			goto lbkvf;
		if (!is_pf_cgxmapped(rvu, pf))
			continue;
		/* Assign MAC address to PF */
		pfvf = &rvu->pf[pf];
		if (rvu->fwdata && pf < PF_MACNUM_MAX) {
			mac = &rvu->fwdata->pf_macs[pf];
			if (*mac)
				u64_to_ether_addr(*mac, pfvf->mac_addr);
			else
				eth_random_addr(pfvf->mac_addr);
		} else {
			eth_random_addr(pfvf->mac_addr);
		}
		ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
		/* Assign MAC address to VFs */
		rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
		for (vf = 0; vf < numvfs; vf++, hwvf++) {
			pfvf = &rvu->hwvf[hwvf];
			if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
				mac = &rvu->fwdata->vf_macs[hwvf];
				if (*mac)
					u64_to_ether_addr(*mac, pfvf->mac_addr);
				else
					eth_random_addr(pfvf->mac_addr);
			} else {
				eth_random_addr(pfvf->mac_addr);
			}
			ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
		}
	}
}
static int rvu_fwdata_init(struct rvu *rvu)
{
	u64 fwdbase;
	int err;

	/* Get firmware data base address */
	err = cgx_get_fwdata_base(&fwdbase);
	if (err)
		goto fail;
	rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
	if (!rvu->fwdata)
		goto fail;
	if (!is_rvu_fwdata_valid(rvu)) {
		dev_err(rvu->dev,
			"Mismatch in 'fwdata' struct btw kernel and firmware\n");
		iounmap(rvu->fwdata);
		rvu->fwdata = NULL;
		return -EINVAL;
	}
	return 0;
fail:
	dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
	return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
	if (rvu->fwdata)
		iounmap(rvu->fwdata);
}
static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init NIX LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	block->lf.max = cfg & 0xFFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_NIX;
	block->lfshift = 8;
	block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
	block->lfcfg_reg = NIX_PRIV_LFX_CFG;
	block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NIX_AF_LF_RST;

	sprintf(block->name, "NIX%d", blkid);
	rvu->nix_blkaddr[blkid] = blkaddr;
	return rvu_alloc_bitmap(&block->lf);
}

static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid;
	u64 cfg;

	/* Init CPT LF's bitmap */
	block = &hw->block[blkaddr];
	if (!block->implemented)
		return 0;
	blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
	cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	block->lf.max = cfg & 0xFF;
	block->addr = blkaddr;
	block->type = BLKTYPE_CPT;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
	block->lfcfg_reg = CPT_PRIV_LFX_CFG;
	block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
	block->lfreset_reg = CPT_AF_LF_RST;

	sprintf(block->name, "CPT%d", blkid);
	return rvu_alloc_bitmap(&block->lf);
}

static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVID_OCTEONTX2_LBK, pdev);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	lbk_const = readq(base + LBK_CONST);

	/* cache fifo size */
	rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

	iounmap(base);
err_put:
	pci_dev_put(pdev);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkid, err;
	u64 cfg;

	/* Get HW supported max RVU PF & VF count */
	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
	hw->total_pfs = (cfg >> 32) & 0xFF;
	hw->total_vfs = (cfg >> 20) & 0xFFF;
	hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

	/* Init NPA LF's bitmap */
	block = &hw->block[BLKADDR_NPA];
	if (!block->implemented)
		goto nix;
	cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
	block->lf.max = (cfg >> 16) & 0xFFF;
	block->addr = BLKADDR_NPA;
	block->type = BLKTYPE_NPA;
	block->lfshift = 8;
	block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
	block->lfcfg_reg = NPA_PRIV_LFX_CFG;
	block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
	block->lfreset_reg = NPA_AF_LF_RST;

	sprintf(block->name, "NPA");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NPA LF bitmap\n", __func__);
		return err;
	}

nix:
	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
		return err;
	}

	err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
		return err;
	}

	/* Init SSO group's bitmap */
	block = &hw->block[BLKADDR_SSO];
	if (!block->implemented)
		goto ssow;
	cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_SSO;
	block->type = BLKTYPE_SSO;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
	block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
	block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
	block->lfreset_reg = SSO_AF_LF_HWGRP_RST;

	sprintf(block->name, "SSO GROUP");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSO LF bitmap\n", __func__);
		return err;
	}

ssow:
	/* Init SSO workslot's bitmap */
	block = &hw->block[BLKADDR_SSOW];
	if (!block->implemented)
		goto tim;
	block->lf.max = (cfg >> 56) & 0xFF;
	block->addr = BLKADDR_SSOW;
	block->type = BLKTYPE_SSOW;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
	block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
	block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
	block->lfreset_reg = SSOW_AF_LF_HWS_RST;

	sprintf(block->name, "SSOWS");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate SSOW LF bitmap\n", __func__);
		return err;
	}

tim:
	/* Init TIM LF's bitmap */
	block = &hw->block[BLKADDR_TIM];
	if (!block->implemented)
		goto cpt;
	cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
	block->lf.max = cfg & 0xFFFF;
	block->addr = BLKADDR_TIM;
	block->type = BLKTYPE_TIM;
	block->multislot = true;
	block->lfshift = 3;
	block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
	block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
	block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
	block->lfcfg_reg = TIM_PRIV_LFX_CFG;
	block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
	block->lfreset_reg = TIM_AF_LF_RST;

	sprintf(block->name, "TIM");
	err = rvu_alloc_bitmap(&block->lf);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate TIM LF bitmap\n", __func__);
		return err;
	}

cpt:
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
		return err;
	}
	err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
		return err;
	}

	/* Allocate memory for PFVF data */
	rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
			       sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->pf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
				 sizeof(struct rvu_pfvf), GFP_KERNEL);
	if (!rvu->hwvf) {
		dev_err(rvu->dev,
			"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
		return -ENOMEM;
	}

	mutex_init(&rvu->rsrc_lock);

	rvu_fwdata_init(rvu);

	err = rvu_setup_msix_resources(rvu);
	if (err) {
		dev_err(rvu->dev,
			"%s: Failed to setup MSIX resources\n", __func__);
		return err;
	}

	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;

		/* Allocate memory for block LF/slot to pcifunc mapping info */
		block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
					     sizeof(u16), GFP_KERNEL);
		if (!block->fn_map) {
			err = -ENOMEM;
			goto msix_err;
		}

		/* Scan all blocks to check if low level firmware has
		 * already provisioned any of the resources to a PF/VF.
		 */
		rvu_scan_block(rvu, block);
	}

	err = rvu_set_channels_base(rvu);
	if (err)
		goto msix_err;

	err = rvu_npc_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
		goto npc_err;
	}

	err = rvu_cgx_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
		goto cgx_err;
	}

	/* Assign MACs for CGX mapped functions */
	rvu_setup_pfvf_macaddress(rvu);

	err = rvu_npa_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
		goto npa_err;
	}

	rvu_get_lbk_bufsize(rvu);

	err = rvu_nix_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
		goto nix_err;
	}

	err = rvu_sdp_init(rvu);
	if (err) {
		dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
		goto nix_err;
	}

	rvu_program_channels(rvu);

	return 0;

nix_err:
	rvu_nix_freemem(rvu);
npa_err:
	rvu_npa_freemem(rvu);
cgx_err:
	rvu_cgx_exit(rvu);
npc_err:
	rvu_npc_freemem(rvu);
	rvu_fwdata_exit(rvu);
msix_err:
	rvu_reset_msix(rvu);
	return err;
}
/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
	if (!aq)
		return;

	qmem_free(rvu->dev, aq->inst);
	qmem_free(rvu->dev, aq->res);
	devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
		 int qsize, int inst_size, int res_size)
{
	struct admin_queue *aq;
	int err;

	*ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
	if (!*ad_queue)
		return -ENOMEM;
	aq = *ad_queue;

	/* Alloc memory for instructions i.e AQ */
	err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
	if (err) {
		devm_kfree(rvu->dev, aq);
		return err;
	}

	/* Alloc memory for results */
	err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
	if (err) {
		rvu_aq_free(rvu, aq);
		return err;
	}

	spin_lock_init(&aq->lock);
	return 0;
}
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
			   struct ready_msg_rsp *rsp)
{
	if (rvu->fwdata) {
		rsp->rclk_freq = rvu->fwdata->rclk;
		rsp->sclk_freq = rvu->fwdata->sclk;
	}
	return 0;
}
/* Get current count of a RVU block's LF/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
	switch (blkaddr) {
	case BLKADDR_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKADDR_NIX0:
	case BLKADDR_NIX1:
		return pfvf->nixlf ? 1 : 0;
	case BLKADDR_SSO:
		return pfvf->sso;
	case BLKADDR_SSOW:
		return pfvf->ssow;
	case BLKADDR_TIM:
		return pfvf->timlfs;
	case BLKADDR_CPT0:
		return pfvf->cptlfs;
	case BLKADDR_CPT1:
		return pfvf->cpt1_lfs;
	}
	return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
	switch (blktype) {
	case BLKTYPE_NPA:
		return pfvf->npalf ? 1 : 0;
	case BLKTYPE_NIX:
		return pfvf->nixlf ? 1 : 0;
	case BLKTYPE_SSO:
		return !!pfvf->sso;
	case BLKTYPE_SSOW:
		return !!pfvf->ssow;
	case BLKTYPE_TIM:
		return !!pfvf->timlfs;
	case BLKTYPE_CPT:
		return pfvf->cptlfs || pfvf->cpt1_lfs;
	}

	return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf;

	if (!is_pf_func_valid(rvu, pcifunc))
		return false;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Check if this PFFUNC has a LF of type blktype attached */
	if (!is_blktype_attached(pfvf, blktype))
		return false;

	return true;
}
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
			   int pcifunc, int slot)
{
	u64 val;

	val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
	rvu_write64(rvu, block->addr, block->lookup_reg, val);

	/* Wait for the lookup to finish */
	/* TODO: put some timeout here */
	while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
		;

	val = rvu_read64(rvu, block->addr, block->lookup_reg);

	/* Check LF valid bit */
	if (!(val & (1ULL << 12)))
		return -1;

	return (val & 0xFFF);
}
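/* Bit usage of 'lookup_reg' as exercised above (inferred from this code;
 * the HW manual is authoritative):
 *   [39:24] pcifunc to look up
 *   [23:16] slot number within that func
 *   [13]    trigger/busy: set to start, cleared by HW when done
 *   [12]    result valid: set if an LF matched
 *   [11:0]  matched LF's HW index
 */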
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
			      u16 global_slot, u16 *slot_in_block)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int numlfs, total_lfs = 0, nr_blocks = 0;
	int i, num_blkaddr[BLK_COUNT] = { 0 };
	struct rvu_block *block;
	int blkaddr;
	u16 start_slot;

	if (!is_blktype_attached(pfvf, blktype))
		return -ENODEV;

	/* Get all the block addresses from which LFs are attached to
	 * the given pcifunc in num_blkaddr[].
	 */
	for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
		block = &rvu->hw->block[blkaddr];
		if (block->type != blktype)
			continue;
		if (!is_block_implemented(rvu->hw, blkaddr))
			continue;

		numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
		if (numlfs) {
			total_lfs += numlfs;
			num_blkaddr[nr_blocks] = blkaddr;
			nr_blocks++;
		}
	}

	if (global_slot >= total_lfs)
		return -ENODEV;

	/* Based on the given global slot number retrieve the
	 * correct block address out of all attached block
	 * addresses and slot number in that block.
	 */
	total_lfs = 0;
	blkaddr = -ENODEV;
	for (i = 0; i < nr_blocks; i++) {
		numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
		total_lfs += numlfs;
		if (global_slot < total_lfs) {
			blkaddr = num_blkaddr[i];
			start_slot = total_lfs - numlfs;
			*slot_in_block = global_slot - start_slot;
			break;
		}
	}

	return blkaddr;
}
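/* Worked example: a pcifunc with 2 LFs on CPT0 and 3 LFs on CPT1 has
 * total_lfs = 5 and num_blkaddr[] = {CPT0, CPT1}. For global_slot = 3
 * the second pass runs the total 2 -> 5, so slot 3 falls in CPT1 and the
 * function returns BLKADDR_CPT1 with *slot_in_block = 3 - 2 = 1.
 */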
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
	if (blkaddr < 0)
		return;

	if (blktype == BLKTYPE_NIX)
		rvu_nix_reset_mac(pfvf, pcifunc);

	block = &hw->block[blkaddr];

	num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
	if (!num_lfs)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
		if (lf < 0) /* This should never happen */
			continue;

		/* Disable the LF */
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), 0x00ULL);

		/* Update SW maintained mapping info as well */
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, false);

		/* Free the resource */
		rvu_free_rsrc(&block->lf, lf);

		/* Clear MSIX vector offset for this LF */
		rvu_clear_msix_offset(rvu, pfvf, block, lf);
	}
}
static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
			    u16 pcifunc)
{
	struct rvu_hwinfo *hw = rvu->hw;
	bool detach_all = true;
	struct rvu_block *block;
	int blkid;

	mutex_lock(&rvu->rsrc_lock);

	/* Check for partial resource detach */
	if (detach && detach->partial)
		detach_all = false;

	/* Check for RVU block's LFs attached to this func,
	 * if so, detach them.
	 */
	for (blkid = 0; blkid < BLK_COUNT; blkid++) {
		block = &hw->block[blkid];
		if (!block->lf.bmap)
			continue;
		if (!detach_all && detach) {
			if (blkid == BLKADDR_NPA && !detach->npalf)
				continue;
			else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
				continue;
			else if ((blkid == BLKADDR_SSO) && !detach->sso)
				continue;
			else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
				continue;
			else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
				continue;
			else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
				continue;
			else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
				continue;
		}
		rvu_detach_block(rvu, pcifunc, block->type);
	}

	mutex_unlock(&rvu->rsrc_lock);
	return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
				      struct rsrc_detach *detach,
				      struct msg_rsp *rsp)
{
	return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int blkaddr = BLKADDR_NIX0, vf;
	struct rvu_pfvf *pf;

	pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

	/* All CGX mapped PFs are set with assigned NIX block during init */
	if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
		blkaddr = pf->nix_blkaddr;
	} else if (is_afvf(pcifunc)) {
		vf = pcifunc - 1;
		/* Assign NIX based on VF number. All even numbered VFs get
		 * NIX0 and odd numbered gets NIX1
		 */
		blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
		/* NIX1 is not present on all silicons */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			blkaddr = BLKADDR_NIX0;
	}

	/* if SDP1 then the blkaddr is NIX1 */
	if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
		blkaddr = BLKADDR_NIX1;

	switch (blkaddr) {
	case BLKADDR_NIX1:
		pfvf->nix_blkaddr = BLKADDR_NIX1;
		pfvf->nix_rx_intf = NIX_INTFX_RX(1);
		pfvf->nix_tx_intf = NIX_INTFX_TX(1);
		break;
	case BLKADDR_NIX0:
	default:
		pfvf->nix_blkaddr = BLKADDR_NIX0;
		pfvf->nix_rx_intf = NIX_INTFX_RX(0);
		pfvf->nix_tx_intf = NIX_INTFX_TX(0);
		break;
	}

	return pfvf->nix_blkaddr;
}
static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
				  u16 pcifunc, struct rsrc_attach *attach)
{
	int blkaddr;

	switch (blktype) {
	case BLKTYPE_NIX:
		blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
		break;
	case BLKTYPE_CPT:
		if (attach->hdr.ver < RVU_MULTI_BLK_VER)
			return rvu_get_blkaddr(rvu, blktype, 0);
		blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
						BLKADDR_CPT0;
		if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
			return -ENODEV;
		break;
	default:
		return rvu_get_blkaddr(rvu, blktype, 0);
	}

	if (is_block_implemented(rvu->hw, blkaddr))
		return blkaddr;

	return -ENODEV;
}
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
			     int num_lfs, struct rsrc_attach *attach)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int slot, lf;
	int blkaddr;
	u64 cfg;

	if (!num_lfs)
		return;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return;

	for (slot = 0; slot < num_lfs; slot++) {
		/* Allocate the resource */
		lf = rvu_alloc_rsrc(&block->lf);
		if (lf < 0)
			return;

		cfg = (1ULL << 63) | (pcifunc << 8) | slot;
		rvu_write64(rvu, blkaddr, block->lfcfg_reg |
			    (lf << block->lfshift), cfg);
		rvu_update_rsrc_map(rvu, pfvf, block,
				    pcifunc, lf, true);

		/* Set start MSIX vector for this LF within this PF/VF */
		rvu_set_msix_offset(rvu, pfvf, block, lf);
	}
}
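/* LF config entry written above, as read back by rvu_scan_block() and
 * cleared by rvu_detach_block() in this file:
 *   [63]   LF is attached/valid
 *   [23:8] owning pcifunc
 *   [7:0]  slot number as seen by that func
 * Writing 0x0 detaches the LF from its owner.
 */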
static int rvu_check_rsrc_availability(struct rvu *rvu,
				       struct rsrc_attach *req, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int free_lfs, mappedlfs, blkaddr;
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;

	/* Only one NPA LF can be attached */
	if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
		block = &hw->block[BLKADDR_NPA];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->npalf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NPA\n",
			pcifunc);
		return -EINVAL;
	}

	/* Only one NIX LF can be attached */
	if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (!free_lfs)
			goto fail;
	} else if (req->nixlf) {
		dev_err(&rvu->pdev->dev,
			"Func 0x%x: Invalid req, already has NIX\n",
			pcifunc);
		return -EINVAL;
	}

	if (req->sso) {
		block = &hw->block[BLKADDR_SSO];
		/* Is request within limits ? */
		if (req->sso > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSO req, %d > max %d\n",
				pcifunc, req->sso, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		/* Check if additional resources are available */
		if (req->sso > mappedlfs &&
		    ((req->sso - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->ssow) {
		block = &hw->block[BLKADDR_SSOW];
		if (req->ssow > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid SSOW req, %d > max %d\n",
				pcifunc, req->ssow, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->ssow > mappedlfs &&
		    ((req->ssow - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->timlfs) {
		block = &hw->block[BLKADDR_TIM];
		if (req->timlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid TIMLF req, %d > max %d\n",
				pcifunc, req->timlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->timlfs > mappedlfs &&
		    ((req->timlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	if (req->cptlfs) {
		blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
						 pcifunc, req);
		if (blkaddr < 0)
			return blkaddr;
		block = &hw->block[blkaddr];
		if (req->cptlfs > block->lf.max) {
			dev_err(&rvu->pdev->dev,
				"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
				pcifunc, req->cptlfs, block->lf.max);
			return -EINVAL;
		}
		mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		free_lfs = rvu_rsrc_free_count(&block->lf);
		if (req->cptlfs > mappedlfs &&
		    ((req->cptlfs - mappedlfs) > free_lfs))
			goto fail;
	}

	return 0;

fail:
	dev_info(rvu->dev, "Request for %s failed\n", block->name);
	return -ENOSPC;
}
static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
				       struct rsrc_attach *attach)
{
	int blkaddr, num_lfs;

	blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
					 attach->hdr.pcifunc, attach);
	if (blkaddr < 0)
		return false;

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
					blkaddr);
	/* Requester already has LFs from given block ? */
	return !!num_lfs;
}
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
				      struct rsrc_attach *attach,
				      struct msg_rsp *rsp)
{
	u16 pcifunc = attach->hdr.pcifunc;
	int err;

	/* If first request, detach all existing attached resources */
	if (!attach->modify)
		rvu_detach_rsrcs(rvu, NULL, pcifunc);

	mutex_lock(&rvu->rsrc_lock);

	/* Check if the request can be accommodated */
	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
	if (err)
		goto exit;

	/* Now attach the requested resources */
	if (attach->npalf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);

	if (attach->nixlf)
		rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);

	if (attach->sso) {
		/* RVU func doesn't know which exact LF or slot is attached
		 * to it, it always sees as slot 0,1,2. So for a 'modify'
		 * request, simply detach all existing attached LFs/slots
		 * and attach a fresh.
		 */
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
				 attach->sso, attach);
	}

	if (attach->ssow) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
				 attach->ssow, attach);
	}

	if (attach->timlfs) {
		if (attach->modify)
			rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
				 attach->timlfs, attach);
	}

	if (attach->cptlfs) {
		if (attach->modify &&
		    rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
			rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
		rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
				 attach->cptlfs, attach);
	}

exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}
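/* Typical flow (sketch, from the mbox consumer's point of view): a PF/VF
 * first sends an attach request with modify = 0, which drops anything it
 * already owns and grants e.g. its NPA/NIX LFs; a later request with
 * modify = 1 can grow a multislot count (say SSO), in which case the
 * existing LFs are detached and re-attached as a whole, per the comment
 * above, since the func only ever sees slots 0, 1, 2, ...
 */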
static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
			       int blkaddr, int lf)
{
	u16 vec;

	if (!pfvf->msix_lfmap)
		return MSIX_VECTOR_INVALID;

	for (vec = 0; vec < pfvf->msix.max; vec++) {
		if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
			return vec;
	}
	return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Check and alloc MSIX vectors, must be contiguous */
	if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
		return;

	offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

	/* Config MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

	/* Update the bitmap as well */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				  struct rvu_block *block, int lf)
{
	u16 nvecs, vec, offset;
	u64 cfg;

	cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
			 (lf << block->lfshift));
	nvecs = (cfg >> 12) & 0xFF;

	/* Clear MSIX offset in LF */
	rvu_write64(rvu, block->addr, block->msixcfg_reg |
		    (lf << block->lfshift), cfg & ~0x7FFULL);

	offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

	/* Update the mapping */
	for (vec = 0; vec < nvecs; vec++)
		pfvf->msix_lfmap[offset + vec] = 0;

	/* Free the same in MSIX bitmap */
	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
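/* msix_lfmap[] stores one MSIX_BLKLF(blkaddr, lf) cookie per vector so
 * rvu_get_msix_offset() can recover an LF's vector offset later. E.g. an
 * LF whose INT_CFG requests 4 vectors and is granted offset 8 owns
 * vectors 8..11, and all four map entries carry the same cookie.
 */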
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
				 struct msix_offset_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int lf, slot, blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->msix.bmap)
		return 0;

	/* Set MSIX offsets for each block's LFs attached to this PF/VF */
	lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
	rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

	/* Get BLKADDR from which LFs are attached to pcifunc */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0) {
		rsp->nix_msixoff = MSIX_VECTOR_INVALID;
	} else {
		lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
		rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
	}

	rsp->sso = pfvf->sso;
	for (slot = 0; slot < rsp->sso; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
		rsp->sso_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
	}

	rsp->ssow = pfvf->ssow;
	for (slot = 0; slot < rsp->ssow; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
		rsp->ssow_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
	}

	rsp->timlfs = pfvf->timlfs;
	for (slot = 0; slot < rsp->timlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
		rsp->timlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
	}

	rsp->cptlfs = pfvf->cptlfs;
	for (slot = 0; slot < rsp->cptlfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
		rsp->cptlf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
	}

	rsp->cpt1_lfs = pfvf->cpt1_lfs;
	for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
		lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
		rsp->cpt1_lf_msixoff[slot] =
			rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
	}

	return 0;
}
int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
				   struct free_rsrcs_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	mutex_lock(&rvu->rsrc_lock);

	block = &hw->block[BLKADDR_NPA];
	rsp->npa = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX0];
	rsp->nix = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_NIX1];
	rsp->nix1 = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSO];
	rsp->sso = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_SSOW];
	rsp->ssow = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_TIM];
	rsp->tim = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT0];
	rsp->cpt = rvu_rsrc_free_count(&block->lf);

	block = &hw->block[BLKADDR_CPT1];
	rsp->cpt1 = rvu_rsrc_free_count(&block->lf);

	if (rvu->hw->cap.nix_fixed_txschq_mapping) {
		rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
		/* NIX1 */
		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
	} else {
		nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);

		if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
			goto out;

		nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
		rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
				rvu_rsrc_free_count(&txsch->schq);

		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
		rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
				rvu_rsrc_free_count(&txsch->schq);
	}

	rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
out:
	rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
			    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	u16 vf, numvfs;
	u64 cfg;

	vf = pcifunc & RVU_PFVF_FUNC_MASK;
	cfg = rvu_read64(rvu, BLKADDR_RVUM,
			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
	numvfs = (cfg >> 12) & 0xFF;

	if (vf && vf <= numvfs)
		__rvu_flr_handler(rvu, pcifunc);
	else
		return RVU_INVALID_VF_ID;

	return 0;
}

int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
				struct get_hw_cap_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;

	rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
	rsp->nix_shaping = hw->cap.nix_shaping;

	return 0;
}
int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_pfvf *pfvf;
	int blkaddr, nixlf;
	u16 target;

	/* Only PF can add VF permissions */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
		return -EOPNOTSUPP;

	target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
	pfvf = rvu_get_pfvf(rvu, target);

	if (req->flags & RESET_VF_PERM) {
		pfvf->flags &= RVU_CLEAR_VF_PERM;
	} else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
		 (req->flags & VF_TRUSTED)) {
		change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
		/* disable multicast and promisc entries */
		if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
			blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
			if (blkaddr < 0)
				return 0;
			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
					   target, 0);
			if (nixlf < 0)
				return 0;
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_ALLMULTI_ENTRY,
						     false);
			npc_enadis_default_mce_entry(rvu, target, nixlf,
						     NIXLF_PROMISC_ENTRY,
						     false);
		}
	}

	return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
				struct mbox_msghdr *req)
{
	struct rvu *rvu = pci_get_drvdata(mbox->pdev);

	/* Check if valid, if not reply with a invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		goto bad_message;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			mbox, devid,					\
			sizeof(struct _rsp_type));			\
		/* some handlers should complete even if reply */	\
		/* could not be allocated */				\
		if (!rsp &&						\
		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
		    _id != MBOX_MSG_VF_FLR)				\
			return -ENOMEM;					\
		if (rsp) {						\
			rsp->hdr.id = _id;				\
			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
			rsp->hdr.pcifunc = req->pcifunc;		\
			rsp->hdr.rc = 0;				\
		}							\
									\
		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
						    (struct _req_type *)req, \
						    rsp);		\
		if (rsp && err)						\
			rsp->hdr.rc = err;				\
									\
		trace_otx2_msg_process(mbox->pdev, _id, err);		\
		return rsp ? err : -ENOMEM;				\
	}
MBOX_MESSAGES
#undef M

bad_message:
	default:
		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
		return -ENODEV;
	}
}
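/* For illustration, roughly how one instance of the M() macro above
 * expands (sketch; the real case list is generated from MBOX_MESSAGES in
 * mbox.h). For the READY message it behaves like:
 *
 *	case MBOX_MSG_READY: {
 *		struct ready_msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct ready_msg_rsp *)otx2_mbox_alloc_msg(
 *			mbox, devid, sizeof(struct ready_msg_rsp));
 *		// fill rsp->hdr, then dispatch:
 *		err = rvu_mbox_handler_ready(rvu,
 *					     (struct msg_req *)req, rsp);
 *		...
 *	}
 */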
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	int offset, err, id, devid;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk;
	mbox = &mw->mbox;
	mdev = &mbox->dev[devid];

	/* Process received mbox messages */
	req_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk[devid].num_msgs == 0)
		return;

	offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
		msg = mdev->mbase + offset;

		/* Set which PF/VF sent this message based on mbox IRQ */
		switch (type) {
		case TYPE_AFPF:
			msg->pcifunc &=
				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
			break;
		case TYPE_AFVF:
			msg->pcifunc &=
				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
			break;
		}

		err = rvu_process_mbox_msg(mbox, devid, msg);
		if (!err) {
			offset = mbox->rx_start + msg->next_msgoff;
			continue;
		}

		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, rvu_get_pf(msg->pcifunc),
				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
		else
			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
				 err, otx2_mbox_id2name(msg->id),
				 msg->id, devid);
	}
	mw->mbox_wrk[devid].num_msgs = 0;

	/* Send mbox responses to VF/PF */
	otx2_mbox_msg_send(mbox, devid);
}

static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_handler(mwork, TYPE_AFVF);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
{
	struct rvu *rvu = mwork->rvu;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct mbox_wq_info *mw;
	struct otx2_mbox *mbox;
	int offset, id, devid;

	switch (type) {
	case TYPE_AFPF:
		mw = &rvu->afpf_wq_info;
		break;
	case TYPE_AFVF:
		mw = &rvu->afvf_wq_info;
		break;
	default:
		return;
	}

	devid = mwork - mw->mbox_wrk_up;
	mbox = &mw->mbox_up;
	mdev = &mbox->dev[devid];

	rsp_hdr = mdev->mbase + mbox->rx_start;
	if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
		dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
		return;
	}

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(rvu->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(rvu->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(rvu->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}
end:
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}
	mw->mbox_wrk_up[devid].up_num_msgs = 0;

	otx2_mbox_reset(mbox, devid);
}

static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
}

static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
{
	struct rvu_work *mwork = container_of(work, struct rvu_work, work);

	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
				int num, int type)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int region;
	u64 bar4;

	/* For cn10k platform VF mailbox regions of a PF follows after the
	 * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from
	 * RVU_PF_VF_BAR4_ADDR register.
	 */
	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (hw->cap.per_pf_mbox_regs) {
				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
						  RVU_AF_PFX_BAR4_ADDR(0)) +
						  MBOX_SIZE;
				bar4 += region * MBOX_SIZE;
			} else {
				bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
				bar4 += region * MBOX_SIZE;
			}
			mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
			if (!mbox_addr[region])
				goto error;
		}
		return 0;
	}

	/* For cn10k platform AF <-> PF mailbox region of a PF is read from per
	 * PF registers. Whereas for Octeontx2 it is read from
	 * RVU_AF_PF_BAR4_ADDR register.
	 */
	for (region = 0; region < num; region++) {
		if (hw->cap.per_pf_mbox_regs) {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PFX_BAR4_ADDR(region));
		} else {
			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
					  RVU_AF_PF_BAR4_ADDR);
			bar4 += region * MBOX_SIZE;
		}
		mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
		if (!mbox_addr[region])
			goto error;
	}
	return 0;

error:
	while (region--)
		iounmap((void __iomem *)mbox_addr[region]);
	return -ENOMEM;
}
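/* Address math example (assuming the usual 64KB MBOX_SIZE): on OcteonTx2
 * a single base comes from RVU_AF_PF_BAR4_ADDR and PFn's AF mailbox sits
 * at base + n * MBOX_SIZE; on cn10k each PF has its own base in
 * RVU_AF_PFX_BAR4_ADDR(n), and the AF's VFn mailbox follows PF0's AF-PF
 * region at that base + MBOX_SIZE + n * MBOX_SIZE.
 */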
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
			 int type, int num,
			 void (mbox_handler)(struct work_struct *),
			 void (mbox_up_handler)(struct work_struct *))
{
	int err = -EINVAL, i, dir, dir_up;
	void __iomem *reg_base;
	struct rvu_work *mwork;
	void **mbox_regions;
	const char *name;

	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
	if (!mbox_regions)
		return -ENOMEM;

	switch (type) {
	case TYPE_AFPF:
		name = "rvu_afpf_mailbox";
		dir = MBOX_DIR_AFPF;
		dir_up = MBOX_DIR_AFPF_UP;
		reg_base = rvu->afreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
		if (err)
			goto free_regions;
		break;
	case TYPE_AFVF:
		name = "rvu_afvf_mailbox";
		dir = MBOX_DIR_PFVF;
		dir_up = MBOX_DIR_PFVF_UP;
		reg_base = rvu->pfreg_base;
		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
		if (err)
			goto free_regions;
		break;
	default:
		return err;
	}

	mw->mbox_wq = alloc_workqueue(name,
				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
				      num);
	if (!mw->mbox_wq) {
		err = -ENOMEM;
		goto unmap_regions;
	}

	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
				    sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk) {
		err = -ENOMEM;
		goto exit;
	}

	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
				       sizeof(struct rvu_work), GFP_KERNEL);
	if (!mw->mbox_wrk_up) {
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
				     reg_base, dir, num);
	if (err)
		goto exit;

	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
				     reg_base, dir_up, num);
	if (err)
		goto exit;

	for (i = 0; i < num; i++) {
		mwork = &mw->mbox_wrk[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_handler);

		mwork = &mw->mbox_wrk_up[i];
		mwork->rvu = rvu;
		INIT_WORK(&mwork->work, mbox_up_handler);
	}
	kfree(mbox_regions);
	return 0;

exit:
	destroy_workqueue(mw->mbox_wq);
unmap_regions:
	while (num--)
		iounmap((void __iomem *)mbox_regions[num]);
free_regions:
	kfree(mbox_regions);
	return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
	struct otx2_mbox *mbox = &mw->mbox;
	struct otx2_mbox_dev *mdev;
	int devid;

	if (mw->mbox_wq) {
		destroy_workqueue(mw->mbox_wq);
		mw->mbox_wq = NULL;
	}

	for (devid = 0; devid < mbox->ndevs; devid++) {
		mdev = &mbox->dev[devid];
		if (mdev->hwbase)
			iounmap((void __iomem *)mdev->hwbase);
	}

	otx2_mbox_destroy(&mw->mbox);
	otx2_mbox_destroy(&mw->mbox_up);
}
static void rvu_queue_work(struct mbox_wq_info *mw, int first,
			   int mdevs, u64 intr)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* start from 0 */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;

		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value next time
		 * when the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
		 * pf->mbox.up_num_msgs holds the data for use in
		 * pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
		}
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
		}
	}
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu *rvu = (struct rvu *)rvu_irq;
	int vfs = rvu->vfs;
	u64 intr;

	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
	/* Clear interrupts */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);

	/* Handle VF interrupts */
	if (vfs > 64) {
		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);

		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);

	return IRQ_HANDLED;
}
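/* VF mailbox interrupts are split across two registers: the handler above
 * services VFs 0-63 via RVU_PF_VFPF_MBOX_INTX(0) and, when more than 64
 * VFs are enabled, VFs 64 and up via RVU_PF_VFPF_MBOX_INTX(1), with the
 * worker index offset by 64 accordingly.
 */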
static void rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));

	/* Enable mailbox interrupt for all PFs except PF0 i.e AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
		    INTR_MASK(hw->total_pfs) & ~1ULL);
}
static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
{
	struct rvu_block *block;
	int slot, lf, num_lfs;
	int err;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return;
	for (slot = 0; slot < num_lfs; slot++) {
		lf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (lf < 0)
			continue;

		/* Cleanup LF and reset it */
		if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
		else if (block->addr == BLKADDR_NPA)
			rvu_npa_lf_teardown(rvu, pcifunc, lf);
		else if ((block->addr == BLKADDR_CPT0) ||
			 (block->addr == BLKADDR_CPT1))
			rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
					    slot);

		err = rvu_lf_reset(rvu, block, lf);
		if (err)
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, lf);
	}
}
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	mutex_lock(&rvu->flr_lock);
	/* Reset order should reflect inter-block dependencies:
	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
	 * 2. Flush and reset SSO/SSOW
	 * 3. Cleanup pools (NPA)
	 */
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
	rvu_reset_lmt_map_tbl(rvu, pcifunc);
	rvu_detach_rsrcs(rvu, NULL, pcifunc);
	mutex_unlock(&rvu->flr_lock);
}
static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
{
        int reg = 0;

        /* pcifunc = 0(PF0) | (vf + 1) */
        __rvu_flr_handler(rvu, vf + 1);

        /* VFs 64-127 live in the second register set */
        if (vf >= 64) {
                reg = 1;
                vf = vf - 64;
        }

        /* Signal FLR finish and enable IRQ */
        rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

static void rvu_flr_handler(struct work_struct *work)
{
        struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
        struct rvu *rvu = flrwork->rvu;
        u16 pcifunc, numvfs, vf;
        u64 cfg;
        int pf;

        pf = flrwork - rvu->flr_wrk;
        if (pf >= rvu->hw->total_pfs) {
                rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
                return;
        }

        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        numvfs = (cfg >> 12) & 0xFF;
        pcifunc = pf << RVU_PFVF_PF_SHIFT;

        /* FLR the VFs of this PF first, then the PF itself */
        for (vf = 0; vf < numvfs; vf++)
                __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));

        __rvu_flr_handler(rvu, pcifunc);

        /* Signal FLR finish */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));

        /* Enable interrupt */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
}

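/* Scan one 64-bit VFFLR interrupt register ('start_vf' selects VFs 0-63
 * or 64-127) and queue FLR work for each pending AF VF.
 */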
static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
{
        int dev, vf, reg = 0;
        u64 intr;

        if (start_vf >= 64)
                reg = 1;

        intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
        if (!intr)
                return;

        for (vf = 0; vf < numvfs; vf++) {
                if (!(intr & BIT_ULL(vf)))
                        continue;

                /* Clear and disable the interrupt */
                rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
                rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));

                /* AF VF work entries follow the per-PF entries */
                dev = vf + start_vf + rvu->hw->total_pfs;
                queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
        }
}

static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
{
        struct rvu *rvu = (struct rvu *)rvu_irq;
        u64 intr;
        u8 pf;

        intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
        if (!intr)
                goto afvf_flr;

        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
                if (intr & (1ULL << pf)) {
                        /* clear interrupt */
                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
                                    BIT_ULL(pf));
                        /* Disable the interrupt */
                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
                                    BIT_ULL(pf));
                        /* PF is already dead, do only AF related operations */
                        queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
                }
        }

afvf_flr:
        rvu_afvf_queue_flr_work(rvu, 0, 64);
        if (rvu->vfs > 64)
                rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);

        return IRQ_HANDLED;
}

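/* ME (master enable) interrupts indicate that a PF/VF changed its bus
 * mastering state; no resource teardown is required here, only the
 * transaction-pending (TRPEND) and interrupt bits must be cleared.
 */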
static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
{
        int vf;

        /* Nothing to be done here other than clearing the
         * TRPEND bit.
         */
        for (vf = 0; vf < 64; vf++) {
                if (intr & (1ULL << vf)) {
                        /* clear the trpend due to ME(master enable) */
                        rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
                        /* clear interrupt */
                        rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
                }
        }
}

/* Handles ME interrupts from VFs of AF */
static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
{
        struct rvu *rvu = (struct rvu *)rvu_irq;
        int vfset;
        u64 intr;

        intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

        for (vfset = 0; vfset <= 1; vfset++) {
                intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
                if (intr)
                        rvu_me_handle_vfset(rvu, vfset, intr);
        }

        return IRQ_HANDLED;
}

/* Handles ME interrupts from PFs */
static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
{
        struct rvu *rvu = (struct rvu *)rvu_irq;
        u64 intr;
        u8 pf;

        intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);

        /* Nothing to be done here other than clearing the
         * TRPEND bit.
         */
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
                if (intr & (1ULL << pf)) {
                        /* clear the trpend due to ME(master enable) */
                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
                                    BIT_ULL(pf));
                        /* clear interrupt */
                        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
                                    BIT_ULL(pf));
                }
        }

        return IRQ_HANDLED;
}

static void rvu_unregister_interrupts(struct rvu *rvu)
{
        int irq;

        rvu_cpt_unregister_interrupts(rvu);

        /* Disable the Mbox interrupt */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

        /* Disable the PF FLR interrupt */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

        /* Disable the PF ME interrupt */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

        for (irq = 0; irq < rvu->num_vec; irq++) {
                if (rvu->irq_allocated[irq]) {
                        free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
                        rvu->irq_allocated[irq] = false;
                }
        }

        pci_free_irq_vectors(rvu->pdev);
        rvu->num_vec = 0;
}

static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
{
        struct rvu_pfvf *pfvf = &rvu->pf[0];
        int offset;

        offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

        /* Make sure there are enough MSIX vectors configured so that
         * VF interrupts can be handled. Offset equal to zero means
         * that PF vectors are not configured and overlap AF vectors.
         */
        return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
               offset;
}

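/* AF interrupt vectors (mbox, PF FLR, PF ME) sit at the start of the
 * MSI-X table; vectors for AF-VF mbox/FLR/ME handling start at the
 * offset programmed in RVU_PRIV_PF(0)_INT_CFG, read below as
 * 'pf_vec_start'.
 */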
static int rvu_register_interrupts(struct rvu *rvu)
{
        int ret, offset, pf_vec_start;

        rvu->num_vec = pci_msix_vec_count(rvu->pdev);

        rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
                                           NAME_SIZE, GFP_KERNEL);
        if (!rvu->irq_name)
                return -ENOMEM;

        rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
                                          sizeof(bool), GFP_KERNEL);
        if (!rvu->irq_allocated)
                return -ENOMEM;

        /* Enable MSI-X */
        ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
                                    rvu->num_vec, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(rvu->dev,
                        "RVUAF: Request for %d msix vectors failed, ret %d\n",
                        rvu->num_vec, ret);
                return ret;
        }

        /* Register mailbox interrupt handler */
        sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
        ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
                          rvu_mbox_intr_handler, 0,
                          &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for mbox irq\n");
                goto fail;
        }

        rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;

        /* Enable mailbox interrupts from all PFs */
        rvu_enable_mbox_intr(rvu);

        /* Register FLR interrupt handler */
        sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
                "RVUAF FLR");
        ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
                          rvu_flr_intr_handler, 0,
                          &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
                          rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for FLR\n");
                goto fail;
        }
        rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;

        /* Enable FLR interrupt for all PFs */
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));

        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

        /* Register ME interrupt handler */
        sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
                "RVUAF ME");
        ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
                          rvu_me_pf_intr_handler, 0,
                          &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
                          rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for ME\n");
                goto fail;
        }
        rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;

        /* Clear TRPEND bit for all PFs */
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
        /* Enable ME interrupt for all PFs */
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));

        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

        if (!rvu_afvf_msix_vectors_num_ok(rvu))
                return 0;

        /* Get PF MSIX vectors offset. */
        pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
                                  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;

        /* Register MBOX0 interrupt. */
        offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_mbox_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE],
                          rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for Mbox0\n");
                goto fail;
        }

        rvu->irq_allocated[offset] = true;

        /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
         * simply increment current offset by 1.
         */
        offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_mbox_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE],
                          rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for Mbox1\n");
                goto fail;
        }

        rvu->irq_allocated[offset] = true;

        /* Register FLR interrupt handler for AF's VFs */
        offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_flr_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_flr_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        /* Register ME interrupt handler for AF's VFs */
        offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_me_vf_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
        sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
        ret = request_irq(pci_irq_vector(rvu->pdev, offset),
                          rvu_me_vf_intr_handler, 0,
                          &rvu->irq_name[offset * NAME_SIZE], rvu);
        if (ret) {
                dev_err(rvu->dev,
                        "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
                goto fail;
        }
        rvu->irq_allocated[offset] = true;

        ret = rvu_cpt_register_interrupts(rvu);
        if (ret)
                goto fail;

        return 0;

fail:
        rvu_unregister_interrupts(rvu);
        return ret;
}

static void rvu_flr_wq_destroy(struct rvu *rvu)
{
        if (rvu->flr_wq) {
                destroy_workqueue(rvu->flr_wq);
                rvu->flr_wq = NULL;
        }
}

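/* One ordered, high-priority workqueue handles all FLR work items: one
 * per PF followed by one per possible AF VF (see the indexing in
 * rvu_flr_handler() and rvu_afvf_queue_flr_work()).
 */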
static int rvu_flr_init(struct rvu *rvu)
{
        int dev, num_devs;
        u64 cfg;
        int pf;

        /* Enable FLR for all PFs (bit 22 of RVU_PRIV_PF(x)_CFG) */
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
                            cfg | BIT_ULL(22));
        }

        rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
                                      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
                                      1);
        if (!rvu->flr_wq)
                return -ENOMEM;

        num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
        rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
                                    sizeof(struct rvu_work), GFP_KERNEL);
        if (!rvu->flr_wrk) {
                destroy_workqueue(rvu->flr_wq);
                return -ENOMEM;
        }

        for (dev = 0; dev < num_devs; dev++) {
                rvu->flr_wrk[dev].rvu = rvu;
                INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
        }

        mutex_init(&rvu->flr_lock);

        return 0;
}

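/* INTR_MASK(n) (see rvu.h) builds a mask with the low n bits set
 * (saturating to all-ones once n reaches 64), so the W1C/W1S writes
 * below touch only the vectors of the enabled VFs. With more than 64
 * VFs the second register set, index (1), covers the remainder.
 */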
static void rvu_disable_afvf_intr(struct rvu *rvu)
{
        int vfs = rvu->vfs;

        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
        if (vfs <= 64)
                return;

        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
                      INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
        int vfs = rvu->vfs;

        /* Clear any pending interrupts and enable AF VF interrupts for
         * the first 64 VFs.
         */
        /* Mbox */
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

        /* FLR and ME */
        rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

        /* Same for remaining VFs, if any. */
        if (vfs <= 64)
                return;

        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
                      INTR_MASK(vfs - 64));

        rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
        rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

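/* The LBK block is a separate PCI device; probe it directly and read
 * its constants register to learn how many loopback channels the
 * silicon provides.
 */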
int rvu_get_num_lbk_chans(void)
{
        struct pci_dev *pdev;
        void __iomem *base;
        int ret = -EIO;

        pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
                              NULL);
        if (!pdev)
                goto err;

        base = pci_ioremap_bar(pdev, 0);
        if (!base)
                goto err_put;

        /* Read number of available LBK channels from LBK(0)_CONST register. */
        ret = (readq(base + 0x10) >> 32) & 0xffff;
        iounmap(base);
err_put:
        pci_dev_put(pdev);
err:
        return ret;
}

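/* AF VFs are LBK-backed, so the number of VFs enabled below is clamped
 * to the available LBK channels (and to 62, since channel 63 is
 * reserved for switching packets between CGX mapped VFs).
 */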
static int rvu_enable_sriov(struct rvu *rvu)
{
        struct pci_dev *pdev = rvu->pdev;
        int err, chans, vfs;

        if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
                dev_warn(&pdev->dev,
                         "Skipping SRIOV enablement since not enough IRQs are available\n");
                return 0;
        }

        chans = rvu_get_num_lbk_chans();
        if (chans < 0)
                return chans;

        vfs = pci_sriov_get_totalvfs(pdev);

        /* Limit VFs in case we have more VFs than LBK channels available. */
        if (vfs > chans)
                vfs = chans;

        if (!vfs)
                return 0;

        /* LBK channel number 63 is used for switching packets between
         * CGX mapped VFs. Hence limit LBK pairs till 62 only.
         */
        if (vfs > 62)
                vfs = 62;

        /* Save VFs number for reference in VF interrupt handlers.
         * Since interrupts might start arriving during SRIOV enablement
         * ordinary API cannot be used to get number of enabled VFs.
         */
        rvu->vfs = vfs;

        err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
                            rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
        if (err)
                return err;

        rvu_enable_afvf_intr(rvu);
        /* Make sure IRQs are enabled before SRIOV. */
        mb();

        err = pci_enable_sriov(pdev, vfs);
        if (err) {
                rvu_disable_afvf_intr(rvu);
                rvu_mbox_destroy(&rvu->afvf_wq_info);
                return err;
        }

        return 0;
}

static void rvu_disable_sriov(struct rvu *rvu)
{
        rvu_disable_afvf_intr(rvu);
        rvu_mbox_destroy(&rvu->afvf_wq_info);
        pci_disable_sriov(rvu->pdev);
}

static void rvu_update_module_params(struct rvu *rvu)
{
        const char *default_pfl_name = "default";

        strscpy(rvu->mkex_pfl_name,
                mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
        strscpy(rvu->kpu_pfl_name,
                kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}

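/* Probe order matters: CSRs must be mapped and blocks reset before HW
 * resources are scanned, the AF-PF mailbox must exist before interrupts
 * are enabled, and SRIOV (AF VFs) comes last so VF mailbox/FLR/ME
 * handling is ready when VFs appear. The error path unwinds in exactly
 * the reverse order.
 */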
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct rvu *rvu;
        int err;

        rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
        if (!rvu)
                return -ENOMEM;

        rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
        if (!rvu->hw) {
                devm_kfree(dev, rvu);
                return -ENOMEM;
        }

        pci_set_drvdata(pdev, rvu);
        rvu->pdev = pdev;
        rvu->dev = &pdev->dev;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                goto err_freemem;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto err_disable_device;
        }

        err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "DMA mask config failed, abort\n");
                goto err_release_regions;
        }

        pci_set_master(pdev);

        rvu->ptp = ptp_get();
        if (IS_ERR(rvu->ptp)) {
                err = PTR_ERR(rvu->ptp);
                if (err == -EPROBE_DEFER)
                        goto err_release_regions;
                rvu->ptp = NULL;
        }

        /* Map Admin function CSRs */
        rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
        rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
        if (!rvu->afreg_base || !rvu->pfreg_base) {
                dev_err(dev, "Unable to map admin function CSRs, aborting\n");
                err = -ENOMEM;
                goto err_put_ptp;
        }

        /* Store module params in rvu structure */
        rvu_update_module_params(rvu);

        /* Check which blocks the HW supports */
        rvu_check_block_implemented(rvu);

        rvu_reset_all_blocks(rvu);

        rvu_setup_hw_capabilities(rvu);

        err = rvu_setup_hw_resources(rvu);
        if (err)
                goto err_put_ptp;

        /* Init mailbox btw AF and PFs */
        err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
                            rvu->hw->total_pfs, rvu_afpf_mbox_handler,
                            rvu_afpf_mbox_up_handler);
        if (err) {
                dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
                goto err_hwsetup;
        }

        err = rvu_flr_init(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to initialize flr\n", __func__);
                goto err_mbox;
        }

        err = rvu_register_interrupts(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to register interrupts\n", __func__);
                goto err_flr;
        }

        err = rvu_register_dl(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to register devlink\n", __func__);
                goto err_irq;
        }

        rvu_setup_rvum_blk_revid(rvu);

        /* Enable AF's VFs (if any) */
        err = rvu_enable_sriov(rvu);
        if (err) {
                dev_err(dev, "%s: Failed to enable sriov\n", __func__);
                goto err_dl;
        }

        /* Initialize debugfs */
        rvu_dbg_init(rvu);

        mutex_init(&rvu->rswitch.switch_lock);

        if (rvu->fwdata)
                ptp_start(rvu->ptp, rvu->fwdata->sclk,
                          rvu->fwdata->ptp_ext_clk_rate,
                          rvu->fwdata->ptp_ext_tstamp);

        return 0;
err_dl:
        rvu_unregister_dl(rvu);
err_irq:
        rvu_unregister_interrupts(rvu);
err_flr:
        rvu_flr_wq_destroy(rvu);
err_mbox:
        rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
        rvu_cgx_exit(rvu);
        rvu_fwdata_exit(rvu);
        rvu_reset_all_blocks(rvu);
        rvu_free_hw_resources(rvu);
        rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
        ptp_put(rvu->ptp);
err_release_regions:
        pci_release_regions(pdev);
err_disable_device:
        pci_disable_device(pdev);
err_freemem:
        pci_set_drvdata(pdev, NULL);
        devm_kfree(&pdev->dev, rvu->hw);
        devm_kfree(dev, rvu);
        return err;
}

static void rvu_remove(struct pci_dev *pdev)
{
        struct rvu *rvu = pci_get_drvdata(pdev);

        rvu_dbg_exit(rvu);
        rvu_unregister_dl(rvu);
        rvu_unregister_interrupts(rvu);
        rvu_flr_wq_destroy(rvu);
        rvu_cgx_exit(rvu);
        rvu_fwdata_exit(rvu);
        rvu_mbox_destroy(&rvu->afpf_wq_info);
        rvu_disable_sriov(rvu);
        rvu_reset_all_blocks(rvu);
        rvu_free_hw_resources(rvu);
        rvu_clear_rvum_blk_revid(rvu);
        ptp_put(rvu->ptp);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        devm_kfree(&pdev->dev, rvu->hw);
        devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
        .name = DRV_NAME,
        .id_table = rvu_id_table,
        .probe = rvu_probe,
        .remove = rvu_remove,
};

static int __init rvu_init_module(void)
{
        int err;

        pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

        err = pci_register_driver(&cgx_driver);
        if (err < 0)
                return err;

        err = pci_register_driver(&ptp_driver);
        if (err < 0)
                goto ptp_err;

        err = pci_register_driver(&rvu_driver);
        if (err < 0)
                goto rvu_err;

        return 0;
rvu_err:
        pci_unregister_driver(&ptp_driver);
ptp_err:
        pci_unregister_driver(&cgx_driver);

        return err;
}

static void __exit rvu_cleanup_module(void)
{
        pci_unregister_driver(&rvu_driver);
        pci_unregister_driver(&ptp_driver);
        pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);