// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/sysfs.h>

#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"

#include "rvu_trace.h"

#define DRV_NAME        "rvu_af"
#define DRV_STRING      "Marvell OcteonTX2 RVU Admin Function Driver"

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf);
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);

static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
                         int type, int num,
                         void (mbox_handler)(struct work_struct *),
                         void (mbox_up_handler)(struct work_struct *));
enum {
        TYPE_AFVF,
        TYPE_AFPF,
};

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
        { 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, rvu_id_table);

static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");

static char *kpu_profile; /* KPU profile name */
module_param(kpu_profile, charp, 0000);
MODULE_PARM_DESC(kpu_profile, "KPU profile name string");

static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;

        hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
        hw->cap.nix_fixed_txschq_mapping = false;
        hw->cap.nix_shaping = true;
        hw->cap.nix_tx_link_bp = true;
        hw->cap.nix_rx_multicast = true;
        hw->cap.nix_shaper_toggle_wait = false;
        hw->rvu = rvu;

        if (is_rvu_pre_96xx_C0(rvu)) {
                hw->cap.nix_fixed_txschq_mapping = true;
                hw->cap.nix_txsch_per_cgx_lmac = 4;
                hw->cap.nix_txsch_per_lbk_lmac = 132;
                hw->cap.nix_txsch_per_sdp_lmac = 76;
                hw->cap.nix_shaping = false;
                hw->cap.nix_tx_link_bp = false;
                if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu))
                        hw->cap.nix_rx_multicast = false;
        }
        if (!is_rvu_pre_96xx_C0(rvu))
                hw->cap.nix_shaper_toggle_wait = true;

        if (!is_rvu_otx2(rvu))
                hw->cap.per_pf_mbox_regs = true;
}
/* Poll an RVU block's register 'offset' until the bits specified by
 * 'mask' read as zero (if 'zero' is true) or nonzero (if false).
 */
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(20000);
        bool twice = false;
        void __iomem *reg;
        u64 reg_val;

        reg = rvu->afreg_base + ((block << 28) | offset);
again:
        reg_val = readq(reg);
        if (zero && !(reg_val & mask))
                return 0;
        if (!zero && (reg_val & mask))
                return 0;
        if (time_before(jiffies, timeout)) {
                usleep_range(1, 5);
                goto again;
        }
        /* In scenarios where CPU is scheduled out before checking
         * 'time_before' (above) and gets scheduled in such that
         * jiffies are beyond timeout value, then check again if HW is
         * done with the operation in the meantime.
         */
        if (!twice) {
                twice = true;
                goto again;
        }
        return -EBUSY;
}

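/* Find and reserve the first free entry in the 'rsrc' bitmap.
 * Returns the allocated entry id, or -ENOSPC if all entries are in use.
 */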
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
{
        int id;

        if (!rsrc->bmap)
                return -EINVAL;

        id = find_first_zero_bit(rsrc->bmap, rsrc->max);
        if (id >= rsrc->max)
                return -ENOSPC;

        __set_bit(id, rsrc->bmap);

        return id;
}

int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return -EINVAL;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return -ENOSPC;

        bitmap_set(rsrc->bmap, start, nrsrc);
        return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
        if (!rsrc->bmap)
                return;
        if (start >= rsrc->max)
                return;

        bitmap_clear(rsrc->bmap, start, nrsrc);
}

bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return false;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return false;

        return true;
}

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
                return;

        __clear_bit(id, rsrc->bmap);
}

int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
{
        int used;

        if (!rsrc->bmap)
                return 0;

        used = bitmap_weight(rsrc->bmap, rsrc->max);
        return (rsrc->max - used);
}

bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
                return false;

        return !test_bit(id, rsrc->bmap);
}

int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
        rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
                             sizeof(long), GFP_KERNEL);
        if (!rsrc->bmap)
                return -ENOMEM;
        return 0;
}

void rvu_free_bitmap(struct rsrc_bmap *rsrc)
{
        kfree(rsrc->bmap);
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
        u16 match = 0;
        int lf;

        mutex_lock(&rvu->rsrc_lock);
        for (lf = 0; lf < block->lf.max; lf++) {
                if (block->fn_map[lf] == pcifunc) {
                        if (slot == match) {
                                mutex_unlock(&rvu->rsrc_lock);
                                return lf;
                        }
                        match++;
                }
        }
        mutex_unlock(&rvu->rsrc_lock);
        return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 support
 * multiple blocks of the same type.
 *
 * @pcifunc has to be zero when no LF is yet attached.
 *
 * If a pcifunc has LFs attached from multiple blocks of the same type,
 * the blkaddr of the first encountered block is returned.
 */
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
        int devnum, blkaddr = -ENODEV;
        u64 cfg, reg;
        bool is_pf;

        switch (blktype) {
        case BLKTYPE_NPC:
                blkaddr = BLKADDR_NPC;
                goto exit;
        case BLKTYPE_NPA:
                blkaddr = BLKADDR_NPA;
                goto exit;
        case BLKTYPE_NIX:
                /* For now assume NIX0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_NIX0;
                        goto exit;
                }
                break;
        case BLKTYPE_SSO:
                blkaddr = BLKADDR_SSO;
                goto exit;
        case BLKTYPE_SSOW:
                blkaddr = BLKADDR_SSOW;
                goto exit;
        case BLKTYPE_TIM:
                blkaddr = BLKADDR_TIM;
                goto exit;
        case BLKTYPE_CPT:
                /* For now assume CPT0 */
                if (!pcifunc) {
                        blkaddr = BLKADDR_CPT0;
                        goto exit;
                }
                break;
        }

        /* Check if this is an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
         * 'BLKADDR_NIX1'.
         */
        if (blktype == BLKTYPE_NIX) {
                reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
                        RVU_PRIV_HWVFX_NIXX_CFG(0);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg) {
                        blkaddr = BLKADDR_NIX0;
                        goto exit;
                }

                reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
                        RVU_PRIV_HWVFX_NIXX_CFG(1);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_NIX1;
        }

        if (blktype == BLKTYPE_CPT) {
                reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
                        RVU_PRIV_HWVFX_CPTX_CFG(0);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg) {
                        blkaddr = BLKADDR_CPT0;
                        goto exit;
                }

                reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
                        RVU_PRIV_HWVFX_CPTX_CFG(1);
                cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
                if (cfg)
                        blkaddr = BLKADDR_CPT1;
        }

exit:
        if (is_block_implemented(rvu->hw, blkaddr))
                return blkaddr;
        return -ENODEV;
}

static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, u16 pcifunc,
                                u16 lf, bool attach)
{
        int devnum, num_lfs = 0;
        bool is_pf;
        u64 reg;

        if (lf >= block->lf.max) {
                dev_err(&rvu->pdev->dev,
                        "%s: FATAL: LF %d is >= %s's max lfs i.e. %d\n",
                        __func__, lf, block->name, block->lf.max);
                return;
        }

        /* Check if this is for an RVU PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK) {
                is_pf = false;
                devnum = rvu_get_hwvf(rvu, pcifunc);
        } else {
                is_pf = true;
                devnum = rvu_get_pf(pcifunc);
        }

        block->fn_map[lf] = attach ? pcifunc : 0;

        switch (block->addr) {
        case BLKADDR_NPA:
                pfvf->npalf = attach;
                num_lfs = pfvf->npalf;
                break;
        case BLKADDR_NIX0:
        case BLKADDR_NIX1:
                pfvf->nixlf = attach;
                num_lfs = pfvf->nixlf;
                break;
        case BLKADDR_SSO:
                attach ? pfvf->sso++ : pfvf->sso--;
                num_lfs = pfvf->sso;
                break;
        case BLKADDR_SSOW:
                attach ? pfvf->ssow++ : pfvf->ssow--;
                num_lfs = pfvf->ssow;
                break;
        case BLKADDR_TIM:
                attach ? pfvf->timlfs++ : pfvf->timlfs--;
                num_lfs = pfvf->timlfs;
                break;
        case BLKADDR_CPT0:
                attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
                num_lfs = pfvf->cptlfs;
                break;
        case BLKADDR_CPT1:
                attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
                num_lfs = pfvf->cpt1_lfs;
                break;
        }

        reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
        rvu_write64(rvu, BLKADDR_RVUM, reg | (devnum << 16), num_lfs);
}

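/* A pcifunc identifies an RVU PF or VF: the PF number sits at
 * RVU_PFVF_PF_SHIFT, while the low FUNC field is zero for the PF itself
 * and (VF index + 1) for its VFs (see rvu_get_hwvf() and
 * is_pf_func_valid()).
 */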
inline int rvu_get_pf(u16 pcifunc)
{
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
        u64 cfg;

        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        if (numvfs)
                *numvfs = (cfg >> 12) & 0xFF;
        if (hwvf)
                *hwvf = cfg & 0xFFF;
}

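/* Map a VF's pcifunc to its hardware VF (HWVF) index: the parent PF's
 * first HWVF (from RVU_PRIV_PFX_CFG) plus the VF's index within that PF.
 */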
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
        int pf, func;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        func = pcifunc & RVU_PFVF_FUNC_MASK;

        /* Get first HWVF attached to this PF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));

        return ((cfg & 0xFFF) + func - 1);
}

struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
{
        /* Check if it is a PF or VF */
        if (pcifunc & RVU_PFVF_FUNC_MASK)
                return &rvu->hwvf[rvu_get_hwvf(rvu, pcifunc)];
        else
                return &rvu->pf[rvu_get_pf(pcifunc)];
}

static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
{
        int pf, vf, nvfs;
        u64 cfg;

        pf = rvu_get_pf(pcifunc);
        if (pf >= rvu->hw->total_pfs)
                return false;

        if (!(pcifunc & RVU_PFVF_FUNC_MASK))
                return true;

        /* Check if VF is within number of VFs attached to this PF */
        vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        nvfs = (cfg >> 12) & 0xFF;
        if (vf >= nvfs)
                return false;

        return true;
}

bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
        struct rvu_block *block;

        if (blkaddr < BLKADDR_RVUM || blkaddr >= BLK_COUNT)
                return false;

        block = &hw->block[blkaddr];
        return block->implemented;
}

static void rvu_check_block_implemented(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* For each block check if 'implemented' bit is set */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                cfg = rvupf_read64(rvu, RVU_PF_BLOCK_ADDRX_DISC(blkid));
                if (cfg & BIT_ULL(11))
                        block->implemented = true;
        }
}

static void rvu_setup_rvum_blk_revid(struct rvu *rvu)
{
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM),
                    RVU_BLK_RVUM_REVID);
}

static void rvu_clear_rvum_blk_revid(struct rvu *rvu)
{
        rvu_write64(rvu, BLKADDR_RVUM,
                    RVU_PRIV_BLOCK_TYPEX_REV(BLKTYPE_RVUM), 0x00);
}

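/* Reset an LF: write the LF number with the trigger bit (bit 12) set to
 * the block's LF reset register and poll until hardware clears it.
 */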
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf)
{
        int err;

        if (!block->implemented)
                return 0;

        rvu_write64(rvu, block->addr, block->lfreset_reg, lf | BIT_ULL(12));
        err = rvu_poll_reg(rvu, block->addr, block->lfreset_reg, BIT_ULL(12),
                           true);
        return err;
}

static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
{
        struct rvu_block *block = &rvu->hw->block[blkaddr];
        int err;

        if (!block->implemented)
                return;

        rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
        err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
        if (err)
                dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
}

static void rvu_reset_all_blocks(struct rvu *rvu)
{
        /* Do a HW reset of all RVU blocks */
        rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
        rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}

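/* Scan a block for LFs that low level firmware has already provisioned:
 * mark each such LF as in use, record which PF/VF owns it and set its
 * MSIX vector offset.
 */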
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
{
        struct rvu_pfvf *pfvf;
        u64 cfg;
        int lf;

        for (lf = 0; lf < block->lf.max; lf++) {
                cfg = rvu_read64(rvu, block->addr,
                                 block->lfcfg_reg | (lf << block->lfshift));
                if (!(cfg & BIT_ULL(63)))
                        continue;

                /* Set this resource as being used */
                __set_bit(lf, block->lf.bmap);

                /* Get to whom this LF is attached */
                pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    (cfg >> 8) & 0xFFFF, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
        int min_vecs;

        if (!vf)
                goto check_pf;

        if (!nvecs) {
                dev_warn(rvu->dev,
                         "PF%d:VF%d is configured with zero msix vectors, %d\n",
                         pf, vf - 1, nvecs);
        }
        return;

check_pf:
        if (pf == 0)
                min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
        else
                min_vecs = RVU_PF_INT_VEC_CNT;

        if (!(nvecs < min_vecs))
                return;
        dev_warn(rvu->dev,
                 "PF%d is configured with too few vectors, %d, min is %d\n",
                 pf, nvecs, min_vecs);
}

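/* Read MSIX vector counts for each enabled PF and its VFs from AF private
 * registers, allocate per-device MSIX bitmaps and vector-to-LF mappings,
 * program the RVU_PF/VF interrupt vector offsets, and remap the MSIX
 * table base as an IOVA for the hardware.
 */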
static int rvu_setup_msix_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf, err;
        int nvecs, offset, max_msix;
        struct rvu_pfvf *pfvf;
        u64 cfg, phy_addr;
        dma_addr_t iova;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                /* If PF is not enabled, nothing to do */
                if (!((cfg >> 20) & 0x01))
                        continue;

                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

                pfvf = &rvu->pf[pf];
                /* Get num of MSIX vectors attached to this PF */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
                pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
                rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

                /* Alloc msix bitmap for this PF */
                err = rvu_alloc_bitmap(&pfvf->msix);
                if (err)
                        return err;

                /* Allocate memory for MSIX vector to RVU block LF mapping */
                pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
                                                sizeof(u16), GFP_KERNEL);
                if (!pfvf->msix_lfmap)
                        return -ENOMEM;

                /* For PF0 (AF) firmware will set msix vector offsets for
                 * AF, block AF and PF0_INT vectors, so jump to VFs.
                 */
                if (!pf)
                        goto setup_vfmsix;

                /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
                 * These are allocated on driver init and never freed,
                 * so no need to set 'msix_lfmap' for these.
                 */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
                nvecs = (cfg >> 12) & 0xFF;
                cfg &= ~0x7FFULL;
                offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                rvu_write64(rvu, BLKADDR_RVUM,
                            RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
                /* Alloc msix bitmap for VFs */
                for (vf = 0; vf < numvfs; vf++) {
                        pfvf =  &rvu->hwvf[hwvf + vf];
                        /* Get num of MSIX vectors attached to this VF */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_PFX_MSIX_CFG(pf));
                        pfvf->msix.max = (cfg & 0xFFF) + 1;
                        rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

                        /* Alloc msix bitmap for this VF */
                        err = rvu_alloc_bitmap(&pfvf->msix);
                        if (err)
                                return err;

                        pfvf->msix_lfmap =
                                devm_kcalloc(rvu->dev, pfvf->msix.max,
                                             sizeof(u16), GFP_KERNEL);
                        if (!pfvf->msix_lfmap)
                                return -ENOMEM;

                        /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
                         * These are allocated on driver init and never freed,
                         * so no need to set 'msix_lfmap' for these.
                         */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
                        nvecs = (cfg >> 12) & 0xFF;
                        cfg &= ~0x7FFULL;
                        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                        rvu_write64(rvu, BLKADDR_RVUM,
                                    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
                                    cfg | offset);
                }
        }

        /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
         * create an IOMMU mapping for the physical address configured by
         * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
         */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        if (rvu->fwdata && rvu->fwdata->msixtr_base)
                phy_addr = rvu->fwdata->msixtr_base;
        else
                phy_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE);

        iova = dma_map_resource(rvu->dev, phy_addr,
                                max_msix * PCI_MSIX_ENTRY_SIZE,
                                DMA_BIDIRECTIONAL, 0);

        if (dma_mapping_error(rvu->dev, iova))
                return -ENOMEM;

        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
        rvu->msix_base_iova = iova;
        rvu->msixtr_base_phy = phy_addr;

        return 0;
}

static void rvu_reset_msix(struct rvu *rvu)
{
        /* Restore msixtr base register */
        rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE,
                    rvu->msixtr_base_phy);
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        struct rvu_pfvf  *pfvf;
        int id, max_msix;
        u64 cfg;

        rvu_npa_freemem(rvu);
        rvu_npc_freemem(rvu);
        rvu_nix_freemem(rvu);

        /* Free block LF bitmaps */
        for (id = 0; id < BLK_COUNT; id++) {
                block = &hw->block[id];
                kfree(block->lf.bmap);
        }

        /* Free MSIX bitmaps */
        for (id = 0; id < hw->total_pfs; id++) {
                pfvf = &rvu->pf[id];
                kfree(pfvf->msix.bmap);
        }

        for (id = 0; id < hw->total_vfs; id++) {
                pfvf = &rvu->hwvf[id];
                kfree(pfvf->msix.bmap);
        }

        /* Unmap MSIX vector base IOVA mapping */
        if (!rvu->msix_base_iova)
                return;
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        max_msix = cfg & 0xFFFFF;
        dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
                           max_msix * PCI_MSIX_ENTRY_SIZE,
                           DMA_BIDIRECTIONAL, 0);

        rvu_reset_msix(rvu);
        mutex_destroy(&rvu->rsrc_lock);
}

static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf;
        struct rvu_pfvf *pfvf;
        u64 *mac;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                /* For PF0 (AF), assign MAC addresses only to VFs (LBK VFs) */
                if (!pf)
                        goto lbkvf;

                if (!is_pf_cgxmapped(rvu, pf))
                        continue;
                /* Assign MAC address to PF */
                pfvf = &rvu->pf[pf];
                if (rvu->fwdata && pf < PF_MACNUM_MAX) {
                        mac = &rvu->fwdata->pf_macs[pf];
                        if (*mac)
                                u64_to_ether_addr(*mac, pfvf->mac_addr);
                        else
                                eth_random_addr(pfvf->mac_addr);
                } else {
                        eth_random_addr(pfvf->mac_addr);
                }
                ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);

lbkvf:
                /* Assign MAC address to VFs */
                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
                for (vf = 0; vf < numvfs; vf++, hwvf++) {
                        pfvf = &rvu->hwvf[hwvf];
                        if (rvu->fwdata && hwvf < VF_MACNUM_MAX) {
                                mac = &rvu->fwdata->vf_macs[hwvf];
                                if (*mac)
                                        u64_to_ether_addr(*mac, pfvf->mac_addr);
                                else
                                        eth_random_addr(pfvf->mac_addr);
                        } else {
                                eth_random_addr(pfvf->mac_addr);
                        }
                        ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
                }
        }
}

static int rvu_fwdata_init(struct rvu *rvu)
{
        u64 fwdbase;
        int err;

        /* Get firmware data base address */
        err = cgx_get_fwdata_base(&fwdbase);
        if (err)
                goto fail;
        rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
        if (!rvu->fwdata)
                goto fail;
        if (!is_rvu_fwdata_valid(rvu)) {
                dev_err(rvu->dev,
                        "Mismatch in 'fwdata' struct between kernel and firmware\n");
                iounmap(rvu->fwdata);
                rvu->fwdata = NULL;
                return -EINVAL;
        }
        return 0;
fail:
        dev_info(rvu->dev, "Unable to fetch 'fwdata' from firmware\n");
        return -EIO;
}

static void rvu_fwdata_exit(struct rvu *rvu)
{
        if (rvu->fwdata)
                iounmap(rvu->fwdata);
}

static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* Init NIX LF's bitmap */
        block = &hw->block[blkaddr];
        if (!block->implemented)
                return 0;
        blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
        cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
        block->lf.max = cfg & 0xFFF;
        block->addr = blkaddr;
        block->type = BLKTYPE_NIX;
        block->lfshift = 8;
        block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
        block->lfcfg_reg = NIX_PRIV_LFX_CFG;
        block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NIX_AF_LF_RST;
        sprintf(block->name, "NIX%d", blkid);
        rvu->nix_blkaddr[blkid] = blkaddr;
        return rvu_alloc_bitmap(&block->lf);
}

static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid;
        u64 cfg;

        /* Init CPT LF's bitmap */
        block = &hw->block[blkaddr];
        if (!block->implemented)
                return 0;
        blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
        cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
        block->lf.max = cfg & 0xFF;
        block->addr = blkaddr;
        block->type = BLKTYPE_CPT;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
        block->lfcfg_reg = CPT_PRIV_LFX_CFG;
        block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
        block->lfreset_reg = CPT_AF_LF_RST;
        sprintf(block->name, "CPT%d", blkid);
        return rvu_alloc_bitmap(&block->lf);
}

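/* Read LBK_CONST from the loopback (LBK) device's BAR0 and cache the
 * LBK FIFO size for later use.
 */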
static void rvu_get_lbk_bufsize(struct rvu *rvu)
{
        struct pci_dev *pdev = NULL;
        void __iomem *base;
        u64 lbk_const;

        pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
                              PCI_DEVID_OCTEONTX2_LBK, pdev);
        if (!pdev)
                return;

        base = pci_ioremap_bar(pdev, 0);
        if (!base)
                goto err_put;

        lbk_const = readq(base + LBK_CONST);

        /* cache fifo size */
        rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);

        iounmap(base);
err_put:
        pci_dev_put(pdev);
}

static int rvu_setup_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int blkid, err;
        u64 cfg;

        /* Get HW supported max RVU PF & VF count */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
        hw->total_pfs = (cfg >> 32) & 0xFF;
        hw->total_vfs = (cfg >> 20) & 0xFFF;
        hw->max_vfs_per_pf = (cfg >> 40) & 0xFF;

        /* Init NPA LF's bitmap */
        block = &hw->block[BLKADDR_NPA];
        if (!block->implemented)
                goto nix;
        cfg = rvu_read64(rvu, BLKADDR_NPA, NPA_AF_CONST);
        block->lf.max = (cfg >> 16) & 0xFFF;
        block->addr = BLKADDR_NPA;
        block->type = BLKTYPE_NPA;
        block->lfshift = 8;
        block->lookup_reg = NPA_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_NPA_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NPA_CFG;
        block->lfcfg_reg = NPA_PRIV_LFX_CFG;
        block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
        block->lfreset_reg = NPA_AF_LF_RST;
        sprintf(block->name, "NPA");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate NPA LF bitmap\n", __func__);
                return err;
        }

nix:
        err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
                return err;
        }

        err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
                return err;
        }

        /* Init SSO group's bitmap */
        block = &hw->block[BLKADDR_SSO];
        if (!block->implemented)
                goto ssow;
        cfg = rvu_read64(rvu, BLKADDR_SSO, SSO_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_SSO;
        block->type = BLKTYPE_SSO;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSO_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSO_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSO_CFG;
        block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
        block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
        block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
        sprintf(block->name, "SSO GROUP");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate SSO LF bitmap\n", __func__);
                return err;
        }

ssow:
        /* Init SSO workslot's bitmap */
        block = &hw->block[BLKADDR_SSOW];
        if (!block->implemented)
                goto tim;
        block->lf.max = (cfg >> 56) & 0xFF;
        block->addr = BLKADDR_SSOW;
        block->type = BLKTYPE_SSOW;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = SSOW_AF_RVU_LF_HWS_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_SSOW_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_SSOW_CFG;
        block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
        block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
        block->lfreset_reg = SSOW_AF_LF_HWS_RST;
        sprintf(block->name, "SSOWS");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate SSOW LF bitmap\n", __func__);
                return err;
        }

tim:
        /* Init TIM LF's bitmap */
        block = &hw->block[BLKADDR_TIM];
        if (!block->implemented)
                goto cpt;
        cfg = rvu_read64(rvu, BLKADDR_TIM, TIM_AF_CONST);
        block->lf.max = cfg & 0xFFFF;
        block->addr = BLKADDR_TIM;
        block->type = BLKTYPE_TIM;
        block->multislot = true;
        block->lfshift = 3;
        block->lookup_reg = TIM_AF_RVU_LF_CFG_DEBUG;
        block->pf_lfcnt_reg = RVU_PRIV_PFX_TIM_CFG;
        block->vf_lfcnt_reg = RVU_PRIV_HWVFX_TIM_CFG;
        block->lfcfg_reg = TIM_PRIV_LFX_CFG;
        block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
        block->lfreset_reg = TIM_AF_LF_RST;
        sprintf(block->name, "TIM");
        err = rvu_alloc_bitmap(&block->lf);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate TIM LF bitmap\n", __func__);
                return err;
        }

cpt:
        err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate CPT0 LF bitmap\n", __func__);
                return err;
        }
        err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate CPT1 LF bitmap\n", __func__);
                return err;
        }

        /* Allocate memory for PFVF data */
        rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
                               sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->pf) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
                return -ENOMEM;
        }

        rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
                                 sizeof(struct rvu_pfvf), GFP_KERNEL);
        if (!rvu->hwvf) {
                dev_err(rvu->dev,
                        "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
                return -ENOMEM;
        }

        mutex_init(&rvu->rsrc_lock);

        rvu_fwdata_init(rvu);

        err = rvu_setup_msix_resources(rvu);
        if (err) {
                dev_err(rvu->dev,
                        "%s: Failed to setup MSIX resources\n", __func__);
                return err;
        }

        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;

                /* Allocate memory for block LF/slot to pcifunc mapping info */
                block->fn_map = devm_kcalloc(rvu->dev, block->lf.max,
                                             sizeof(u16), GFP_KERNEL);
                if (!block->fn_map) {
                        err = -ENOMEM;
                        goto msix_err;
                }

                /* Scan all blocks to check if low level firmware has
                 * already provisioned any of the resources to a PF/VF.
                 */
                rvu_scan_block(rvu, block);
        }

        err = rvu_set_channels_base(rvu);
        if (err)
                goto msix_err;

        err = rvu_npc_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
                goto npc_err;
        }

        err = rvu_cgx_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
                goto cgx_err;
        }

        /* Assign MACs for CGX mapped functions */
        rvu_setup_pfvf_macaddress(rvu);

        err = rvu_npa_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
                goto npa_err;
        }

        rvu_get_lbk_bufsize(rvu);

        err = rvu_nix_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
                goto nix_err;
        }

        err = rvu_sdp_init(rvu);
        if (err) {
                dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
                goto nix_err;
        }

        rvu_program_channels(rvu);

        return 0;

nix_err:
        rvu_nix_freemem(rvu);
npa_err:
        rvu_npa_freemem(rvu);
cgx_err:
        rvu_cgx_exit(rvu);
npc_err:
        rvu_npc_freemem(rvu);
        rvu_fwdata_exit(rvu);
msix_err:
        rvu_reset_msix(rvu);
        return err;
}

/* NPA and NIX admin queue APIs */
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq)
{
        if (!aq)
                return;

        qmem_free(rvu->dev, aq->inst);
        qmem_free(rvu->dev, aq->res);
        devm_kfree(rvu->dev, aq);
}

int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
                 int qsize, int inst_size, int res_size)
{
        struct admin_queue *aq;
        int err;

        *ad_queue = devm_kzalloc(rvu->dev, sizeof(*aq), GFP_KERNEL);
        if (!*ad_queue)
                return -ENOMEM;
        aq = *ad_queue;

        /* Alloc memory for instructions, i.e. the AQ */
        err = qmem_alloc(rvu->dev, &aq->inst, qsize, inst_size);
        if (err) {
                devm_kfree(rvu->dev, aq);
                return err;
        }

        /* Alloc memory for results */
        err = qmem_alloc(rvu->dev, &aq->res, qsize, res_size);
        if (err) {
                rvu_aq_free(rvu, aq);
                return err;
        }

        spin_lock_init(&aq->lock);
        return 0;
}

int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
                           struct ready_msg_rsp *rsp)
{
        if (rvu->fwdata) {
                rsp->rclk_freq = rvu->fwdata->rclk;
                rsp->sclk_freq = rvu->fwdata->sclk;
        }
        return 0;
}

/* Get current count of an RVU block's LFs/slots
 * provisioned to a given RVU func.
 */
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
{
        switch (blkaddr) {
        case BLKADDR_NPA:
                return pfvf->npalf ? 1 : 0;
        case BLKADDR_NIX0:
        case BLKADDR_NIX1:
                return pfvf->nixlf ? 1 : 0;
        case BLKADDR_SSO:
                return pfvf->sso;
        case BLKADDR_SSOW:
                return pfvf->ssow;
        case BLKADDR_TIM:
                return pfvf->timlfs;
        case BLKADDR_CPT0:
                return pfvf->cptlfs;
        case BLKADDR_CPT1:
                return pfvf->cpt1_lfs;
        }
        return 0;
}

/* Return true if LFs of block type are attached to pcifunc */
static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
        switch (blktype) {
        case BLKTYPE_NPA:
                return pfvf->npalf ? 1 : 0;
        case BLKTYPE_NIX:
                return pfvf->nixlf ? 1 : 0;
        case BLKTYPE_SSO:
                return !!pfvf->sso;
        case BLKTYPE_SSOW:
                return !!pfvf->ssow;
        case BLKTYPE_TIM:
                return !!pfvf->timlfs;
        case BLKTYPE_CPT:
                return pfvf->cptlfs || pfvf->cpt1_lfs;
        }

        return false;
}

bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf;

        if (!is_pf_func_valid(rvu, pcifunc))
                return false;

        pfvf = rvu_get_pfvf(rvu, pcifunc);

        /* Check if this PFFUNC has an LF of type blktype attached */
        if (!is_blktype_attached(pfvf, blktype))
                return false;

        return true;
}

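/* Look up the LF mapped to a (pcifunc, slot) pair via the block's lookup
 * register: write the query with the trigger bit (bit 13) set, wait for
 * hardware to clear it, then check the valid bit (bit 12) and return the
 * LF number from the low 12 bits.
 */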
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
                           int pcifunc, int slot)
{
        u64 val;

        val = ((u64)pcifunc << 24) | (slot << 16) | (1ULL << 13);
        rvu_write64(rvu, block->addr, block->lookup_reg, val);
        /* Wait for the lookup to finish */
        /* TODO: put some timeout here */
        while (rvu_read64(rvu, block->addr, block->lookup_reg) & (1ULL << 13))
                ;

        val = rvu_read64(rvu, block->addr, block->lookup_reg);

        /* Check LF valid bit */
        if (!(val & (1ULL << 12)))
                return -1;

        return (val & 0xFFF);
}

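/* Detach all LFs of 'blktype' from 'pcifunc': disable each LF's config,
 * update the SW resource map, free the LF and clear its MSIX offset.
 */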
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf, num_lfs;
        int blkaddr;

        blkaddr = rvu_get_blkaddr(rvu, blktype, pcifunc);
        if (blkaddr < 0)
                return;

        if (blktype == BLKTYPE_NIX)
                rvu_nix_reset_mac(pfvf, pcifunc);

        block = &hw->block[blkaddr];

        num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
        if (!num_lfs)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                lf = rvu_lookup_rsrc(rvu, block, pcifunc, slot);
                if (lf < 0) /* This should never happen */
                        continue;

                /* Disable the LF */
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), 0x00ULL);

                /* Update SW maintained mapping info as well */
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, false);

                /* Free the resource */
                rvu_free_rsrc(&block->lf, lf);

                /* Clear MSIX vector offset for this LF */
                rvu_clear_msix_offset(rvu, pfvf, block, lf);
        }
}

static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
                            u16 pcifunc)
{
        struct rvu_hwinfo *hw = rvu->hw;
        bool detach_all = true;
        struct rvu_block *block;
        int blkid;

        mutex_lock(&rvu->rsrc_lock);

        /* Check for partial resource detach */
        if (detach && detach->partial)
                detach_all = false;

        /* Check for RVU block's LFs attached to this func,
         * if so, detach them.
         */
        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
                        continue;
                if (!detach_all && detach) {
                        if (blkid == BLKADDR_NPA && !detach->npalf)
                                continue;
                        else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
                                continue;
                        else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
                                continue;
                        else if ((blkid == BLKADDR_SSO) && !detach->sso)
                                continue;
                        else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
                                continue;
                        else if ((blkid == BLKADDR_TIM) && !detach->timlfs)
                                continue;
                        else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
                                continue;
                        else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
                                continue;
                }
                rvu_detach_block(rvu, pcifunc, block->type);
        }

        mutex_unlock(&rvu->rsrc_lock);
        return 0;
}

int rvu_mbox_handler_detach_resources(struct rvu *rvu,
                                      struct rsrc_detach *detach,
                                      struct msg_rsp *rsp)
{
        return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}

int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        int blkaddr = BLKADDR_NIX0, vf;
        struct rvu_pfvf *pf;

        pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);

        /* All CGX mapped PFs are set with assigned NIX block during init */
        if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
                blkaddr = pf->nix_blkaddr;
        } else if (is_afvf(pcifunc)) {
                vf = pcifunc - 1;
                /* Assign NIX based on VF number. All even-numbered VFs get
                 * NIX0 and odd-numbered VFs get NIX1.
                 */
                blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
                /* NIX1 is not present on all silicons */
                if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
                        blkaddr = BLKADDR_NIX0;
        }

        /* if SDP1 then the blkaddr is NIX1 */
        if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
                blkaddr = BLKADDR_NIX1;

        switch (blkaddr) {
        case BLKADDR_NIX1:
                pfvf->nix_blkaddr = BLKADDR_NIX1;
                pfvf->nix_rx_intf = NIX_INTFX_RX(1);
                pfvf->nix_tx_intf = NIX_INTFX_TX(1);
                break;
        case BLKADDR_NIX0:
        default:
                pfvf->nix_blkaddr = BLKADDR_NIX0;
                pfvf->nix_rx_intf = NIX_INTFX_RX(0);
                pfvf->nix_tx_intf = NIX_INTFX_TX(0);
                break;
        }

        return pfvf->nix_blkaddr;
}

static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
                                  u16 pcifunc, struct rsrc_attach *attach)
{
        int blkaddr;

        switch (blktype) {
        case BLKTYPE_NIX:
                blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
                break;
        case BLKTYPE_CPT:
                if (attach->hdr.ver < RVU_MULTI_BLK_VER)
                        return rvu_get_blkaddr(rvu, blktype, 0);
                blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
                          BLKADDR_CPT0;
                if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
                        return -ENODEV;
                break;
        default:
                return rvu_get_blkaddr(rvu, blktype, 0);
        }

        if (is_block_implemented(rvu->hw, blkaddr))
                return blkaddr;

        return -ENODEV;
}

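/* Attach 'num_lfs' LFs of 'blktype' to 'pcifunc': allocate each LF,
 * program its config register with the owner and slot, update the SW
 * resource map and set its MSIX vector offset.
 */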
static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
                             int num_lfs, struct rsrc_attach *attach)
{
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        int slot, lf;
        int blkaddr;
        u64 cfg;

        if (!num_lfs)
                return;

        blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
        if (blkaddr < 0)
                return;

        block = &hw->block[blkaddr];
        if (!block->lf.bmap)
                return;

        for (slot = 0; slot < num_lfs; slot++) {
                /* Allocate the resource */
                lf = rvu_alloc_rsrc(&block->lf);
                if (lf < 0)
                        return;

                cfg = (1ULL << 63) | (pcifunc << 8) | slot;
                rvu_write64(rvu, blkaddr, block->lfcfg_reg |
                            (lf << block->lfshift), cfg);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

1493 static int rvu_check_rsrc_availability(struct rvu *rvu,
1494                                        struct rsrc_attach *req, u16 pcifunc)
1495 {
1496         struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
1497         int free_lfs, mappedlfs, blkaddr;
1498         struct rvu_hwinfo *hw = rvu->hw;
1499         struct rvu_block *block;
1500
1501         /* Only one NPA LF can be attached */
1502         if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
1503                 block = &hw->block[BLKADDR_NPA];
1504                 free_lfs = rvu_rsrc_free_count(&block->lf);
1505                 if (!free_lfs)
1506                         goto fail;
1507         } else if (req->npalf) {
1508                 dev_err(&rvu->pdev->dev,
1509                         "Func 0x%x: Invalid req, already has NPA\n",
1510                          pcifunc);
1511                 return -EINVAL;
1512         }
1513
1514         /* Only one NIX LF can be attached */
1515         if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
1516                 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
1517                                                  pcifunc, req);
1518                 if (blkaddr < 0)
1519                         return blkaddr;
1520                 block = &hw->block[blkaddr];
1521                 free_lfs = rvu_rsrc_free_count(&block->lf);
1522                 if (!free_lfs)
1523                         goto fail;
1524         } else if (req->nixlf) {
1525                 dev_err(&rvu->pdev->dev,
1526                         "Func 0x%x: Invalid req, already has NIX\n",
1527                         pcifunc);
1528                 return -EINVAL;
1529         }
1530
1531         if (req->sso) {
1532                 block = &hw->block[BLKADDR_SSO];
1533                 /* Is request within limits ? */
1534                 if (req->sso > block->lf.max) {
1535                         dev_err(&rvu->pdev->dev,
1536                                 "Func 0x%x: Invalid SSO req, %d > max %d\n",
1537                                  pcifunc, req->sso, block->lf.max);
1538                         return -EINVAL;
1539                 }
1540                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1541                 free_lfs = rvu_rsrc_free_count(&block->lf);
1542                 /* Check if additional resources are available */
1543                 if (req->sso > mappedlfs &&
1544                     ((req->sso - mappedlfs) > free_lfs))
1545                         goto fail;
1546         }
1547
1548         if (req->ssow) {
1549                 block = &hw->block[BLKADDR_SSOW];
1550                 if (req->ssow > block->lf.max) {
1551                         dev_err(&rvu->pdev->dev,
1552                                 "Func 0x%x: Invalid SSOW req, %d > max %d\n",
1553                  pcifunc, req->ssow, block->lf.max);
1554                         return -EINVAL;
1555                 }
1556                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1557                 free_lfs = rvu_rsrc_free_count(&block->lf);
1558                 if (req->ssow > mappedlfs &&
1559                     ((req->ssow - mappedlfs) > free_lfs))
1560                         goto fail;
1561         }
1562
1563         if (req->timlfs) {
1564                 block = &hw->block[BLKADDR_TIM];
1565                 if (req->timlfs > block->lf.max) {
1566                         dev_err(&rvu->pdev->dev,
1567                                 "Func 0x%x: Invalid TIMLF req, %d > max %d\n",
1568                                  pcifunc, req->timlfs, block->lf.max);
1569                         return -EINVAL;
1570                 }
1571                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1572                 free_lfs = rvu_rsrc_free_count(&block->lf);
1573                 if (req->timlfs > mappedlfs &&
1574                     ((req->timlfs - mappedlfs) > free_lfs))
1575                         goto fail;
1576         }
1577
1578         if (req->cptlfs) {
1579                 blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
1580                                                  pcifunc, req);
1581                 if (blkaddr < 0)
1582                         return blkaddr;
1583                 block = &hw->block[blkaddr];
1584                 if (req->cptlfs > block->lf.max) {
1585                         dev_err(&rvu->pdev->dev,
1586                                 "Func 0x%x: Invalid CPTLF req, %d > max %d\n",
1587                                  pcifunc, req->cptlfs, block->lf.max);
1588                         return -EINVAL;
1589                 }
1590                 mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
1591                 free_lfs = rvu_rsrc_free_count(&block->lf);
1592                 if (req->cptlfs > mappedlfs &&
1593                     ((req->cptlfs - mappedlfs) > free_lfs))
1594                         goto fail;
1595         }
1596
1597         return 0;
1598
1599 fail:
1600         dev_info(rvu->dev, "Request for %s failed\n", block->name);
1601         return -ENOSPC;
1602 }
1603
1604 static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
1605                                        struct rsrc_attach *attach)
1606 {
1607         int blkaddr, num_lfs;
1608
1609         blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
1610                                          attach->hdr.pcifunc, attach);
1611         if (blkaddr < 0)
1612                 return false;
1613
1614         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
1615                                         blkaddr);
1616         /* Does the requester already have LFs from the given block? */
1617         return !!num_lfs;
1618 }
1619
1620 int rvu_mbox_handler_attach_resources(struct rvu *rvu,
1621                                       struct rsrc_attach *attach,
1622                                       struct msg_rsp *rsp)
1623 {
1624         u16 pcifunc = attach->hdr.pcifunc;
1625         int err;
1626
1627         /* If first request, detach all existing attached resources */
1628         if (!attach->modify)
1629                 rvu_detach_rsrcs(rvu, NULL, pcifunc);
1630
1631         mutex_lock(&rvu->rsrc_lock);
1632
1633         /* Check if the request can be accommodated */
1634         err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
1635         if (err)
1636                 goto exit;
1637
1638         /* Now attach the requested resources */
1639         if (attach->npalf)
1640                 rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
1641
1642         if (attach->nixlf)
1643                 rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
1644
1645         if (attach->sso) {
1646                 /* An RVU function doesn't know which exact LF or slot is
1647                  * attached to it; it always sees them as slots 0, 1, 2 and
1648                  * so on. So for a 'modify' request, simply detach all the
1649                  * existing attached LFs/slots and attach afresh.
1650                  */
1651                 if (attach->modify)
1652                         rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
1653                 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
1654                                  attach->sso, attach);
1655         }
1656
1657         if (attach->ssow) {
1658                 if (attach->modify)
1659                         rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
1660                 rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
1661                                  attach->ssow, attach);
1662         }
1663
1664         if (attach->timlfs) {
1665                 if (attach->modify)
1666                         rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
1667                 rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
1668                                  attach->timlfs, attach);
1669         }
1670
1671         if (attach->cptlfs) {
1672                 if (attach->modify &&
1673                     rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
1674                         rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
1675                 rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
1676                                  attach->cptlfs, attach);
1677         }
1678
1679 exit:
1680         mutex_unlock(&rvu->rsrc_lock);
1681         return err;
1682 }
1683
1684 static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1685                                int blkaddr, int lf)
1686 {
1687         u16 vec;
1688
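        /* pfvf->msix_lfmap[] records, per MSIX vector, which block/LF the
         * vector was assigned to (encoded via MSIX_BLKLF); scan it to find
         * the first vector handed to this LF.
         */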
1689         if (lf < 0)
1690                 return MSIX_VECTOR_INVALID;
1691
1692         for (vec = 0; vec < pfvf->msix.max; vec++) {
1693                 if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
1694                         return vec;
1695         }
1696         return MSIX_VECTOR_INVALID;
1697 }
1698
1699 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1700                                 struct rvu_block *block, int lf)
1701 {
1702         u16 nvecs, vec, offset;
1703         u64 cfg;
1704
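        /* Per-LF MSIXCFG layout, as used by the shifts/masks below: bits
         * [19:12] hold the LF's MSIX vector count and bits [10:0] the
         * offset of its first vector in the PF/VF's MSIX table. As a
         * hypothetical example, cfg = 0x23010 would decode to 0x23 (35)
         * vectors starting at offset 0x10 (16).
         */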
1705         cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1706                          (lf << block->lfshift));
1707         nvecs = (cfg >> 12) & 0xFF;
1708
1709         /* Check and alloc MSIX vectors, must be contiguous */
1710         if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
1711                 return;
1712
1713         offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
1714
1715         /* Config MSIX offset in LF */
1716         rvu_write64(rvu, block->addr, block->msixcfg_reg |
1717                     (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);
1718
1719         /* Update the bitmap as well */
1720         for (vec = 0; vec < nvecs; vec++)
1721                 pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
1722 }
1723
1724 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
1725                                   struct rvu_block *block, int lf)
1726 {
1727         u16 nvecs, vec, offset;
1728         u64 cfg;
1729
1730         cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
1731                          (lf << block->lfshift));
1732         nvecs = (cfg >> 12) & 0xFF;
1733
1734         /* Clear MSIX offset in LF */
1735         rvu_write64(rvu, block->addr, block->msixcfg_reg |
1736                     (lf << block->lfshift), cfg & ~0x7FFULL);
1737
1738         offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);
1739
1740         /* Update the mapping */
1741         for (vec = 0; vec < nvecs; vec++)
1742                 pfvf->msix_lfmap[offset + vec] = 0;
1743
1744         /* Free the same in MSIX bitmap */
1745         rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
1746 }
1747
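/* Mbox handler: reports, for every LF attached to the requesting PF/VF,
 * the offset of the LF's first MSIX vector within the PF/VF's vector
 * table, so the requester knows where to register its interrupt handlers.
 */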
1748 int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
1749                                  struct msix_offset_rsp *rsp)
1750 {
1751         struct rvu_hwinfo *hw = rvu->hw;
1752         u16 pcifunc = req->hdr.pcifunc;
1753         struct rvu_pfvf *pfvf;
1754         int lf, slot, blkaddr;
1755
1756         pfvf = rvu_get_pfvf(rvu, pcifunc);
1757         if (!pfvf->msix.bmap)
1758                 return 0;
1759
1760         /* Set MSIX offsets for each block's LFs attached to this PF/VF */
1761         lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
1762         rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
1763
1764         /* Get BLKADDR from which LFs are attached to pcifunc */
1765         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1766         if (blkaddr < 0) {
1767                 rsp->nix_msixoff = MSIX_VECTOR_INVALID;
1768         } else {
1769                 lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1770                 rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
1771         }
1772
1773         rsp->sso = pfvf->sso;
1774         for (slot = 0; slot < rsp->sso; slot++) {
1775                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
1776                 rsp->sso_msixoff[slot] =
1777                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
1778         }
1779
1780         rsp->ssow = pfvf->ssow;
1781         for (slot = 0; slot < rsp->ssow; slot++) {
1782                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
1783                 rsp->ssow_msixoff[slot] =
1784                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
1785         }
1786
1787         rsp->timlfs = pfvf->timlfs;
1788         for (slot = 0; slot < rsp->timlfs; slot++) {
1789                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
1790                 rsp->timlf_msixoff[slot] =
1791                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
1792         }
1793
1794         rsp->cptlfs = pfvf->cptlfs;
1795         for (slot = 0; slot < rsp->cptlfs; slot++) {
1796                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
1797                 rsp->cptlf_msixoff[slot] =
1798                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
1799         }
1800
1801         rsp->cpt1_lfs = pfvf->cpt1_lfs;
1802         for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
1803                 lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
1804                 rsp->cpt1_lf_msixoff[slot] =
1805                         rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
1806         }
1807
1808         return 0;
1809 }
1810
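/* Mbox handler: reports the number of free LFs in each RVU block and the
 * free NIX transmit scheduler queues per level. When the silicon uses a
 * fixed TXSCHQ mapping the per-level counts are reported as 1, since the
 * queues are statically mapped rather than allocated from a pool.
 */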
1811 int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req,
1812                                    struct free_rsrcs_rsp *rsp)
1813 {
1814         struct rvu_hwinfo *hw = rvu->hw;
1815         struct rvu_block *block;
1816         struct nix_txsch *txsch;
1817         struct nix_hw *nix_hw;
1818
1819         mutex_lock(&rvu->rsrc_lock);
1820
1821         block = &hw->block[BLKADDR_NPA];
1822         rsp->npa = rvu_rsrc_free_count(&block->lf);
1823
1824         block = &hw->block[BLKADDR_NIX0];
1825         rsp->nix = rvu_rsrc_free_count(&block->lf);
1826
1827         block = &hw->block[BLKADDR_NIX1];
1828         rsp->nix1 = rvu_rsrc_free_count(&block->lf);
1829
1830         block = &hw->block[BLKADDR_SSO];
1831         rsp->sso = rvu_rsrc_free_count(&block->lf);
1832
1833         block = &hw->block[BLKADDR_SSOW];
1834         rsp->ssow = rvu_rsrc_free_count(&block->lf);
1835
1836         block = &hw->block[BLKADDR_TIM];
1837         rsp->tim = rvu_rsrc_free_count(&block->lf);
1838
1839         block = &hw->block[BLKADDR_CPT0];
1840         rsp->cpt = rvu_rsrc_free_count(&block->lf);
1841
1842         block = &hw->block[BLKADDR_CPT1];
1843         rsp->cpt1 = rvu_rsrc_free_count(&block->lf);
1844
1845         if (rvu->hw->cap.nix_fixed_txschq_mapping) {
1846                 rsp->schq[NIX_TXSCH_LVL_SMQ] = 1;
1847                 rsp->schq[NIX_TXSCH_LVL_TL4] = 1;
1848                 rsp->schq[NIX_TXSCH_LVL_TL3] = 1;
1849                 rsp->schq[NIX_TXSCH_LVL_TL2] = 1;
1850                 /* NIX1 */
1851                 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1852                         goto out;
1853                 rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1;
1854                 rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1;
1855                 rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1;
1856                 rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1;
1857         } else {
1858                 nix_hw = get_nix_hw(hw, BLKADDR_NIX0);
1859                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1860                 rsp->schq[NIX_TXSCH_LVL_SMQ] =
1861                                 rvu_rsrc_free_count(&txsch->schq);
1862
1863                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1864                 rsp->schq[NIX_TXSCH_LVL_TL4] =
1865                                 rvu_rsrc_free_count(&txsch->schq);
1866
1867                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1868                 rsp->schq[NIX_TXSCH_LVL_TL3] =
1869                                 rvu_rsrc_free_count(&txsch->schq);
1870
1871                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1872                 rsp->schq[NIX_TXSCH_LVL_TL2] =
1873                                 rvu_rsrc_free_count(&txsch->schq);
1874
1875                 if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
1876                         goto out;
1877
1878                 nix_hw = get_nix_hw(hw, BLKADDR_NIX1);
1879                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1880                 rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] =
1881                                 rvu_rsrc_free_count(&txsch->schq);
1882
1883                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4];
1884                 rsp->schq_nix1[NIX_TXSCH_LVL_TL4] =
1885                                 rvu_rsrc_free_count(&txsch->schq);
1886
1887                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3];
1888                 rsp->schq_nix1[NIX_TXSCH_LVL_TL3] =
1889                                 rvu_rsrc_free_count(&txsch->schq);
1890
1891                 txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
1892                 rsp->schq_nix1[NIX_TXSCH_LVL_TL2] =
1893                                 rvu_rsrc_free_count(&txsch->schq);
1894         }
1895
1896         rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1;
1897 out:
1898         rsp->schq[NIX_TXSCH_LVL_TL1] = 1;
1899         mutex_unlock(&rvu->rsrc_lock);
1900
1901         return 0;
1902 }
1903
1904 int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
1905                             struct msg_rsp *rsp)
1906 {
1907         u16 pcifunc = req->hdr.pcifunc;
1908         u16 vf, numvfs;
1909         u64 cfg;
1910
1911         vf = pcifunc & RVU_PFVF_FUNC_MASK;
1912         cfg = rvu_read64(rvu, BLKADDR_RVUM,
1913                          RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
1914         numvfs = (cfg >> 12) & 0xFF;
1915
1916         if (vf && vf <= numvfs)
1917                 __rvu_flr_handler(rvu, pcifunc);
1918         else
1919                 return RVU_INVALID_VF_ID;
1920
1921         return 0;
1922 }
1923
1924 int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
1925                                 struct get_hw_cap_rsp *rsp)
1926 {
1927         struct rvu_hwinfo *hw = rvu->hw;
1928
1929         rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
1930         rsp->nix_shaping = hw->cap.nix_shaping;
1931
1932         return 0;
1933 }
1934
1935 int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
1936                                  struct msg_rsp *rsp)
1937 {
1938         struct rvu_hwinfo *hw = rvu->hw;
1939         u16 pcifunc = req->hdr.pcifunc;
1940         struct rvu_pfvf *pfvf;
1941         int blkaddr, nixlf;
1942         u16 target;
1943
1944         /* Only PF can add VF permissions */
1945         if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
1946                 return -EOPNOTSUPP;
1947
1948         target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
1949         pfvf = rvu_get_pfvf(rvu, target);
1950
1951         if (req->flags & RESET_VF_PERM) {
1952                 pfvf->flags &= RVU_CLEAR_VF_PERM;
1953         } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
1954                  (req->flags & VF_TRUSTED)) {
1955                 change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
1956                 /* disable multicast and promisc entries */
1957                 if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
1958                         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
1959                         if (blkaddr < 0)
1960                                 return 0;
1961                         nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1962                                            target, 0);
1963                         if (nixlf < 0)
1964                                 return 0;
1965                         npc_enadis_default_mce_entry(rvu, target, nixlf,
1966                                                      NIXLF_ALLMULTI_ENTRY,
1967                                                      false);
1968                         npc_enadis_default_mce_entry(rvu, target, nixlf,
1969                                                      NIXLF_PROMISC_ENTRY,
1970                                                      false);
1971                 }
1972         }
1973
1974         return 0;
1975 }
1976
1977 static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
1978                                 struct mbox_msghdr *req)
1979 {
1980         struct rvu *rvu = pci_get_drvdata(mbox->pdev);
1981
1982         /* Check if valid, if not reply with an invalid msg */
1983         if (req->sig != OTX2_MBOX_REQ_SIG)
1984                 goto bad_message;
1985
1986         switch (req->id) {
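        /* MBOX_MESSAGES expands M() once per mailbox message ID, so each
         * case below allocates the typed response, stamps its header,
         * invokes the matching rvu_mbox_handler_<name>() and propagates
         * any handler error through rsp->hdr.rc.
         */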
1987 #define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
1988         case _id: {                                                     \
1989                 struct _rsp_type *rsp;                                  \
1990                 int err;                                                \
1991                                                                         \
1992                 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(          \
1993                         mbox, devid,                                    \
1994                         sizeof(struct _rsp_type));                      \
1995                 /* some handlers should complete even if reply */       \
1996                 /* could not be allocated */                            \
1997                 if (!rsp &&                                             \
1998                     _id != MBOX_MSG_DETACH_RESOURCES &&                 \
1999                     _id != MBOX_MSG_NIX_TXSCH_FREE &&                   \
2000                     _id != MBOX_MSG_VF_FLR)                             \
2001                         return -ENOMEM;                                 \
2002                 if (rsp) {                                              \
2003                         rsp->hdr.id = _id;                              \
2004                         rsp->hdr.sig = OTX2_MBOX_RSP_SIG;               \
2005                         rsp->hdr.pcifunc = req->pcifunc;                \
2006                         rsp->hdr.rc = 0;                                \
2007                 }                                                       \
2008                                                                         \
2009                 err = rvu_mbox_handler_ ## _fn_name(rvu,                \
2010                                                     (struct _req_type *)req, \
2011                                                     rsp);               \
2012                 if (rsp && err)                                         \
2013                         rsp->hdr.rc = err;                              \
2014                                                                         \
2015                 trace_otx2_msg_process(mbox->pdev, _id, err);           \
2016                 return rsp ? err : -ENOMEM;                             \
2017         }
2018 MBOX_MESSAGES
2019 #undef M
2020
2021 bad_message:
2022         default:
2023                 otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
2024                 return -ENODEV;
2025         }
2026 }
2027
2028 static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
2029 {
2030         struct rvu *rvu = mwork->rvu;
2031         int offset, err, id, devid;
2032         struct otx2_mbox_dev *mdev;
2033         struct mbox_hdr *req_hdr;
2034         struct mbox_msghdr *msg;
2035         struct mbox_wq_info *mw;
2036         struct otx2_mbox *mbox;
2037
2038         switch (type) {
2039         case TYPE_AFPF:
2040                 mw = &rvu->afpf_wq_info;
2041                 break;
2042         case TYPE_AFVF:
2043                 mw = &rvu->afvf_wq_info;
2044                 break;
2045         default:
2046                 return;
2047         }
2048
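        /* The work item's position in the mbox_wrk array identifies which
         * PF (TYPE_AFPF) or VF (TYPE_AFVF) mailbox is being serviced.
         */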
2049         devid = mwork - mw->mbox_wrk;
2050         mbox = &mw->mbox;
2051         mdev = &mbox->dev[devid];
2052
2053         /* Process received mbox messages */
2054         req_hdr = mdev->mbase + mbox->rx_start;
2055         if (mw->mbox_wrk[devid].num_msgs == 0)
2056                 return;
2057
2058         offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
2059
2060         for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
2061                 msg = mdev->mbase + offset;
2062
2063                 /* Set which PF/VF sent this message based on mbox IRQ */
2064                 switch (type) {
2065                 case TYPE_AFPF:
2066                         msg->pcifunc &=
2067                                 ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
2068                         msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
2069                         break;
2070                 case TYPE_AFVF:
2071                         msg->pcifunc &=
2072                                 ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
2073                         msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
2074                         break;
2075                 }
2076
2077                 err = rvu_process_mbox_msg(mbox, devid, msg);
2078                 if (!err) {
2079                         offset = mbox->rx_start + msg->next_msgoff;
2080                         continue;
2081                 }
2082
2083                 if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
2084                         dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
2085                                  err, otx2_mbox_id2name(msg->id),
2086                                  msg->id, rvu_get_pf(msg->pcifunc),
2087                                  (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2088                 else
2089                         dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
2090                                  err, otx2_mbox_id2name(msg->id),
2091                                  msg->id, devid);
2092         }
2093         mw->mbox_wrk[devid].num_msgs = 0;
2094
2095         /* Send mbox responses to VF/PF */
2096         otx2_mbox_msg_send(mbox, devid);
2097 }
2098
2099 static inline void rvu_afpf_mbox_handler(struct work_struct *work)
2100 {
2101         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2102
2103         __rvu_mbox_handler(mwork, TYPE_AFPF);
2104 }
2105
2106 static inline void rvu_afvf_mbox_handler(struct work_struct *work)
2107 {
2108         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2109
2110         __rvu_mbox_handler(mwork, TYPE_AFVF);
2111 }
2112
2113 static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
2114 {
2115         struct rvu *rvu = mwork->rvu;
2116         struct otx2_mbox_dev *mdev;
2117         struct mbox_hdr *rsp_hdr;
2118         struct mbox_msghdr *msg;
2119         struct mbox_wq_info *mw;
2120         struct otx2_mbox *mbox;
2121         int offset, id, devid;
2122
2123         switch (type) {
2124         case TYPE_AFPF:
2125                 mw = &rvu->afpf_wq_info;
2126                 break;
2127         case TYPE_AFVF:
2128                 mw = &rvu->afvf_wq_info;
2129                 break;
2130         default:
2131                 return;
2132         }
2133
2134         devid = mwork - mw->mbox_wrk_up;
2135         mbox = &mw->mbox_up;
2136         mdev = &mbox->dev[devid];
2137
2138         rsp_hdr = mdev->mbase + mbox->rx_start;
2139         if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
2140                 dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
2141                 return;
2142         }
2143
2144         offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
2145
2146         for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
2147                 msg = mdev->mbase + offset;
2148
2149                 if (msg->id >= MBOX_MSG_MAX) {
2150                         dev_err(rvu->dev,
2151                                 "Mbox msg with unknown ID 0x%x\n", msg->id);
2152                         goto end;
2153                 }
2154
2155                 if (msg->sig != OTX2_MBOX_RSP_SIG) {
2156                         dev_err(rvu->dev,
2157                                 "Mbox msg with wrong signature %x, ID 0x%x\n",
2158                                 msg->sig, msg->id);
2159                         goto end;
2160                 }
2161
2162                 switch (msg->id) {
2163                 case MBOX_MSG_CGX_LINK_EVENT:
2164                         break;
2165                 default:
2166                         if (msg->rc)
2167                                 dev_err(rvu->dev,
2168                                         "Mbox msg response has err %d, ID 0x%x\n",
2169                                         msg->rc, msg->id);
2170                         break;
2171                 }
2172 end:
2173                 offset = mbox->rx_start + msg->next_msgoff;
2174                 mdev->msgs_acked++;
2175         }
2176         mw->mbox_wrk_up[devid].up_num_msgs = 0;
2177
2178         otx2_mbox_reset(mbox, devid);
2179 }
2180
2181 static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
2182 {
2183         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2184
2185         __rvu_mbox_up_handler(mwork, TYPE_AFPF);
2186 }
2187
2188 static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
2189 {
2190         struct rvu_work *mwork = container_of(work, struct rvu_work, work);
2191
2192         __rvu_mbox_up_handler(mwork, TYPE_AFVF);
2193 }
2194
2195 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
2196                                 int num, int type)
2197 {
2198         struct rvu_hwinfo *hw = rvu->hw;
2199         int region;
2200         u64 bar4;
2201
2202         /* On the cn10k platform a PF's VF mailbox regions follow right
2203          * after the PF <-> AF mailbox region, whereas on Octeontx2 their
2204          * base is read from the RVU_PF_VF_BAR4_ADDR register.
2205          */
2206         if (type == TYPE_AFVF) {
2207                 for (region = 0; region < num; region++) {
2208                         if (hw->cap.per_pf_mbox_regs) {
2209                                 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2210                                                   RVU_AF_PFX_BAR4_ADDR(0)) +
2211                                                   MBOX_SIZE;
2212                                 bar4 += region * MBOX_SIZE;
2213                         } else {
2214                                 bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
2215                                 bar4 += region * MBOX_SIZE;
2216                         }
2217                         mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2218                         if (!mbox_addr[region])
2219                                 goto error;
2220                 }
2221                 return 0;
2222         }
2223
2224         /* On the cn10k platform the AF <-> PF mailbox region of a PF is
2225          * read from per-PF registers, whereas on Octeontx2 it is derived
2226          * from the RVU_AF_PF_BAR4_ADDR register.
2227          */
2228         for (region = 0; region < num; region++) {
2229                 if (hw->cap.per_pf_mbox_regs) {
2230                         bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2231                                           RVU_AF_PFX_BAR4_ADDR(region));
2232                 } else {
2233                         bar4 = rvu_read64(rvu, BLKADDR_RVUM,
2234                                           RVU_AF_PF_BAR4_ADDR);
2235                         bar4 += region * MBOX_SIZE;
2236                 }
2237                 mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
2238                 if (!mbox_addr[region])
2239                         goto error;
2240         }
2241         return 0;
2242
2243 error:
2244         while (region--)
2245                 iounmap((void __iomem *)mbox_addr[region]);
2246         return -ENOMEM;
2247 }
2248
2249 static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
2250                          int type, int num,
2251                          void (mbox_handler)(struct work_struct *),
2252                          void (mbox_up_handler)(struct work_struct *))
2253 {
2254         int err = -EINVAL, i, dir, dir_up;
2255         void __iomem *reg_base;
2256         struct rvu_work *mwork;
2257         void **mbox_regions;
2258         const char *name;
2259
2260         mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
2261         if (!mbox_regions)
2262                 return -ENOMEM;
2263
2264         switch (type) {
2265         case TYPE_AFPF:
2266                 name = "rvu_afpf_mailbox";
2267                 dir = MBOX_DIR_AFPF;
2268                 dir_up = MBOX_DIR_AFPF_UP;
2269                 reg_base = rvu->afreg_base;
2270                 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
2271                 if (err)
2272                         goto free_regions;
2273                 break;
2274         case TYPE_AFVF:
2275                 name = "rvu_afvf_mailbox";
2276                 dir = MBOX_DIR_PFVF;
2277                 dir_up = MBOX_DIR_PFVF_UP;
2278                 reg_base = rvu->pfreg_base;
2279                 err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
2280                 if (err)
2281                         goto free_regions;
2282                 break;
2283         default:
2284                 goto free_regions;
2285         }
2286
2287         mw->mbox_wq = alloc_workqueue(name,
2288                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2289                                       num);
2290         if (!mw->mbox_wq) {
2291                 err = -ENOMEM;
2292                 goto unmap_regions;
2293         }
2294
2295         mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
2296                                     sizeof(struct rvu_work), GFP_KERNEL);
2297         if (!mw->mbox_wrk) {
2298                 err = -ENOMEM;
2299                 goto exit;
2300         }
2301
2302         mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
2303                                        sizeof(struct rvu_work), GFP_KERNEL);
2304         if (!mw->mbox_wrk_up) {
2305                 err = -ENOMEM;
2306                 goto exit;
2307         }
2308
2309         err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
2310                                      reg_base, dir, num);
2311         if (err)
2312                 goto exit;
2313
2314         err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
2315                                      reg_base, dir_up, num);
2316         if (err)
2317                 goto exit;
2318
2319         for (i = 0; i < num; i++) {
2320                 mwork = &mw->mbox_wrk[i];
2321                 mwork->rvu = rvu;
2322                 INIT_WORK(&mwork->work, mbox_handler);
2323
2324                 mwork = &mw->mbox_wrk_up[i];
2325                 mwork->rvu = rvu;
2326                 INIT_WORK(&mwork->work, mbox_up_handler);
2327         }
2328         kfree(mbox_regions);
2329         return 0;
2330
2331 exit:
2332         destroy_workqueue(mw->mbox_wq);
2333 unmap_regions:
2334         while (num--)
2335                 iounmap((void __iomem *)mbox_regions[num]);
2336 free_regions:
2337         kfree(mbox_regions);
2338         return err;
2339 }
2340
2341 static void rvu_mbox_destroy(struct mbox_wq_info *mw)
2342 {
2343         struct otx2_mbox *mbox = &mw->mbox;
2344         struct otx2_mbox_dev *mdev;
2345         int devid;
2346
2347         if (mw->mbox_wq) {
2348                 flush_workqueue(mw->mbox_wq);
2349                 destroy_workqueue(mw->mbox_wq);
2350                 mw->mbox_wq = NULL;
2351         }
2352
2353         for (devid = 0; devid < mbox->ndevs; devid++) {
2354                 mdev = &mbox->dev[devid];
2355                 if (mdev->hwbase)
2356                         iounmap((void __iomem *)mdev->hwbase);
2357         }
2358
2359         otx2_mbox_destroy(&mw->mbox);
2360         otx2_mbox_destroy(&mw->mbox_up);
2361 }
2362
2363 static void rvu_queue_work(struct mbox_wq_info *mw, int first,
2364                            int mdevs, u64 intr)
2365 {
2366         struct otx2_mbox_dev *mdev;
2367         struct otx2_mbox *mbox;
2368         struct mbox_hdr *hdr;
2369         int i;
2370
2371         for (i = first; i < mdevs; i++) {
2372                 /* Interrupt bit 0 corresponds to the device at index 'first' */
2373                 if (!(intr & BIT_ULL(i - first)))
2374                         continue;
2375
2376                 mbox = &mw->mbox;
2377                 mdev = &mbox->dev[i];
2378                 hdr = mdev->mbase + mbox->rx_start;
2379
2380                 /* The hdr->num_msgs is set to zero immediately in the
2381                  * interrupt handler to ensure that it holds a correct
2382                  * value the next time the interrupt handler is called.
2383                  * mbox_wrk[i].num_msgs holds the count for use in the
2384                  * mbox handler and mbox_wrk_up[i].up_num_msgs holds the
2385                  * count for use in the mbox up handler.
2386                  */
2387
2388                 if (hdr->num_msgs) {
2389                         mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
2390                         hdr->num_msgs = 0;
2391                         queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
2392                 }
2393                 mbox = &mw->mbox_up;
2394                 mdev = &mbox->dev[i];
2395                 hdr = mdev->mbase + mbox->rx_start;
2396                 if (hdr->num_msgs) {
2397                         mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
2398                         hdr->num_msgs = 0;
2399                         queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
2400                 }
2401         }
2402 }
2403
2404 static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
2405 {
2406         struct rvu *rvu = (struct rvu *)rvu_irq;
2407         int vfs = rvu->vfs;
2408         u64 intr;
2409
2410         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
2411         /* Clear interrupts */
2412         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
2413         if (intr)
2414                 trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
2415
2416         /* Sync with mbox memory region */
2417         rmb();
2418
2419         rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
2420
2421         /* Handle VF interrupts */
2422         if (vfs > 64) {
2423                 intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
2424                 rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
2425
2426                 rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
2427                 vfs -= 64;
2428         }
2429
2430         intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
2431         rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
2432         if (intr)
2433                 trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
2434
2435         rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
2436
2437         return IRQ_HANDLED;
2438 }
2439
2440 static void rvu_enable_mbox_intr(struct rvu *rvu)
2441 {
2442         struct rvu_hwinfo *hw = rvu->hw;
2443
2444         /* Clear spurious irqs, if any */
2445         rvu_write64(rvu, BLKADDR_RVUM,
2446                     RVU_AF_PFAF_MBOX_INT, INTR_MASK(hw->total_pfs));
2447
2448         /* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
2449         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1S,
2450                     INTR_MASK(hw->total_pfs) & ~1ULL);
2451 }
2452
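/* Tear down all LFs of the given block attached to 'pcifunc': run the
 * block-specific cleanup (NIX/NPA/CPT) and then reset each LF. Used by
 * the FLR handling below.
 */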
2453 static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
2454 {
2455         struct rvu_block *block;
2456         int slot, lf, num_lfs;
2457         int err;
2458
2459         block = &rvu->hw->block[blkaddr];
2460         num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
2461                                         block->addr);
2462         if (!num_lfs)
2463                 return;
2464         for (slot = 0; slot < num_lfs; slot++) {
2465                 lf = rvu_get_lf(rvu, block, pcifunc, slot);
2466                 if (lf < 0)
2467                         continue;
2468
2469                 /* Cleanup LF and reset it */
2470                 if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
2471                         rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
2472                 else if (block->addr == BLKADDR_NPA)
2473                         rvu_npa_lf_teardown(rvu, pcifunc, lf);
2474                 else if ((block->addr == BLKADDR_CPT0) ||
2475                          (block->addr == BLKADDR_CPT1))
2476                         rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
2477
2478                 err = rvu_lf_reset(rvu, block, lf);
2479                 if (err) {
2480                         dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
2481                                 block->addr, lf);
2482                 }
2483         }
2484 }
2485
2486 static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
2487 {
2488         mutex_lock(&rvu->flr_lock);
2489         /* Reset order should reflect inter-block dependencies:
2490          * 1. Reset any packet/work sources (NIX, CPT, TIM)
2491          * 2. Flush and reset SSO/SSOW
2492          * 3. Cleanup pools (NPA)
2493          */
2494         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
2495         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
2496         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
2497         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
2498         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
2499         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
2500         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
2501         rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
2502         rvu_reset_lmt_map_tbl(rvu, pcifunc);
2503         rvu_detach_rsrcs(rvu, NULL, pcifunc);
2504         mutex_unlock(&rvu->flr_lock);
2505 }
2506
2507 static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
2508 {
2509         int reg = 0;
2510
2511         /* pcifunc = 0 (PF0) | (vf + 1) */
2512         __rvu_flr_handler(rvu, vf + 1);
2513
2514         if (vf >= 64) {
2515                 reg = 1;
2516                 vf = vf - 64;
2517         }
2518
2519         /* Signal FLR finish and enable IRQ */
2520         rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
2521         rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
2522 }
2523
2524 static void rvu_flr_handler(struct work_struct *work)
2525 {
2526         struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
2527         struct rvu *rvu = flrwork->rvu;
2528         u16 pcifunc, numvfs, vf;
2529         u64 cfg;
2530         int pf;
2531
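        /* flr_wrk[] has one entry per PF followed by one entry per VF of
         * the AF, so the work item's index tells whether this FLR came
         * from a PF or from one of AF's own VFs.
         */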
2532         pf = flrwork - rvu->flr_wrk;
2533         if (pf >= rvu->hw->total_pfs) {
2534                 rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
2535                 return;
2536         }
2537
2538         cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2539         numvfs = (cfg >> 12) & 0xFF;
2540         pcifunc  = pf << RVU_PFVF_PF_SHIFT;
2541
2542         for (vf = 0; vf < numvfs; vf++)
2543                 __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
2544
2545         __rvu_flr_handler(rvu, pcifunc);
2546
2547         /* Signal FLR finish */
2548         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
2549
2550         /* Enable interrupt */
2551         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,  BIT_ULL(pf));
2552 }
2553
2554 static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
2555 {
2556         int dev, vf, reg = 0;
2557         u64 intr;
2558
2559         if (start_vf >= 64)
2560                 reg = 1;
2561
2562         intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
2563         if (!intr)
2564                 return;
2565
2566         for (vf = 0; vf < numvfs; vf++) {
2567                 if (!(intr & BIT_ULL(vf)))
2568                         continue;
2569                 /* Clear and disable the interrupt */
2570                 rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
2571                 rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
2572
2573                 dev = vf + start_vf + rvu->hw->total_pfs;
2574                 queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
2575         }
2576 }
2577
2578 static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
2579 {
2580         struct rvu *rvu = (struct rvu *)rvu_irq;
2581         u64 intr;
2582         u8  pf;
2583
2584         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
2585         if (!intr)
2586                 goto afvf_flr;
2587
2588         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2589                 if (intr & (1ULL << pf)) {
2590                         /* clear interrupt */
2591                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
2592                                     BIT_ULL(pf));
2593                         /* Disable the interrupt */
2594                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2595                                     BIT_ULL(pf));
2596                         /* PF is already dead, do only AF-related operations */
2597                         queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
2598                 }
2599         }
2600
2601 afvf_flr:
2602         rvu_afvf_queue_flr_work(rvu, 0, 64);
2603         if (rvu->vfs > 64)
2604                 rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
2605
2606         return IRQ_HANDLED;
2607 }
2608
2609 static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
2610 {
2611         int vf;
2612
2613         /* Nothing to be done here other than clearing the
2614          * TRPEND bit.
2615          */
2616         for (vf = 0; vf < 64; vf++) {
2617                 if (intr & (1ULL << vf)) {
2618                         /* clear the trpend due to ME (master enable) */
2619                         rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
2620                         /* clear interrupt */
2621                         rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
2622                 }
2623         }
2624 }
2625
2626 /* Handles ME interrupts from VFs of AF */
2627 static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
2628 {
2629         struct rvu *rvu = (struct rvu *)rvu_irq;
2630         int vfset;
2631         u64 intr;
2632
2633         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2634
2635         for (vfset = 0; vfset <= 1; vfset++) {
2636                 intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
2637                 if (intr)
2638                         rvu_me_handle_vfset(rvu, vfset, intr);
2639         }
2640
2641         return IRQ_HANDLED;
2642 }
2643
2644 /* Handles ME interrupts from PFs */
2645 static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
2646 {
2647         struct rvu *rvu = (struct rvu *)rvu_irq;
2648         u64 intr;
2649         u8  pf;
2650
2651         intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
2652
2653         /* Nothing to be done here other than clearing the
2654          * TRPEND bit.
2655          */
2656         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2657                 if (intr & (1ULL << pf)) {
2658                         /* clear the trpend due to ME (master enable) */
2659                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
2660                                     BIT_ULL(pf));
2661                         /* clear interrupt */
2662                         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
2663                                     BIT_ULL(pf));
2664                 }
2665         }
2666
2667         return IRQ_HANDLED;
2668 }
2669
2670 static void rvu_unregister_interrupts(struct rvu *rvu)
2671 {
2672         int irq;
2673
2674         /* Disable the Mbox interrupt */
2675         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
2676                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2677
2678         /* Disable the PF FLR interrupt */
2679         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
2680                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2681
2682         /* Disable the PF ME interrupt */
2683         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
2684                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2685
2686         for (irq = 0; irq < rvu->num_vec; irq++) {
2687                 if (rvu->irq_allocated[irq]) {
2688                         free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
2689                         rvu->irq_allocated[irq] = false;
2690                 }
2691         }
2692
2693         pci_free_irq_vectors(rvu->pdev);
2694         rvu->num_vec = 0;
2695 }
2696
2697 static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
2698 {
2699         struct rvu_pfvf *pfvf = &rvu->pf[0];
2700         int offset;
2701
2703         offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2704
2705         /* Make sure there are enough MSIX vectors configured so that
2706          * VF interrupts can be handled. An offset of zero means the PF
2707          * vectors are not configured and would overlap the AF vectors.
2708          */
2709         return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
2710                offset;
2711 }
2712
2713 static int rvu_register_interrupts(struct rvu *rvu)
2714 {
2715         int ret, offset, pf_vec_start;
2716
2717         rvu->num_vec = pci_msix_vec_count(rvu->pdev);
2718
2719         rvu->irq_name = devm_kmalloc_array(rvu->dev, rvu->num_vec,
2720                                            NAME_SIZE, GFP_KERNEL);
2721         if (!rvu->irq_name)
2722                 return -ENOMEM;
2723
2724         rvu->irq_allocated = devm_kcalloc(rvu->dev, rvu->num_vec,
2725                                           sizeof(bool), GFP_KERNEL);
2726         if (!rvu->irq_allocated)
2727                 return -ENOMEM;
2728
2729         /* Enable MSI-X */
2730         ret = pci_alloc_irq_vectors(rvu->pdev, rvu->num_vec,
2731                                     rvu->num_vec, PCI_IRQ_MSIX);
2732         if (ret < 0) {
2733                 dev_err(rvu->dev,
2734                         "RVUAF: Request for %d msix vectors failed, ret %d\n",
2735                         rvu->num_vec, ret);
2736                 return ret;
2737         }
2738
2739         /* Register mailbox interrupt handler */
2740         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
2741         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
2742                           rvu_mbox_intr_handler, 0,
2743                           &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
2744         if (ret) {
2745                 dev_err(rvu->dev,
2746                         "RVUAF: IRQ registration failed for mbox irq\n");
2747                 goto fail;
2748         }
2749
2750         rvu->irq_allocated[RVU_AF_INT_VEC_MBOX] = true;
2751
2752         /* Enable mailbox interrupts from all PFs */
2753         rvu_enable_mbox_intr(rvu);
2754
2755         /* Register FLR interrupt handler */
2756         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2757                 "RVUAF FLR");
2758         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
2759                           rvu_flr_intr_handler, 0,
2760                           &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
2761                           rvu);
2762         if (ret) {
2763                 dev_err(rvu->dev,
2764                         "RVUAF: IRQ registration failed for FLR\n");
2765                 goto fail;
2766         }
2767         rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
2768
2769         /* Enable FLR interrupt for all PFs */
2770         rvu_write64(rvu, BLKADDR_RVUM,
2771                     RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
2772
2773         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
2774                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2775
2776         /* Register ME interrupt handler */
2777         sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2778                 "RVUAF ME");
2779         ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
2780                           rvu_me_pf_intr_handler, 0,
2781                           &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
2782                           rvu);
2783         if (ret) {
2784                 dev_err(rvu->dev,
2785                         "RVUAF: IRQ registration failed for ME\n");
                        goto fail;
2786         }
2787         rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
2788
2789         /* Clear TRPEND bit for all PFs */
2790         rvu_write64(rvu, BLKADDR_RVUM,
2791                     RVU_AF_PFTRPEND, INTR_MASK(rvu->hw->total_pfs));
2792         /* Enable ME interrupt for all PFs */
2793         rvu_write64(rvu, BLKADDR_RVUM,
2794                     RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
2795
2796         rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
2797                     INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
2798
2799         if (!rvu_afvf_msix_vectors_num_ok(rvu))
2800                 return 0;
2801
2802         /* Get PF MSIX vectors offset. */
2803         pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
2804                                   RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
2805
2806         /* Register MBOX0 interrupt. */
2807         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
2808         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
2809         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2810                           rvu_mbox_intr_handler, 0,
2811                           &rvu->irq_name[offset * NAME_SIZE],
2812                           rvu);
2813         if (ret) {
2814                 dev_err(rvu->dev,
2815                         "RVUAF: IRQ registration failed for Mbox0\n");
                        goto fail;
        }
2816
2817         rvu->irq_allocated[offset] = true;
2818
2819         /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
2820          * simply increment current offset by 1.
2821          */
2822         offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
2823         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
2824         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2825                           rvu_mbox_intr_handler, 0,
2826                           &rvu->irq_name[offset * NAME_SIZE],
2827                           rvu);
2828         if (ret) {
2829                 dev_err(rvu->dev,
2830                         "RVUAF: IRQ registration failed for Mbox1\n");
                        goto fail;
        }
2831
2832         rvu->irq_allocated[offset] = true;
2833
2834         /* Register FLR interrupt handler for AF's VFs */
2835         offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
2836         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
2837         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2838                           rvu_flr_intr_handler, 0,
2839                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2840         if (ret) {
2841                 dev_err(rvu->dev,
2842                         "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
2843                 goto fail;
2844         }
2845         rvu->irq_allocated[offset] = true;
2846
2847         offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
2848         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
2849         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2850                           rvu_flr_intr_handler, 0,
2851                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2852         if (ret) {
2853                 dev_err(rvu->dev,
2854                         "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
2855                 goto fail;
2856         }
2857         rvu->irq_allocated[offset] = true;
2858
2859         /* Register ME interrupt handler for AF's VFs */
2860         offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
2861         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
2862         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2863                           rvu_me_vf_intr_handler, 0,
2864                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2865         if (ret) {
2866                 dev_err(rvu->dev,
2867                         "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
2868                 goto fail;
2869         }
2870         rvu->irq_allocated[offset] = true;
2871
2872         offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
2873         sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
2874         ret = request_irq(pci_irq_vector(rvu->pdev, offset),
2875                           rvu_me_vf_intr_handler, 0,
2876                           &rvu->irq_name[offset * NAME_SIZE], rvu);
2877         if (ret) {
2878                 dev_err(rvu->dev,
2879                         "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
2880                 goto fail;
2881         }
2882         rvu->irq_allocated[offset] = true;
2883         return 0;
2884
2885 fail:
2886         rvu_unregister_interrupts(rvu);
2887         return ret;
2888 }
2889
2890 static void rvu_flr_wq_destroy(struct rvu *rvu)
2891 {
2892         if (rvu->flr_wq) {
2893                 flush_workqueue(rvu->flr_wq);
2894                 destroy_workqueue(rvu->flr_wq);
2895                 rvu->flr_wq = NULL;
2896         }
2897 }
2898
2899 static int rvu_flr_init(struct rvu *rvu)
2900 {
2901         int dev, num_devs;
2902         u64 cfg;
2903         int pf;
2904
2905         /* Enable FLR for all PFs */
2906         for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2907                 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2908                 rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
2909                             cfg | BIT_ULL(22));
2910         }
2911
2912         rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
2913                                       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
2914                                        1);
2915         if (!rvu->flr_wq)
2916                 return -ENOMEM;
2917
2918         num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
2919         rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
2920                                     sizeof(struct rvu_work), GFP_KERNEL);
2921         if (!rvu->flr_wrk) {
2922                 destroy_workqueue(rvu->flr_wq);
2923                 return -ENOMEM;
2924         }
2925
2926         for (dev = 0; dev < num_devs; dev++) {
2927                 rvu->flr_wrk[dev].rvu = rvu;
2928                 INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
2929         }
2930
2931         mutex_init(&rvu->flr_lock);
2932
2933         return 0;
2934 }
2935
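/* AF VF interrupt state lives in pairs of 64-bit registers: index 0
 * covers VFs 0..63 and index 1 covers VFs 64..127, one bit per VF.
 * INTR_MASK(n) expands to a mask of the n low bits (all 64 bits once
 * n >= 64). For example, with vfs = 96 the index-0 registers are
 * written with ~0ull and the index-1 registers with
 * INTR_MASK(96 - 64) = 0xffffffff.
 */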
static void rvu_disable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
		      INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

static void rvu_enable_afvf_intr(struct rvu *rvu)
{
	int vfs = rvu->vfs;

	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	/* Mbox */
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
		      INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

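/* Discover how many loopback (LBK) channels the hardware provides by
 * peeking at the first LBK device's BAR0. Returns the channel count on
 * success, or -EIO if no LBK device is found or its BAR cannot be
 * mapped. The reference taken by pci_get_device() is dropped before
 * returning.
 */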
int rvu_get_num_lbk_chans(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int ret = -EIO;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
			      NULL);
	if (!pdev)
		goto err;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto err_put;

	/* Read number of available LBK channels from LBK(0)_CONST register. */
	ret = (readq(base + 0x10) >> 32) & 0xffff;
	iounmap(base);
err_put:
	pci_dev_put(pdev);
err:
	return ret;
}

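/* Bring up SRIOV for the AF's own VFs. Ordering matters here: the
 * AF-VF mailbox and the VF interrupt vectors must be live before
 * pci_enable_sriov() creates the VFs, since VFs may start mailbox
 * traffic as soon as they appear; the mb() below ensures the interrupt
 * enables are visible before the VFs are created.
 */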
static int rvu_enable_sriov(struct rvu *rvu)
{
	struct pci_dev *pdev = rvu->pdev;
	int err, chans, vfs;

	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
		dev_warn(&pdev->dev,
			 "Skipping SRIOV enablement since not enough IRQs are available\n");
		return 0;
	}

	chans = rvu_get_num_lbk_chans();
	if (chans < 0)
		return chans;

	vfs = pci_sriov_get_totalvfs(pdev);

	/* Limit VFs in case we have more VFs than LBK channels available. */
	if (vfs > chans)
		vfs = chans;

	if (!vfs)
		return 0;

	/* LBK channel number 63 is used for switching packets between
	 * CGX mapped VFs. Hence limit the number of LBK VF pairs to 62.
	 */
	if (vfs > 62)
		vfs = 62;

	/* Save the number of VFs for reference in the VF interrupt
	 * handlers. Since interrupts might start arriving during SRIOV
	 * enablement, the ordinary API cannot be used to get the number
	 * of enabled VFs.
	 */
	rvu->vfs = vfs;

	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
	if (err)
		return err;

	rvu_enable_afvf_intr(rvu);
	/* Make sure IRQs are enabled before SRIOV. */
	mb();

	err = pci_enable_sriov(pdev, vfs);
	if (err) {
		rvu_disable_afvf_intr(rvu);
		rvu_mbox_destroy(&rvu->afvf_wq_info);
		return err;
	}

	return 0;
}

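/* Quiesce AF VF interrupts and the AF-VF mailbox before turning
 * SRIOV off.
 */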
static void rvu_disable_sriov(struct rvu *rvu)
{
	rvu_disable_afvf_intr(rvu);
	rvu_mbox_destroy(&rvu->afvf_wq_info);
	pci_disable_sriov(rvu->pdev);
}

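/* Pick up the MKEX/KPU profile names passed as module parameters,
 * falling back to "default" for any parameter that was not supplied.
 */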
static void rvu_update_module_params(struct rvu *rvu)
{
	const char *default_pfl_name = "default";

	strscpy(rvu->mkex_pfl_name,
		mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
	strscpy(rvu->kpu_pfl_name,
		kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}

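/* Probe sequence: enable the PCI device and map its BARs, discover and
 * reset the hardware blocks, then layer the software services on top
 * (AF-PF mailbox, FLR handling, interrupts, devlink, SRIOV, debugfs).
 * Each err_* label below unwinds exactly the steps that succeeded, in
 * reverse order.
 */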
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct rvu *rvu;
	int err;

	rvu = devm_kzalloc(dev, sizeof(*rvu), GFP_KERNEL);
	if (!rvu)
		return -ENOMEM;

	rvu->hw = devm_kzalloc(dev, sizeof(struct rvu_hwinfo), GFP_KERNEL);
	if (!rvu->hw) {
		devm_kfree(dev, rvu);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, rvu);
	rvu->pdev = pdev;
	rvu->dev = &pdev->dev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_freemem;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	rvu->ptp = ptp_get();
	if (IS_ERR(rvu->ptp)) {
		err = PTR_ERR(rvu->ptp);
		if (err == -EPROBE_DEFER)
			goto err_release_regions;
		rvu->ptp = NULL;
	}

	/* Map Admin function CSRs */
	rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
	rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!rvu->afreg_base || !rvu->pfreg_base) {
		dev_err(dev, "Unable to map admin function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_put_ptp;
	}

	/* Store module params in rvu structure */
	rvu_update_module_params(rvu);

	/* Check which blocks the HW supports */
	rvu_check_block_implemented(rvu);

	rvu_reset_all_blocks(rvu);

	rvu_setup_hw_capabilities(rvu);

	err = rvu_setup_hw_resources(rvu);
	if (err)
		goto err_put_ptp;

	/* Init mailbox btw AF and PFs */
	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
			    rvu_afpf_mbox_up_handler);
	if (err) {
		dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
		goto err_hwsetup;
	}

	err = rvu_flr_init(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to initialize flr\n", __func__);
		goto err_mbox;
	}

	err = rvu_register_interrupts(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register interrupts\n", __func__);
		goto err_flr;
	}

	err = rvu_register_dl(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to register devlink\n", __func__);
		goto err_irq;
	}

	rvu_setup_rvum_blk_revid(rvu);

	/* Enable AF's VFs (if any) */
	err = rvu_enable_sriov(rvu);
	if (err) {
		dev_err(dev, "%s: Failed to enable sriov\n", __func__);
		goto err_dl;
	}

	/* Initialize debugfs */
	rvu_dbg_init(rvu);

	mutex_init(&rvu->rswitch.switch_lock);

	return 0;
err_dl:
	rvu_unregister_dl(rvu);
err_irq:
	rvu_unregister_interrupts(rvu);
err_flr:
	rvu_flr_wq_destroy(rvu);
err_mbox:
	rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
err_put_ptp:
	ptp_put(rvu->ptp);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_freemem:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(dev, rvu->hw);
	devm_kfree(dev, rvu);
	return err;
}

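/* Teardown largely follows rvu_probe()'s error path, plus the pieces
 * that only a successful probe leaves behind (debugfs and SRIOV).
 */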
static void rvu_remove(struct pci_dev *pdev)
{
	struct rvu *rvu = pci_get_drvdata(pdev);

	rvu_dbg_exit(rvu);
	rvu_unregister_dl(rvu);
	rvu_unregister_interrupts(rvu);
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
	ptp_put(rvu->ptp);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	devm_kfree(&pdev->dev, rvu->hw);
	devm_kfree(&pdev->dev, rvu);
}

static struct pci_driver rvu_driver = {
	.name = DRV_NAME,
	.id_table = rvu_id_table,
	.probe = rvu_probe,
	.remove = rvu_remove,
};

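/* rvu_probe() depends on the CGX and PTP drivers being around (see the
 * ptp_get() call above), so register those first and unwind in reverse
 * order on failure.
 */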
static int __init rvu_init_module(void)
{
	int err;

	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	err = pci_register_driver(&cgx_driver);
	if (err < 0)
		return err;

	err = pci_register_driver(&ptp_driver);
	if (err < 0)
		goto ptp_err;

	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;

	return 0;
rvu_err:
	pci_unregister_driver(&ptp_driver);
ptp_err:
	pci_unregister_driver(&cgx_driver);

	return err;
}

static void __exit rvu_cleanup_module(void)
{
	pci_unregister_driver(&rvu_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
}

module_init(rvu_init_module);
module_exit(rvu_cleanup_module);