// SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/pci.h>
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"

#define LMT_TBL_OP_READ		0
#define LMT_TBL_OP_WRITE	1
#define LMT_MAP_TABLE_SIZE	(128 * 1024)
#define LMT_MAPTBL_ENTRY_SIZE	16

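/* Each LMT map table entry (APR_LMT_MAP_ENTRY_S) is 16 bytes, so the
 * 128 KB table holds 8192 entries, one per PF/VF function.
 */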
/* Function to perform operations (read/write) on lmtst map table */
static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
			       int lmt_tbl_op)
{
	void __iomem *lmt_map_base;
	u64 tbl_base;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		return -ENOMEM;
	}

	if (lmt_tbl_op == LMT_TBL_OP_READ) {
		*val = readq(lmt_map_base + index);
	} else {
		writeq(*val, lmt_map_base + index);
		/* Flush the AP interceptor cache so that APR_LMT_MAP_ENTRY_S
		 * changes take effect. Write 1 to flush; the read back acts
		 * as a barrier and sets up a data dependency. Then write 0
		 * to complete the flush.
		 */
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
		rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
		rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
	}

	iounmap(lmt_map_base);
	return 0;
}

#define LMT_MAP_TBL_W1_OFF	8
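/* Word0 of a map table entry holds the LMTLINE base address; word1, at
 * byte offset LMT_MAP_TBL_W1_OFF within the 16-byte entry, holds control
 * bits such as scheduled-LMTST enable and the prefetch/early-completion
 * disables updated by the lmtst_tbl_setup mailbox below.
 */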
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
	return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
		(pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
}

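/* Translate a PF/VF-provided IOVA to a physical address by issuing a
 * translation request through the AF SMMU widget registers and polling
 * for the response.
 */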
static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
			   u64 iova, u64 *lmt_addr)
{
	u64 pa, val, pf;
	int err;

	if (!iova) {
		dev_err(rvu->dev, "%s Requested NULL address for translation\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&rvu->rsrc_lock);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
	pf = rvu_get_pf(pcifunc) & 0x1F;
	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
	      ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);

	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
	if (err) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
		goto exit;
	}
	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
	if (val & ~0x1ULL) {
		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
		err = -EIO;
		goto exit;
	}
	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
	 * PA[11:0] = IOVA[11:0]
	 */
	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
	pa &= GENMASK_ULL(39, 0);
	*lmt_addr = (pa << 12) | (iova & 0xFFF);
exit:
	mutex_unlock(&rvu->rsrc_lock);
	return err;
}

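/* Re-point a pcifunc's LMT map table entry at a new LMTLINE base
 * address, saving the entry's previous value so that FLR can restore it.
 */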
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	u64 val;
	int err;

	/* Read the current lmt addr of pcifunc */
	tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
	err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
	if (err) {
		dev_err(rvu->dev,
			"Failed to read LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
		return err;
	}

	/* Store the secondary's lmt base address as this needs to be
	 * reverted in FLR. Also make sure this default value doesn't
	 * get overwritten on multiple calls to this mailbox.
	 */
	if (!pfvf->lmt_base_addr)
		pfvf->lmt_base_addr = val;

	/* Update the LMT table with the new addr */
	err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
	if (err)
		dev_err(rvu->dev,
			"Failed to update LMT map table: index 0x%x err %d\n",
			tbl_idx, err);
	return err;
}

int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
				     struct lmtst_tbl_setup_req *req,
				     struct msg_rsp *rsp)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	u32 pri_tbl_idx, tbl_idx;
	u64 lmt_addr;
	int err = 0;
	u64 val;

	/* Check if the PF_FUNC wants to use its own local memory as the
	 * LMTLINE region; if so, convert that IOVA to a physical address
	 * and populate the LMT table with that address.
	 */
	if (req->use_local_lmt_region) {
		err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
				      req->lmt_iova, &lmt_addr);
		if (err < 0)
			return err;

		/* Update the lmt addr for this PFFUNC in the LMT table */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
		if (err)
			return err;
	}

	/* Reconfigure the lmtst map table in LMT region shared mode, i.e.
	 * make multiple PF_FUNCs share an LMTLINE region. The primary/base
	 * pcifunc (passed as an argument to the mailbox) is the one whose
	 * lmt base address will be shared among the secondary pcifuncs
	 * (the callers of this mailbox).
	 */
	if (req->base_pcifunc) {
		/* Calculate the LMT table index equivalent to the primary
		 * pcifunc.
		 */
		pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);

		/* Read the base lmt addr of the primary pcifunc */
		err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
					  LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				pri_tbl_idx, err);
			goto error;
		}

		/* Update the base lmt addr of the secondary with the
		 * primary's base lmt addr.
		 */
		err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
		if (err)
			return err;
	}

	/* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
	 * e.g. enabling scheduled LMTST, disabling LMTLINE prefetch, or
	 * disabling early completion for ordered LMTST.
	 */
	if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_READ);
		if (err) {
			dev_err(rvu->dev,
				"Failed to read LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}

		/* Store the lmt map table entry word1 default value as this
		 * needs to be reverted in FLR. Also make sure this default
		 * value doesn't get overwritten on multiple calls to this
		 * mailbox.
		 */
		if (!pfvf->lmt_map_ent_w1)
			pfvf->lmt_map_ent_w1 = val;

		/* Disable early completion for Ordered LMTSTs. */
		if (req->dis_sched_early_comp)
			val |= (req->dis_sched_early_comp <<
				APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
		/* Enable scheduled LMTST */
		if (req->sch_ena)
			val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
				req->ssow_pf_func;
		/* Disable LMTLINE prefetch before receiving store data. */
		if (req->dis_line_pref)
			val |= (req->dis_line_pref <<
				APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);

		err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
					  &val, LMT_TBL_OP_WRITE);
		if (err) {
			dev_err(rvu->dev,
				"Failed to update LMT map table: index 0x%x err %d\n",
				tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			goto error;
		}
	}

error:
	return err;
}

/* Reset the lmtst map table to the original base addresses */
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	u32 tbl_idx;
	int err;

	if (is_rvu_otx2(rvu))
		return;

	if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
		/* This corresponds to the lmt map table index */
		tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
		/* Revert to the original lmt base addr of the respective
		 * pcifunc.
		 */
		if (pfvf->lmt_base_addr) {
			err = lmtst_map_table_ops(rvu, tbl_idx,
						  &pfvf->lmt_base_addr,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx, err);
			pfvf->lmt_base_addr = 0;
		}
		/* Revert to the original word1 value of the lmtst map table
		 * entry which underwent changes.
		 */
		if (pfvf->lmt_map_ent_w1) {
			err = lmtst_map_table_ops(rvu,
						  tbl_idx + LMT_MAP_TBL_W1_OFF,
						  &pfvf->lmt_map_ent_w1,
						  LMT_TBL_OP_WRITE);
			if (err)
				dev_err(rvu->dev,
					"Failed to update LMT map table: index 0x%x err %d\n",
					tbl_idx + LMT_MAP_TBL_W1_OFF, err);
			pfvf->lmt_map_ent_w1 = 0;
		}
	}
}

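/* Derive link counts from NIX_AF_CONST/NIX_AF_CONST1 and assign the base
 * channel number of every link type. With programmable channels the bases
 * are packed back-to-back in LBK, SDP, CGX, CPT order, each rounded up to
 * a multiple of that block's channel count. As a purely illustrative
 * example with hypothetical counts: 1 LBK link of 64 channels and 256 SDP
 * channels gives sdp_chan_base = ALIGN(0 + 1 * 64, 256) = 256.
 */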
int rvu_set_channels_base(struct rvu *rvu)
{
	u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
	u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
	struct rvu_hwinfo *hw = rvu->hw;
	u64 nix_const, nix_const1;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return blkaddr;

	nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);

	hw->cgx = (nix_const >> 12) & 0xFULL;
	hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = (nix_const >> 24) & 0xFULL;
	hw->cpt_links = (nix_const >> 44) & 0xFULL;
	hw->sdp_links = 1;

	hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
	hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
	hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;

	/* No programmable channels */
	if (!(nix_const & BIT_ULL(60)))
		return 0;

	hw->cap.programmable_chans = true;

	/* If programmable channels are present then configure
	 * channels such that all channel numbers are contiguous,
	 * leaving no holes. This way the new CPT channels can be
	 * accommodated. The order of channel numbers assigned is
	 * LBK, SDP, CGX and CPT. Also the base channel number
	 * of a block must be a multiple of the number of channels
	 * of that block.
	 */
	nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
	nr_sdp_chans = nix_const1 & 0xFFFULL;
	nr_cgx_chans = nix_const & 0xFFULL;
	nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;

	sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
	/* Round up base channel to a multiple of number of channels */
	hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);

	cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
	hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);

	cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
	hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);

	/* Out of 4096 channels start CPT from 2048 so
	 * that the MSB for CPT channels is always set.
	 */
	if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
		hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
	} else {
		dev_err(rvu->dev,
			"CPT channels could not fit in the range 2048-4095\n");
		return -EINVAL;
	}

	return 0;
}

#define LBK_CONNECT_NIXX(a)	(0x0 + (a))

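/* Program one LBK LINK_CFG register: the channel range (log2 of the
 * channel count), the destination NIX id and the base channel number.
 */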
static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
				u64 offset, int lbkid, u16 chans)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u64 cfg;

	cfg = readq(base + offset);
	cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
		 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
	cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
	cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
	cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);

	writeq(cfg, base + offset);
}

static void rvu_lbk_set_channels(struct rvu *rvu)
{
	struct pci_dev *pdev = NULL;
	void __iomem *base;
	u64 lbk_const;
	u8 src, dst;
	u16 chans;

	/* To loopback packets between multiple NIX blocks,
	 * multiple LBK blocks are needed. With two NIX blocks,
	 * four LBK blocks are needed and each LBK block's
	 * source and destination are as follows:
	 * LBK0 - source NIX0 and destination NIX0
	 * LBK1 - source NIX0 and destination NIX1
	 * LBK2 - source NIX1 and destination NIX0
	 * LBK3 - source NIX1 and destination NIX1
	 * As per the HRM, channel numbers should be programmed as:
	 * P2X and X2P of LBK0 as same
	 * P2X and X2P of LBK3 as same
	 * P2X of LBK1 and X2P of LBK2 as same
	 * P2X of LBK2 and X2P of LBK1 as same
	 */
	while (true) {
		pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				      PCI_DEVID_OCTEONTX2_LBK, pdev);
		if (!pdev)
			return;

		base = pci_ioremap_bar(pdev, 0);
		if (!base)
			goto err_put;

		lbk_const = readq(base + LBK_CONST);
		chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
		dst = FIELD_GET(LBK_CONST_DST, lbk_const);
		src = FIELD_GET(LBK_CONST_SRC, lbk_const);

		if (src == dst) {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			}
		} else {
			if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    0, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    1, chans);
			} else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
						    1, chans);
				__rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
						    0, chans);
			}
		}
		iounmap(base);
	}
err_put:
	pci_dev_put(pdev);
}

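/* Program each NIX link's LINKX_CFG register with its base channel and
 * range (log2 of the channel count), walking CGX, LBK, SDP and CPT links
 * in nix_link order.
 */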
static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
	u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
	struct rvu_hwinfo *hw = rvu->hw;
	int link, nix_link = 0;
	u16 start;
	u64 cfg;

	cgx_chans = nix_const & 0xFFULL;
	lbk_chans = (nix_const >> 16) & 0xFFULL;
	sdp_chans = nix_const1 & 0xFFFULL;
	cpt_chans = (nix_const >> 32) & 0xFFFULL;

	start = hw->cgx_chan_base;
	for (link = 0; link < hw->cgx_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cgx_chans;
	}

	start = hw->lbk_chan_base;
	for (link = 0; link < hw->lbk_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += lbk_chans;
	}

	start = hw->sdp_chan_base;
	for (link = 0; link < hw->sdp_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += sdp_chans;
	}

	start = hw->cpt_chan_base;
	for (link = 0; link < hw->cpt_links; link++, nix_link++) {
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
		cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
		cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
		cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
		rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
		start += cpt_chans;
	}
}

static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = rvu_get_next_nix_blkaddr(rvu, 0);

	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

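/* Program the RPM (CGX) side of each LMAC link with the same channel
 * base the NIX side uses, so both ends of the link agree.
 */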
static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
{
	u64 cfg;

	cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
	cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);

	/* There is no read-only constant register from which to read
	 * the number of channels per LMAC; it is always 16.
	 */
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
	cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
	cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
}

static void rvu_rpm_set_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 base = hw->cgx_chan_base;
	int cgx, lmac;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
			__rvu_rpm_set_channels(cgx, lmac, base);
			base += 16;
		}
	}
}

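/* Entry point for channel programming: configure the NIX, LBK and RPM
 * blocks, but only on silicon that supports programmable channels.
 */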
void rvu_program_channels(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	if (!hw->cap.programmable_chans)
		return;

	rvu_nix_set_channels(rvu);
	rvu_lbk_set_channels(rvu);
	rvu_rpm_set_channels(rvu);
}

void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
	int blkaddr = nix_hw->blkaddr;
	u64 cfg;

	/* Set AF vWQE timer interval to a LF configurable range of
	 * 6.4us to 1.632ms.
	 */
	rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);

	/* Enable NIX RX stream and global conditional clock to
	 * avoid multiple frees of NPA buffers.
	 */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
	cfg |= BIT_ULL(1) | BIT_ULL(2);
	rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
}

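/* Limit the number of pending LMTST write requests by programming the
 * throttle field of APR_AF_LMT_CFG to LMTST_WR_PEND_MAX.
 */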
void rvu_apr_block_cn10k_init(struct rvu *rvu)
{
	u64 reg;

	reg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG);
	reg |= FIELD_PREP(LMTST_THROTTLE_MASK, LMTST_WR_PEND_MAX);
	rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CFG, reg);
}