1 // SPDX-License-Identifier: GPL-2.0
/* Marvell RPM CN10K driver
 *
 * Copyright (C) 2020 Marvell.
 */
7 #include <linux/bitfield.h>
13 int rvu_set_channels_base(struct rvu *rvu)
15 struct rvu_hwinfo *hw = rvu->hw;
20 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
24 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
26 hw->cgx = (nix_const >> 12) & 0xFULL;
27 hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
28 hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
29 hw->lbk_links = (nix_const >> 24) & 0xFULL;
30 hw->cpt_links = (nix_const >> 44) & 0xFULL;
33 hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
34 hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
35 hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
37 /* No Programmable channels */
38 if (!(nix_const & BIT_ULL(60)))
41 hw->cap.programmable_chans = true;
43 /* If programmable channels are present then configure
44 * channels such that all channel numbers are contiguous
45 * leaving no holes. This way the new CPT channels can be
46 * accomodated. The order of channel numbers assigned is
47 * LBK, SDP, CGX and CPT.
49 hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
50 ((nix_const >> 16) & 0xFFULL);
51 hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;
53 cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
54 (nix_const & 0xFFULL);
56 /* Out of 4096 channels start CPT from 2048 so
57 * that MSB for CPT channels is always set
59 if (cpt_chan_base <= 0x800) {
60 hw->cpt_chan_base = 0x800;
63 "CPT channels could not fit in the range 2048-4095\n");
70 #define LBK_CONNECT_NIXX(a) (0x0 + (a))
72 static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
73 u64 offset, int lbkid, u16 chans)
75 struct rvu_hwinfo *hw = rvu->hw;
78 cfg = readq(base + offset);
79 cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
80 LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
81 cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
82 cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
83 cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
85 writeq(cfg, base + offset);
88 static void rvu_lbk_set_channels(struct rvu *rvu)
90 struct pci_dev *pdev = NULL;
96 /* To loopback packets between multiple NIX blocks
97 * mutliple LBK blocks are needed. With two NIX blocks,
98 * four LBK blocks are needed and each LBK block
99 * source and destination are as follows:
100 * LBK0 - source NIX0 and destination NIX1
101 * LBK1 - source NIX0 and destination NIX1
102 * LBK2 - source NIX1 and destination NIX0
103 * LBK3 - source NIX1 and destination NIX1
104 * As per the HRM channel numbers should be programmed as:
105 * P2X and X2P of LBK0 as same
106 * P2X and X2P of LBK3 as same
107 * P2X of LBK1 and X2P of LBK2 as same
108 * P2X of LBK2 and X2P of LBK1 as same
111 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
112 PCI_DEVID_OCTEONTX2_LBK, pdev);
116 base = pci_ioremap_bar(pdev, 0);
120 lbk_const = readq(base + LBK_CONST);
121 chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
122 dst = FIELD_GET(LBK_CONST_DST, lbk_const);
123 src = FIELD_GET(LBK_CONST_SRC, lbk_const);
126 if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
127 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
129 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
131 } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
132 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
134 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
138 if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
139 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
141 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
143 } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
144 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
146 __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
156 static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
158 u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
159 u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
160 struct rvu_hwinfo *hw = rvu->hw;
161 int link, nix_link = 0;
165 cgx_chans = nix_const & 0xFFULL;
166 lbk_chans = (nix_const >> 16) & 0xFFULL;
167 sdp_chans = SDP_CHANNELS;
168 cpt_chans = (nix_const >> 32) & 0xFFFULL;
170 start = hw->cgx_chan_base;
171 for (link = 0; link < hw->cgx_links; link++, nix_link++) {
172 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
173 cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
174 cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
175 cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
176 rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
180 start = hw->lbk_chan_base;
181 for (link = 0; link < hw->lbk_links; link++, nix_link++) {
182 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
183 cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
184 cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
185 cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
186 rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
190 start = hw->sdp_chan_base;
191 for (link = 0; link < hw->sdp_links; link++, nix_link++) {
192 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
193 cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
194 cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
195 cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
196 rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
200 start = hw->cpt_chan_base;
201 for (link = 0; link < hw->cpt_links; link++, nix_link++) {
202 cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
203 cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
204 cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
205 cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
206 rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
/* rvu_nix_set_channels() - program link channel ranges on all NIX blocks.
 *
 * Iterates every NIX block address reported by
 * rvu_get_next_nix_blkaddr() (which returns 0 when exhausted) and
 * programs its link config registers.
 */
static void rvu_nix_set_channels(struct rvu *rvu)
{
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		__rvu_nix_set_channels(rvu, blkaddr);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}
222 static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
226 cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
227 cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
229 /* There is no read-only constant register to read
230 * the number of channels for LMAC and it is always 16.
232 cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
233 cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
234 cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
237 static void rvu_rpm_set_channels(struct rvu *rvu)
239 struct rvu_hwinfo *hw = rvu->hw;
240 u16 base = hw->cgx_chan_base;
243 for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
244 for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
245 __rvu_rpm_set_channels(cgx, lmac, base);
251 void rvu_program_channels(struct rvu *rvu)
253 struct rvu_hwinfo *hw = rvu->hw;
255 if (!hw->cap.programmable_chans)
258 rvu_nix_set_channels(rvu);
259 rvu_lbk_set_channels(rvu);
260 rvu_rpm_set_channels(rvu);