// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mcs.h"
#include "mcs_reg.h"

#define DRV_NAME	"Marvell MCS Driver"

#define PCI_CFG_REG_BAR_NUM	0

static const struct pci_device_id mcs_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
	{ 0, }	/* end of table */
};

static LIST_HEAD(mcs_list);

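/* The helpers below snapshot the CSE statistics counters for a given
 * SecY/SC/SA/flow index into the caller-provided structure; each
 * counter is a separate 64-bit register.
 */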
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
{
	u64 reg;

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
	stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
	stats->octet_protected_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
	stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
	stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
	stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
}

void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
{
	u64 reg;

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
	stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
	stats->octet_validated_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
	stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
	stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
	stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
	stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
	stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
	stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
	stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
		stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
			  int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
	else
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);

	stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
}

void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
			int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
			stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
		}
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
		stats->parser_err_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
		stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
		stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
		stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
		stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
		      int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
		stats->hit_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
		stats->pkt_late_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
		stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);

		if (mcs->hw->mcs_blks > 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
			stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
			stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
		}
		if (mcs->hw->mcs_blks == 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
			stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
			stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
		}
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);

		if (mcs->hw->mcs_blks == 1) {
			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
			stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
			stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
		}
	}
}

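/* Counters are cleared by setting CSE_*_SLAVE_CTRL bit 0 (which arms
 * clear-on-read behaviour), reading every counter of the given type
 * into a scratch structure, and then restoring normal reads.
 */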
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
{
	struct mcs_flowid_stats flowid_st;
	struct mcs_port_stats port_st;
	struct mcs_secy_stats secy_st;
	struct mcs_sc_stats sc_st;
	struct mcs_sa_stats sa_st;
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_SLAVE_CTRL;
	else
		reg = MCSX_CSE_TX_SLAVE_CTRL;

	mcs_reg_write(mcs, reg, BIT_ULL(0));

	switch (type) {
	case MCS_FLOWID_STATS:
		mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
		break;
	case MCS_SECY_STATS:
		if (dir == MCS_RX)
			mcs_get_rx_secy_stats(mcs, &secy_st, id);
		else
			mcs_get_tx_secy_stats(mcs, &secy_st, id);
		break;
	case MCS_SC_STATS:
		mcs_get_sc_stats(mcs, &sc_st, id, dir);
		break;
	case MCS_SA_STATS:
		mcs_get_sa_stats(mcs, &sa_st, id, dir);
		break;
	case MCS_PORT_STATS:
		mcs_get_port_stats(mcs, &port_st, id, dir);
		break;
	}

	mcs_reg_write(mcs, reg, 0x0);
}

int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear FLOWID stats */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
	}

	/* Clear SECY stats */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
	}

	/* Clear SC stats */
	for (id = 0; id < map->sc.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
	}

	/* Clear SA stats */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
	}
	return 0;
}

void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
	else
		reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
	mcs_reg_write(mcs, reg, next_pn);
}

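/* CN10KB SA_MAP_MEM_0 layout, as encoded below: [7:0] sa_index0,
 * [16:9] sa_index1, [18] rekey_ena, [19] sa_index0_vld,
 * [20] sa_index1_vld, [21] tx_sa_active, [63:22] sectag_sci[41:0].
 * The remaining SCI bits (sectag_sci >> 42) go into SA_MAP_MEM_1.
 */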
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

	val = (map->sa_index0 & 0xFF) |
	      (map->sa_index1 & 0xFF) << 9 |
	      (map->rekey_ena & 0x1) << 18 |
	      (map->sa_index0_vld & 0x1) << 19 |
	      (map->sa_index1_vld & 0x1) << 20 |
	      (map->tx_sa_active & 0x1) << 21 |
	      map->sectag_sci << 22;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	val = map->sectag_sci >> 42;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
	mcs_reg_write(mcs, reg, val);
}

void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 val, reg;

	val = (map->sa_index & 0xFF) | map->sa_in_use << 9;

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}

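/* The SA policy occupies consecutive 64-bit words per SA: 8 words on
 * the RX side, 9 on the TX side.
 */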
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
{
	int reg_id;
	u64 reg;

	if (dir == MCS_RX) {
		for (reg_id = 0; reg_id < 8; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
			mcs_reg_write(mcs, reg, plcy[reg_id]);
		}
	} else {
		for (reg_id = 0; reg_id < 9; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
			mcs_reg_write(mcs, reg, plcy[reg_id]);
		}
	}
}

void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
{
	u64 reg, val;

	reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
	if (sc_id > 63)
		reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);

	if (ena)
		val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
	else
		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);

	mcs_reg_write(mcs, reg, val);
}

void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
{
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
	/* Enable SC CAM */
	mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
}

void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
	else
		reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);

	mcs_reg_write(mcs, reg, plcy);

	if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
}

void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

	val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		val |= (map->sc & 0x7F) << 9;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}

void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
{
	u64 reg, val;

	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
		if (flow_id > 63)
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
	} else {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
		if (flow_id > 63)
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
	}

	/* Enable/Disable the tcam entry */
	if (ena)
		val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
	else
		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);

	mcs_reg_write(mcs, reg, val);
}

void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
{
	int reg_id;
	u64 reg;

	if (dir == MCS_RX) {
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
	} else {
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
	}
}

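/* Reserve the last TCAM and SecY entries for a catch-all bypass rule:
 * with every TCAM mask bit set the entry matches any packet, and the
 * reserved SecY policy only enables the control port, so traffic can
 * still flow before MACsec rules are installed.
 */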
int mcs_install_flowid_bypass_entry(struct mcs *mcs)
{
	int flow_id, secy_id, reg_id;
	struct secy_mem_map map;
	u64 reg, plcy = 0;

	/* Flow entry */
	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
	__set_bit(flow_id, mcs->tx.flow_ids.bmap);

	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	/* secy */
	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
	__set_bit(secy_id, mcs->rx.secy.bmap);
	__set_bit(secy_id, mcs->tx.secy.bmap);

	/* Set validate frames to NULL and enable control port */
	plcy = 0x7ull;
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | 0x3ull << 4;
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);

	/* Enable control port and set mtu to max */
	plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);

	/* Map flowid to secy */
	map.secy = secy_id;
	map.ctrl_pkt = 0;
	map.flow_id = flow_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
	map.sc = secy_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);

	/* Enable Flowid entry */
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);

	return 0;
}

void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
{
	struct mcs_rsrc_map *map;
	int flow_id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear secy memory to zero */
	mcs_secy_plcy_write(mcs, 0, secy_id, dir);

	/* Disable the tcam entry using this secy */
	for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
		if (map->flowid2secy_map[flow_id] != secy_id)
			continue;
		mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
	}
}

int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
{
	int rsrc_id;

	if (!rsrc->bmap)
		return -EINVAL;

	rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
	if (rsrc_id >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, rsrc_id, 1);
	pf_map[rsrc_id] = pcifunc;

	return rsrc_id;
}

int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	u64 dis, reg;
	int id, rc;

	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;

	if (req->all) {
		for (id = 0; id < map->ctrlpktrule.max; id++) {
			if (map->ctrlpktrule2pf_map[id] != pcifunc)
				continue;
			mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
			dis = mcs_reg_read(mcs, reg);
			dis &= ~BIT_ULL(id);
			mcs_reg_write(mcs, reg, dis);
		}
		return 0;
	}

	rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
	dis = mcs_reg_read(mcs, reg);
	dis &= ~BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, dis);

	return rc;
}

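/* Program one control packet classification rule. The DA-based rule
 * types insist on BIT(40) of the 48-bit address, i.e. the
 * group/multicast bit of the first octet, being set.
 */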
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
{
	u64 reg, enb;
	u64 idx;

	switch (req->rule_type) {
	case MCS_CTRL_PKT_RULE_TYPE_ETH:
		req->data0 &= GENMASK(15, 0);
		if (req->data0 != ETH_P_PAE)
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);

		mcs_reg_write(mcs, reg, req->data0);
		break;
	case MCS_CTRL_PKT_RULE_TYPE_DA:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_DAX(idx);

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
		if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
		req->data2 &= GENMASK(15, 0);
		if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
		    !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_MAC:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
		      MCSX_PEX_TX_SLAVE_RULE_MAC;

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	}

	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;

	enb = mcs_reg_read(mcs, reg);
	enb |= BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, enb);

	return 0;
}

int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
{
	/* Check if the rsrc_id is mapped to PF/VF */
	if (pf_map[rsrc_id] != pcifunc)
		return -EINVAL;

	rvu_free_rsrc(rsrc, rsrc_id);
	pf_map[rsrc_id] = 0;
	return 0;
}

/* Free all the CAM resources mapped to a PF */
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* free tcam entries */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
			      id, pcifunc);
		mcs_ena_dis_flowid_entry(mcs, id, dir, false);
	}

	/* free secy entries */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->secy, map->secy2pf_map,
			      id, pcifunc);
		mcs_clear_secy_plcy(mcs, id, dir);
	}

	/* free sc entries */
	for (id = 0; id < map->sc.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);

		/* Disable SC CAM only on RX side */
		if (dir == MCS_RX)
			mcs_ena_dis_sc_cam_entry(mcs, id, false);
	}

	/* free sa entries */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
	}
	return 0;
}

int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
{
	int rsrc_id;

	rsrc_id = rvu_alloc_rsrc(rsrc);
	if (rsrc_id < 0)
		return -ENOMEM;
	pf_map[rsrc_id] = pcifunc;
	return rsrc_id;
}

int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*flow_id = id;

	id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*secy_id = id;

	id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sc_id = id;

	id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa1_id = id;

	id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa2_id = id;

	return 0;
}

static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was in use and has expired */
			event.sa_id = (val >> 9) & 0xFF;
		else
			/* SA_index0 was in use and has expired */
			event.sa_id = val & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val, status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	/* TX SA interrupt is raised only if autorekey is enabled.
	 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
	 * one of the two SAs mapped to the SC expires: tx_sa_active=0 implies
	 * the SA in SA_index1 expired, otherwise the SA in SA_index0 expired.
	 */
	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		/* Auto rekey is enabled */
		if (!((val >> 18) & 0x1))
			continue;

		status = (val >> 21) & 0x1;

		/* Check if tx_sa_active status had changed */
		if (status == mcs->tx_sa_active[sc])
			continue;
		/* SA_index0 is expired */
		if (status)
			event.sa_id = val & 0xFF;
		else
			event.sa_id = (val >> 9) & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	int sa, reg;
	u64 intr;

	/* Check expired SAs */
	for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
		/* Bit high in *PN_THRESH_REACHEDX implies
		 * corresponding SAs are expired.
		 */
		intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
		for (sa = 0; sa < 64; sa++) {
			if (!(intr & BIT_ULL(sa)))
				continue;

			event.mcs_id = mcs->mcs_id;
			event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
			event.sa_id = sa + (reg * 64);
			event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
			mcs_add_intr_wq_entry(mcs, &event);
		}
	}
}

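/* The remaining RX CPM causes are SecTag validation errors; they are
 * OR-ed into a single interrupt mask and reported as one event.
 */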
static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
		event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SL_GTE48)
		event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
	if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
		event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;

	mcs_add_intr_wq_entry(mcs, &event);
}

static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };

	if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;

	mcs_add_intr_wq_entry(mcs, &event);
}

static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_BBE_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 */
		if (BIT_ULL(i) & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
		else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_PAB_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
				  MCS_PAB_TX_CHAN_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into PAB fatal error */
		event.lmac_id = i;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

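/* Top-level ISR: mask and ack the summary interrupt, poll
 * MCSX_TOP_SLAVE_INT_SUM to find which block(s) fired, handle and
 * clear each block's cause register, then unmask on exit.
 */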
static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
{
	struct mcs *mcs = (struct mcs *)mcs_irq;
	u64 intr, cpm_intr, bbe_intr, pab_intr;

	/* Disable and clear the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));

	/* Check which block has interrupt */
	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);

	/* CPM RX */
	if (intr & MCS_CPM_RX_INT_ENA) {
		/* Check for PN thresh interrupt bit */
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);

		if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
			mcs_rx_pn_thresh_reached_handler(mcs);

		if (cpm_intr & MCS_CPM_RX_INT_ALL)
			mcs_rx_misc_intr_handler(mcs, cpm_intr);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
	}

	/* CPM TX */
	if (intr & MCS_CPM_TX_INT_ENA) {
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);

		if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
			else
				cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
		}

		if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
			mcs_tx_misc_intr_handler(mcs, cpm_intr);

		if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
			else
				cn10kb_mcs_tx_pn_wrapped_handler(mcs);
		}
		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
	}

	/* BBE RX */
	if (intr & MCS_BBE_RX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
	}

	/* BBE TX */
	if (intr & MCS_BBE_TX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
	}

	/* PAB RX */
	if (intr & MCS_PAB_RX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
	}

	/* PAB TX */
	if (intr & MCS_PAB_TX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
	}

	/* Enable the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	return IRQ_HANDLED;
}

static void *alloc_mem(struct mcs *mcs, int n)
{
	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
}

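/* The *2pf/*2secy maps below are u16 arrays sized to the hardware
 * limits; being devm-managed, they are freed automatically when the
 * device goes away.
 */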
static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
{
	struct hwinfo *hw = mcs->hw;
	int err;

	res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2pf_map)
		return -ENOMEM;

	res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
	if (!res->secy2pf_map)
		return -ENOMEM;

	res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
	if (!res->sc2pf_map)
		return -ENOMEM;

	res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
	if (!res->sa2pf_map)
		return -ENOMEM;

	res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2secy_map)
		return -ENOMEM;

	res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
	if (!res->ctrlpktrule2pf_map)
		return -ENOMEM;

	res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->flow_ids);
	if (err)
		return err;

	res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->secy);
	if (err)
		return err;

	res->sc.max = hw->sc_entries;
	err = rvu_alloc_bitmap(&res->sc);
	if (err)
		return err;

	res->sa.max = hw->sa_entries;
	err = rvu_alloc_bitmap(&res->sa);
	if (err)
		return err;

	res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
	err = rvu_alloc_bitmap(&res->ctrlpktrule);
	if (err)
		return err;

	return 0;
}

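/* All MSI-X vectors are allocated, but only MCS_INT_VEC_IP is hooked
 * up here; it fans out to the per-block handlers above.
 */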
static int mcs_register_interrupts(struct mcs *mcs)
{
	int ret = 0;

	mcs->num_vec = pci_msix_vec_count(mcs->pdev);

	ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
				    mcs->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
			mcs->num_vec, ret);
		return ret;
	}

	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
	if (ret) {
		dev_err(mcs->dev, "MCS IP irq registration failed\n");
		goto exit;
	}

	/* MCS enable IP interrupts */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	/* Enable CPM Rx/Tx interrupts */
	mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
		      MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
		      MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
		      MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);

	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
	if (!mcs->tx_sa_active) {
		ret = -ENOMEM;
		goto free_irq;
	}

	return ret;

free_irq:
	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
exit:
	pci_free_irq_vectors(mcs->pdev);
	mcs->num_vec = 0;
	return ret;
}

int mcs_get_blkcnt(void)
{
	struct mcs *mcs;
	int idmax = -ENODEV;

	/* Check MCS block is present in hardware */
	if (!pci_dev_present(mcs_id_table))
		return 0;

	list_for_each_entry(mcs, &mcs_list, mcs_list)
		if (mcs->mcs_id > idmax)
			idmax = mcs->mcs_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

struct mcs *mcs_get_pdata(int mcs_id)
{
	struct mcs *mcs_dev;

	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
		if (mcs_dev->mcs_id == mcs_id)
			return mcs_dev;
	}
	return NULL;
}

void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
{
	u64 val = 0;

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
		      req->port_mode & MCS_PORT_MODE_MASK);

	req->cstm_tag_rel_mode_sel &= 0x3;

	if (mcs->hw->mcs_blks > 1) {
		req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
		val = (u32)req->fifo_skid << 0x10;
		val |= req->fifo_skid;
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
			      req->cstm_tag_rel_mode_sel);
		val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);

		if (req->custom_hdr_enb)
			val |= BIT_ULL(req->port_id);
		else
			val &= ~BIT_ULL(req->port_id);

		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
	} else {
		val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
		val |= (req->cstm_tag_rel_mode_sel << 2);
		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
	}
}

void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
		      struct mcs_port_cfg_get_rsp *rsp)
{
	u64 reg = 0;

	rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
			 MCS_PORT_MODE_MASK;

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
		rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
		reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
		if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
			rsp->custom_hdr_enb = 1;
	} else {
		reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
	}

	rsp->port_id = req->port_id;
	rsp->mcs_id = req->mcs_id;
}

void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp)
{
	u64 reg = 0, val = 0;
	u8 idx;

	for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
		if (mcs->hw->mcs_blks > 1)
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
			      MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
		else
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
			      MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);

		val = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			rsp->cstm_etype[idx] = val & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
			      MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
			rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
		} else {
			rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
			rsp->cstm_etype_en |= (val & 0x1) << idx;
		}
	}

	rsp->mcs_id = req->mcs_id;
	rsp->dir = req->dir;
}

void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
{
	u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);

	mcs_reg_write(mcs, reg, reset & 0x1);
}

/* Set lmac to bypass/operational mode */
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
{
	u64 reg;
	int id = lmac_id * 2;

	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
	mcs_reg_write(mcs, reg, (u64)mode);
	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
	mcs_reg_write(mcs, reg, (u64)mode);
}

void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
{
	u64 reg;

	if (pn->dir == MCS_RX)
		reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
	else
		reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;

	mcs_reg_write(mcs, reg, pn->threshold);
}

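/* VLAN_CFG word, as encoded below: bit 0 enables the entry and bits
 * [16:1] hold the ethertype; bit 17 (CTag) vs bit 18 (STag) appears to
 * select the tag type being parsed.
 */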
void cn10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 reg, val;

	/* VLAN CTag */
	val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
	/* RX */
	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
	mcs_reg_write(mcs, reg, val);

	/* VLAN STag */
	val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
	/* RX */
	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
	mcs_reg_write(mcs, reg, val);
}

static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
{
	u64 reg;

	/* Port mode 25GB */
	reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
	mcs_reg_write(mcs, reg, 0);

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
		mcs_reg_write(mcs, reg, 0xe000e);
		return;
	}

	reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
	mcs_reg_write(mcs, reg, 0);
}

int mcs_set_lmac_channels(int mcs_id, u16 base)
{
	struct mcs *mcs;
	int lmac;
	u64 cfg;

	mcs = mcs_get_pdata(mcs_id);
	if (!mcs)
		return -ENODEV;
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
		base += 16;
	}
	return 0;
}

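/* X2P calibration handshake: MIL_GLOBAL bit 5 kicks off calibration;
 * MIL_RX_GBL_STATUS bit 0 signals completion and bits [1..n] report
 * per-X2P-interface status.
 */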
static int mcs_x2p_calibration(struct mcs *mcs)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	int i, err = 0;
	u64 val;

	/* set X2P calibration */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	val |= BIT_ULL(5);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

	/* Wait for calibration to complete */
	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
		if (time_before(jiffies, timeout)) {
			usleep_range(80, 100);
			continue;
		} else {
			err = -EBUSY;
			dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
			return err;
		}
	}

	val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
	for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
		if (val & BIT_ULL(1 + i))
			continue;
		err = -EBUSY;
		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
	}
	/* Clear X2P calibrate */
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));

	return err;
}

static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
{
	u64 val;

	/* Set MCS to external bypass */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	if (bypass)
		val |= BIT_ULL(6);
	else
		val &= ~BIT_ULL(6);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
}

static void mcs_global_cfg(struct mcs *mcs)
{
	/* Disable external bypass */
	mcs_set_external_bypass(mcs, false);

	/* Reset TX/RX stats memory */
	mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
	mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);

	/* Set MCS to perform standard IEEE802.1AE macsec processing */
	if (mcs->hw->mcs_blks == 1) {
		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
		return;
	}

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
}

void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->tcam_entries = 128;		/* TCAM entries */
	hw->secy_entries = 128;		/* SecY entries */
	hw->sc_entries = 128;		/* SC CAM entries */
	hw->sa_entries = 256;		/* SA entries */
	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
	hw->mcs_blks = 1;		/* MCS blocks */
}

static struct mcs_ops cn10kb_mcs_ops = {
	.mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg = cn10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
};

static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	int lmac, err = 0;
	struct mcs *mcs;

	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
	if (!mcs)
		return -ENOMEM;

	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
	if (!mcs->hw)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto exit;
	}

	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!mcs->reg_base) {
		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto exit;
	}

	pci_set_drvdata(pdev, mcs);
	mcs->pdev = pdev;
	mcs->dev = &pdev->dev;

	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
		mcs->mcs_ops = &cn10kb_mcs_ops;
	else
		mcs->mcs_ops = cnf10kb_get_mac_ops();

	/* Set hardware capabilities */
	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);

	mcs_global_cfg(mcs);

	/* Perform X2P calibration */
	err = mcs_x2p_calibration(mcs);
	if (err)
		goto err_x2p;

	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
			& MCS_ID_MASK;

	/* Set mcs tx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
	if (err)
		goto err_x2p;

	/* Set mcs rx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
	if (err)
		goto err_x2p;

	/* per port config */
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
		mcs_lmac_init(mcs, lmac);

	/* Parser configuration */
	mcs->mcs_ops->mcs_parser_cfg(mcs);

	err = mcs_register_interrupts(mcs);
	if (err)
		goto err_x2p;

	list_add(&mcs->mcs_list, &mcs_list);
	mutex_init(&mcs->stats_lock);

	return 0;

err_x2p:
	/* Enable external bypass */
	mcs_set_external_bypass(mcs, true);
exit:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void mcs_remove(struct pci_dev *pdev)
{
	struct mcs *mcs = pci_get_drvdata(pdev);

	/* Set MCS to external bypass */
	mcs_set_external_bypass(mcs, true);
	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver mcs_driver = {
	.name = DRV_NAME,
	.id_table = mcs_id_table,
	.probe = mcs_probe,
	.remove = mcs_remove,
};