// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
 *
 * Copyright (C) 2022 Marvell.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mcs.h"
#include "mcs_reg.h"

#define DRV_NAME	"Marvell MCS Driver"

#define PCI_CFG_REG_BAR_NUM	0

static const struct pci_device_id mcs_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
	{ 0, }	/* end of table */
};

static LIST_HEAD(mcs_list);

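/* CSE statistics helpers: each getter below snapshots the hardware
 * counters of one resource (SecY, flow, SC, SA or port) by reading the
 * per-index counter registers for the requested direction.
 */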
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
{
	u64 reg;

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
	stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
	stats->octet_protected_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
	stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
	stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
	stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
}

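/* Ingress SecY counters; multi-block parts (mcs->hw->mcs_blks > 1)
 * additionally expose a NOTAG packet counter.
 */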
void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
{
	u64 reg;

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
	stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
	stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
	stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
	stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
	stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
	stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
	stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
	stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
	stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
	stats->octet_validated_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
	stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
	stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
	stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
	stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
	stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
	stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);

	reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
	stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
		stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
			  int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
	else
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);

	stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
}

void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
			int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
		stats->parser_err_cnt = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
			stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
		}
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
		stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
		stats->parser_err_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
		stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
	}
}

void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
		stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
		stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
		stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
	}
}

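/* SC counters differ per silicon: single-block parts (mcs_blks == 1)
 * report per-SC octet counts while multi-block parts report the
 * delayed/ok packet counts instead.
 */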
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
		      int id, int dir)
{
	u64 reg;

	if (dir == MCS_RX) {
		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
		stats->hit_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
		stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
		stats->pkt_late_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
		stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
		stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);

		if (mcs->hw->mcs_blks > 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
			stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
			stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
		}
		if (mcs->hw->mcs_blks == 1) {
			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
			stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
			stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
		}
	} else {
		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
		stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);

		reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
		stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);

		if (mcs->hw->mcs_blks == 1) {
			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
			stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);

			reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
			stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
		}
	}
}

void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
{
	struct mcs_flowid_stats flowid_st;
	struct mcs_port_stats port_st;
	struct mcs_secy_stats secy_st;
	struct mcs_sc_stats sc_st;
	struct mcs_sa_stats sa_st;
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CSE_RX_SLAVE_CTRL;
	else
		reg = MCSX_CSE_TX_SLAVE_CTRL;

	mcs_reg_write(mcs, reg, BIT_ULL(0));

	switch (type) {
	case MCS_FLOWID_STATS:
		mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
		break;
	case MCS_SECY_STATS:
		if (dir == MCS_RX)
			mcs_get_rx_secy_stats(mcs, &secy_st, id);
		else
			mcs_get_tx_secy_stats(mcs, &secy_st, id);
		break;
	case MCS_SC_STATS:
		mcs_get_sc_stats(mcs, &sc_st, id, dir);
		break;
	case MCS_SA_STATS:
		mcs_get_sa_stats(mcs, &sa_st, id, dir);
		break;
	case MCS_PORT_STATS:
		mcs_get_port_stats(mcs, &port_st, id, dir);
		break;
	}

	mcs_reg_write(mcs, reg, 0x0);
}

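/* Clear the stats of every resource (flow, SecY, SC, SA) owned by the
 * given PF/VF (pcifunc) in one direction.
 */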
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear FLOWID stats */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
	}

	/* Clear SECY stats */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
	}

	/* Clear SC stats */
	for (id = 0; id < map->secy.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
	}

	/* Clear SA stats */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
	}
	return 0;
}

void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
	else
		reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
	mcs_reg_write(mcs, reg, next_pn);
}

void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
	u64 reg, val;

	val = (map->sa_index0 & 0xFF) |
	      (map->sa_index1 & 0xFF) << 9 |
	      (map->rekey_ena & 0x1) << 18 |
	      (map->sa_index0_vld & 0x1) << 19 |
	      (map->sa_index1_vld & 0x1) << 20 |
	      (map->tx_sa_active & 0x1) << 21 |
	      map->sectag_sci << 22;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
	mcs_reg_write(mcs, reg, val);

	val = map->sectag_sci >> 42;
	reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
	mcs_reg_write(mcs, reg, val);
}

void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
	u64 reg, val;

	val = (map->sa_index & 0xFF) | map->sa_in_use << 9;

	reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
	mcs_reg_write(mcs, reg, val);
}

void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
{
	int reg_id;
	u64 reg;

	if (dir == MCS_RX) {
		for (reg_id = 0; reg_id < 8; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
			mcs_reg_write(mcs, reg, plcy[reg_id]);
		}
	} else {
		for (reg_id = 0; reg_id < 9; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
			mcs_reg_write(mcs, reg, plcy[reg_id]);
		}
	}
}

void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
{
	u64 reg, val;

	reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
	if (sc_id > 63)
		reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);

	if (ena)
		val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
	else
		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);

	mcs_reg_write(mcs, reg, val);
}

void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
{
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);

	mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
}

void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
{
	u64 reg;

	if (dir == MCS_RX)
		reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
	else
		reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);

	mcs_reg_write(mcs, reg, plcy);

	if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
}

void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
	u64 reg, val;

	val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
	} else {
		val |= (map->sc & 0x7F) << 9;
		reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
	}

	mcs_reg_write(mcs, reg, val);
}

void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
{
	u64 reg, val;

	if (dir == MCS_RX) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
		if (flow_id > 63)
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
	} else {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
		if (flow_id > 63)
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
	}

	/* Enable/Disable the tcam entry */
	if (ena)
		val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
	else
		val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);

	mcs_reg_write(mcs, reg, val);
}

void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
{
	int reg_id;
	u64 reg;

	if (dir == MCS_RX) {
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
	} else {
		for (reg_id = 0; reg_id < 4; reg_id++) {
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, data[reg_id]);
			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
			mcs_reg_write(mcs, reg, mask[reg_id]);
		}
	}
}

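/* Program the reserved (last) TCAM entry and SecY as a bypass rule:
 * the TCAM mask is set to all ones and the SecY policy enables the
 * control port with the maximum MTU, in both directions.
 */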
int mcs_install_flowid_bypass_entry(struct mcs *mcs)
{
	int flow_id, secy_id, reg_id;
	struct secy_mem_map map;
	u64 reg, plcy = 0;

	/* Flow entry */
	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}
	for (reg_id = 0; reg_id < 4; reg_id++) {
		reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
	}

	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;

	/* Set validate frames to NULL and enable control port */
	plcy = 0x7ull;
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | 0x3ull << 4;
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);

	/* Enable control port and set mtu to max */
	plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
	if (mcs->hw->mcs_blks > 1)
		plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
	mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);

	/* Map flowid to secy */
	map.secy = secy_id;
	map.ctrl_pkt = 0;
	map.flow_id = flow_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
	map.sc = secy_id;
	mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);

	/* Enable Flowid entry */
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);

	return 0;
}

void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
{
	struct mcs_rsrc_map *map;
	int flow_id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* Clear secy memory to zero */
	mcs_secy_plcy_write(mcs, 0, secy_id, dir);

	/* Disable the tcam entry using this secy */
	for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
		if (map->flowid2secy_map[flow_id] != secy_id)
			continue;
		mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
	}
}

int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
{
	int rsrc_id;

	rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
	if (rsrc_id >= rsrc->max)
		return -ENOSPC;

	bitmap_set(rsrc->bmap, rsrc_id, 1);
	pf_map[rsrc_id] = pcifunc;

	return rsrc_id;
}

int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct mcs_rsrc_map *map;
	u64 dis, reg;
	int id, rc;

	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
	map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;

	if (req->all) {
		for (id = 0; id < map->ctrlpktrule.max; id++) {
			if (map->ctrlpktrule2pf_map[id] != pcifunc)
				continue;
			mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
			dis = mcs_reg_read(mcs, reg);
			dis &= ~BIT_ULL(id);
			mcs_reg_write(mcs, reg, dis);
		}
		return 0;
	}

	rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
	dis = mcs_reg_read(mcs, reg);
	dis &= ~BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, dis);

	return rc;
}

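/* Program one control packet classification rule. The rule type selects
 * the match key: an EtherType (which must be ETH_P_PAE), an exact DA, a
 * DA range, a DA range plus EtherType combo, or a single MAC entry; the
 * register bank written depends on req->dir. The rule is enabled last.
 */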
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
{
	u64 reg, enb, idx;

	switch (req->rule_type) {
	case MCS_CTRL_PKT_RULE_TYPE_ETH:
		req->data0 &= GENMASK(15, 0);
		if (req->data0 != ETH_P_PAE)
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);

		mcs_reg_write(mcs, reg, req->data0);
		break;
	case MCS_CTRL_PKT_RULE_TYPE_DA:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
		      MCSX_PEX_TX_SLAVE_RULE_DAX(idx);

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	case MCS_CTRL_PKT_RULE_TYPE_RANGE:
		if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_COMBO:
		req->data2 &= GENMASK(15, 0);
		if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
		    !(req->data1 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
		if (req->dir == MCS_RX) {
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		} else {
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
			mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
			mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
			reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
			mcs_reg_write(mcs, reg, req->data2);
		}
		break;
	case MCS_CTRL_PKT_RULE_TYPE_MAC:
		if (!(req->data0 & BIT_ULL(40)))
			return -EINVAL;

		idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
		reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
		      MCSX_PEX_TX_SLAVE_RULE_MAC;

		mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
		break;
	}

	reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;

	enb = mcs_reg_read(mcs, reg);
	enb |= BIT_ULL(req->rule_idx);
	mcs_reg_write(mcs, reg, enb);

	return 0;
}

int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
{
	/* Check if the rsrc_id is mapped to PF/VF */
	if (pf_map[rsrc_id] != pcifunc)
		return -EINVAL;

	rvu_free_rsrc(rsrc, rsrc_id);
	pf_map[rsrc_id] = 0;
	return 0;
}

/* Free all the CAM resources mapped to this PF */
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	/* free tcam entries */
	for (id = 0; id < map->flow_ids.max; id++) {
		if (map->flowid2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
			      id, pcifunc);
		mcs_ena_dis_flowid_entry(mcs, id, dir, false);
	}

	/* free secy entries */
	for (id = 0; id < map->secy.max; id++) {
		if (map->secy2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->secy, map->secy2pf_map,
			      id, pcifunc);
		mcs_clear_secy_plcy(mcs, id, dir);
	}

	/* free sc entries */
	for (id = 0; id < map->secy.max; id++) {
		if (map->sc2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);

		/* Disable SC CAM only on RX side */
		if (dir == MCS_RX)
			mcs_ena_dis_sc_cam_entry(mcs, id, false);
	}

	/* free sa entries */
	for (id = 0; id < map->sa.max; id++) {
		if (map->sa2pf_map[id] != pcifunc)
			continue;
		mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
	}
	return 0;
}

int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
{
	int rsrc_id;

	rsrc_id = rvu_alloc_rsrc(rsrc);
	if (rsrc_id < 0)
		return -ENOMEM;
	pf_map[rsrc_id] = pcifunc;
	return rsrc_id;
}

int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
		       u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
{
	struct mcs_rsrc_map *map;
	int id;

	if (dir == MCS_RX)
		map = &mcs->rx;
	else
		map = &mcs->tx;

	id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*flow_id = id;

	id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*secy_id = id;

	id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sc_id = id;

	id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa1_id = id;

	id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
	if (id < 0)
		return -ENOMEM;
	*sa2_id = id;

	return 0;
}

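/* TX PN interrupt handlers: on a packet-number wrap (XPN == 0) or a
 * threshold-reached event, work out from the SA map memory which SA of
 * each SC expired and queue an event for the PF/VF owning that SA.
 */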
static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;

	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));

		if (mcs->tx_sa_active[sc])
			/* SA_index1 was used and got expired */
			event.sa_id = (val >> 9) & 0xFF;
		else
			/* SA_index0 was used and got expired */
			event.sa_id = val & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	struct rsrc_bmap *sc_bmap;
	u64 val, status;
	int sc;

	sc_bmap = &mcs->tx.sc;

	event.mcs_id = mcs->mcs_id;
	event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;

	/* TX SA interrupt is raised only if autorekey is enabled.
	 * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
	 * one of the two SAs mapped to the SC expires. tx_sa_active=0 implies
	 * the SA in SA_index1 expired, else the SA in SA_index0 expired.
	 */
	for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
		val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
		/* Skip SCs which don't have auto rekey enabled */
		if (!((val >> 18) & 0x1))
			continue;

		status = (val >> 21) & 0x1;

		/* Check if tx_sa_active status had changed */
		if (status == mcs->tx_sa_active[sc])
			continue;

		/* SA_index0 is expired */
		if (status)
			event.sa_id = val & 0xFF;
		else
			event.sa_id = (val >> 9) & 0xFF;

		event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
{
	struct mcs_intr_event event = { 0 };
	int sa, reg;
	u64 intr;

	/* Check expired SAs */
	for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
		/* Bit high in *PN_THRESH_REACHEDX implies
		 * corresponding SAs are expired.
		 */
		intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
		for (sa = 0; sa < 64; sa++) {
			if (!(intr & BIT_ULL(sa)))
				continue;

			event.mcs_id = mcs->mcs_id;
			event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
			event.sa_id = sa + (reg * 64);
			event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
			mcs_add_intr_wq_entry(mcs, &event);
		}
	}
}

static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
		event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SL_GTE48)
		event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
	if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
		event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
	if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
		event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;

	mcs_add_intr_wq_entry(mcs, &event);
}

static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
{
	struct mcs_intr_event event = { 0 };

	if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;

	mcs_add_intr_wq_entry(mcs, &event);
}

static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_BBE_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		/* Lower nibble denotes data fifo overflow interrupts and
		 * upper nibble indicates policy fifo overflow interrupts.
		 */
		if (intr & 0xFULL)
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
		else
			event.intr_mask = (dir == MCS_RX) ?
					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into BBE fatal error */
		event.lmac_id = i & 0x3ULL;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
{
	struct mcs_intr_event event = { 0 };
	int i;

	if (!(intr & MCS_PAB_INT_MASK))
		return;

	event.mcs_id = mcs->mcs_id;
	event.pcifunc = mcs->pf_map[0];

	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
		if (!(intr & BIT_ULL(i)))
			continue;

		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
				  MCS_PAB_TX_CHAN_OVERFLOW_INT;

		/* Notify the lmac_id info which ran into PAB fatal error */
		event.lmac_id = i;
		mcs_add_intr_wq_entry(mcs, &event);
	}
}

static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
{
	struct mcs *mcs = (struct mcs *)mcs_irq;
	u64 intr, cpm_intr, bbe_intr, pab_intr;

	/* Disable and clear the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));

	/* Check which block has interrupt */
	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);

	/* CPM RX */
	if (intr & MCS_CPM_RX_INT_ENA) {
		/* Check for PN thresh interrupt bit */
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);

		if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
			mcs_rx_pn_thresh_reached_handler(mcs);

		if (cpm_intr & MCS_CPM_RX_INT_ALL)
			mcs_rx_misc_intr_handler(mcs, cpm_intr);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
	}

	/* CPM TX */
	if (intr & MCS_CPM_TX_INT_ENA) {
		cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);

		if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
			else
				cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
		}

		if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
			mcs_tx_misc_intr_handler(mcs, cpm_intr);

		if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
			if (mcs->hw->mcs_blks > 1)
				cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
			else
				cn10kb_mcs_tx_pn_wrapped_handler(mcs);
		}
		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
	}

	/* BBE RX */
	if (intr & MCS_BBE_RX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
	}

	/* BBE TX */
	if (intr & MCS_BBE_TX_INT_ENA) {
		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
	}

	/* PAB RX */
	if (intr & MCS_PAB_RX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
	}

	/* PAB TX */
	if (intr & MCS_PAB_TX_INT_ENA) {
		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);

		/* Clear the interrupt */
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
	}

	/* Enable the interrupt */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	return IRQ_HANDLED;
}

static void *alloc_mem(struct mcs *mcs, int n)
{
	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
}

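/* Allocate the id-to-PF/VF ownership arrays and the allocation bitmaps
 * for one direction's resources (flows, SecYs, SCs, SAs and control
 * packet rules).
 */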
static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
{
	struct hwinfo *hw = mcs->hw;
	int err;

	res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2pf_map)
		return -ENOMEM;

	res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
	if (!res->secy2pf_map)
		return -ENOMEM;

	res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
	if (!res->sc2pf_map)
		return -ENOMEM;

	res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
	if (!res->sa2pf_map)
		return -ENOMEM;

	res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
	if (!res->flowid2secy_map)
		return -ENOMEM;

	res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
	if (!res->ctrlpktrule2pf_map)
		return -ENOMEM;

	res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->flow_ids);
	if (err)
		return err;

	res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
	err = rvu_alloc_bitmap(&res->secy);
	if (err)
		return err;

	res->sc.max = hw->sc_entries;
	err = rvu_alloc_bitmap(&res->sc);
	if (err)
		return err;

	res->sa.max = hw->sa_entries;
	err = rvu_alloc_bitmap(&res->sa);
	if (err)
		return err;

	res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
	err = rvu_alloc_bitmap(&res->ctrlpktrule);
	if (err)
		return err;

	return 0;
}

static int mcs_register_interrupts(struct mcs *mcs)
{
	int ret = 0;

	mcs->num_vec = pci_msix_vec_count(mcs->pdev);

	ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
				    mcs->num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
			mcs->num_vec, ret);
		return ret;
	}

	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
	if (ret) {
		dev_err(mcs->dev, "MCS IP irq registration failed\n");
		goto exit;
	}

	/* MCS enable IP interrupts */
	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

	/* Enable CPM Rx/Tx interrupts */
	mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
		      MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
		      MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
		      MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);

	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);

	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
	if (!mcs->tx_sa_active) {
		ret = -ENOMEM;
		goto free_irq;
	}

	return ret;

free_irq:
	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
exit:
	pci_free_irq_vectors(mcs->pdev);
	mcs->num_vec = 0;
	return ret;
}

int mcs_get_blkcnt(void)
{
	struct mcs *mcs;
	int idmax = -ENODEV;

	/* Check MCS block is present in hardware */
	if (!pci_dev_present(mcs_id_table))
		return 0;

	list_for_each_entry(mcs, &mcs_list, mcs_list)
		if (mcs->mcs_id > idmax)
			idmax = mcs->mcs_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

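/* Look up a probed MCS block by id; returns NULL when no block with
 * that id is on the list.
 */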
struct mcs *mcs_get_pdata(int mcs_id)
{
	struct mcs *mcs_dev;

	list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
		if (mcs_dev->mcs_id == mcs_id)
			return mcs_dev;
	}
	return NULL;
}

void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
{
	u64 val = 0;

	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
		      req->port_mode & MCS_PORT_MODE_MASK);

	req->cstm_tag_rel_mode_sel &= 0x3;

	if (mcs->hw->mcs_blks > 1) {
		req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
		val = (u32)req->fifo_skid << 0x10;
		val |= req->fifo_skid;
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
			      req->cstm_tag_rel_mode_sel);
		val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);

		if (req->custom_hdr_enb)
			val |= BIT_ULL(req->port_id);
		else
			val &= ~BIT_ULL(req->port_id);

		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
	} else {
		val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
		val |= (req->cstm_tag_rel_mode_sel << 2);
		mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
	}
}

void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
		      struct mcs_port_cfg_get_rsp *rsp)
{
	u64 reg = 0;

	rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
			 MCS_PORT_MODE_MASK;

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
		rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
		reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
		if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
			rsp->custom_hdr_enb = 1;
	} else {
		reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
		rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
	}

	rsp->port_id = req->port_id;
	rsp->mcs_id = req->mcs_id;
}

void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
			    struct mcs_custom_tag_cfg_get_rsp *rsp)
{
	u64 reg = 0, val = 0;
	u8 idx;

	for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
		if (mcs->hw->mcs_blks > 1)
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
			      MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
		else
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
			      MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);

		val = mcs_reg_read(mcs, reg);
		if (mcs->hw->mcs_blks > 1) {
			rsp->cstm_etype[idx] = val & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
			reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
			      MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
			rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
		} else {
			rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
			rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
			rsp->cstm_etype_en |= (val & 0x1) << idx;
		}
	}

	rsp->mcs_id = req->mcs_id;
	rsp->dir = req->dir;
}

void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
{
	u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);

	mcs_reg_write(mcs, reg, reset & 0x1);
}

/* Set lmac to bypass/operational mode */
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
{
	u64 reg;

	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
	mcs_reg_write(mcs, reg, (u64)mode);
}

void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
{
	u64 reg;

	if (pn->dir == MCS_RX)
		reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
	else
		reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;

	mcs_reg_write(mcs, reg, pn->threshold);
}

void cn10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 reg, val;

	/* VLAN CTag */
	val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
	/* RX */
	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
	mcs_reg_write(mcs, reg, val);

	/* VLAN STag */
	val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
	/* RX */
	reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
	mcs_reg_write(mcs, reg, val);

	/* TX */
	reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
	mcs_reg_write(mcs, reg, val);
}

static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
{
	u64 reg;

	/* Port mode 25GB */
	reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
	mcs_reg_write(mcs, reg, 0);

	if (mcs->hw->mcs_blks > 1) {
		reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
		mcs_reg_write(mcs, reg, 0xe000e);
		return;
	}

	reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
	mcs_reg_write(mcs, reg, 0);
}

int mcs_set_lmac_channels(int mcs_id, u16 base)
{
	struct mcs *mcs;
	int lmac;
	u64 cfg;

	mcs = mcs_get_pdata(mcs_id);
	if (!mcs)
		return -ENODEV;
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
		base += 16;
	}
	return 0;
}

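/* Trigger X2P interface calibration and poll (for up to 20ms) until the
 * calibration-done bit is set, then verify that every X2P interface
 * responded.
 */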
static int mcs_x2p_calibration(struct mcs *mcs)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(20000);
	int i, err = 0;
	u64 val;

	/* set X2P calibration */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	val |= BIT_ULL(5);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);

	/* Wait for calibration to complete */
	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
		if (time_before(jiffies, timeout)) {
			usleep_range(80, 100);
			continue;
		} else {
			err = -EBUSY;
			dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
			return err;
		}
	}

	val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
	for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
		if (val & BIT_ULL(1 + i))
			continue;
		err = -EBUSY;
		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
	}
	/* Clear X2P calibrate */
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));

	return err;
}

static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
{
	u64 val;

	/* Set MCS to external bypass */
	val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	if (bypass)
		val |= BIT_ULL(6);
	else
		val &= ~BIT_ULL(6);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
}

static void mcs_global_cfg(struct mcs *mcs)
{
	/* Disable external bypass */
	mcs_set_external_bypass(mcs, false);

	/* Reset TX/RX stats memory */
	mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
	mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);

	/* Set MCS to perform standard IEEE802.1AE macsec processing */
	if (mcs->hw->mcs_blks == 1) {
		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
		return;
	}

	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
}

void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->tcam_entries = 128;		/* TCAM entries */
	hw->secy_entries = 128;		/* SecY entries */
	hw->sc_entries = 128;		/* SC CAM entries */
	hw->sa_entries = 256;		/* SA entries */
	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
	hw->mcs_blks = 1;		/* MCS blocks */
}

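/* CN10K-B specific callbacks; the CNF10K-B variant supplies its own ops
 * table via cnf10kb_get_mac_ops() in mcs_probe().
 */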
static struct mcs_ops cn10kb_mcs_ops = {
	.mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg = cn10kb_mcs_parser_cfg,
	.mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
	.mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
	.mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
};

static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	int lmac, err = 0;
	struct mcs *mcs;

	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
	if (!mcs)
		return -ENOMEM;

	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
	if (!mcs->hw)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto exit;
	}

	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!mcs->reg_base) {
		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto exit;
	}

	pci_set_drvdata(pdev, mcs);
	mcs->pdev = pdev;
	mcs->dev = &pdev->dev;

	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
		mcs->mcs_ops = &cn10kb_mcs_ops;
	else
		mcs->mcs_ops = cnf10kb_get_mac_ops();

	/* Set hardware capabilities */
	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);

	mcs_global_cfg(mcs);

	/* Perform X2P calibration */
	err = mcs_x2p_calibration(mcs);
	if (err)
		goto err_x2p;

	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
			& MCS_ID_MASK;

	/* Set mcs tx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
	if (err)
		goto err_x2p;

	/* Set mcs rx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
	if (err)
		goto err_x2p;

	/* per port config */
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
		mcs_lmac_init(mcs, lmac);

	/* Parser configuration */
	mcs->mcs_ops->mcs_parser_cfg(mcs);

	err = mcs_register_interrupts(mcs);
	if (err)
		goto err_x2p;

	list_add(&mcs->mcs_list, &mcs_list);
	mutex_init(&mcs->stats_lock);

	return 0;

err_x2p:
	/* Enable external bypass */
	mcs_set_external_bypass(mcs, true);
exit:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

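/* Teardown mirrors probe: put the block back into external bypass
 * before releasing the IRQ and PCI resources.
 */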
static void mcs_remove(struct pci_dev *pdev)
{
	struct mcs *mcs = pci_get_drvdata(pdev);

	/* Set MCS to external bypass */
	mcs_set_external_bypass(mcs, true);
	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver mcs_driver = {
	.name = DRV_NAME,
	.id_table = mcs_id_table,
	.probe = mcs_probe,
	.remove = mcs_remove,
};