// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

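/* Check whether a MAC feature (RVU_LMAC_FEAT_*) is supported on the CGX/RPM
 * LMAC mapped to this PF; returns false for PFs with no CGX mapping.
 */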
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;

	return find_first_bit(&pfmap, 16);
}

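/* Pack cgx and lmac ids into one byte: cgx id in the upper nibble,
 * lmac id in the lower nibble.
 */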
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Returns the first enabled CGX instance; if none are enabled returns NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

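/* Map CGX LMAC interfaces to RVU PFs: build the PF to CGX::LMAC forward and
 * reverse tables, reserve an NPC pkind per LMAC and pick the NIX block.
 */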
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

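/* Queue the current link status of a CGX::LMAC as an event and kick the
 * worker so the mapped PF is notified over the mailbox.
 */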
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

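/* Deliver a link change to every PF mapped to this CGX::LMAC over the AF->PF
 * mailbox; PFs that have not enabled notifications only get a kernel log line.
 */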
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;

		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

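/* Set up the link event queue, its worker and register the per-LMAC
 * link change callback with the CGX driver.
 */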
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return 0;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only.
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}

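/* Flush all DMAC filters installed on the LMAC mapped to this pcifunc.
 * The per-LMAC filter budget is MAX_DMAC_ENTRIES_PER_CGX split evenly
 * across the LMACs of the CGX.
 */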
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);
	max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* As cgx_lmac_addr_del() does not clear the entry at index 0,
	 * it needs to be done explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

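/* Common Rx/Tx stats handler for CGX and RPM MACs; rsp is cast to the
 * MAC-specific response format based on the stats count reported by mac_ops.
 */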
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If msg is received from PFs (which are not mapped to CGX LMACs)
	 * or from a VF, then no entries are allocated for DMAC filters at
	 * CGX level. So report zero filters.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	u64 cfg;
	int i;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

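/* Enable/disable PTP Rx timestamping on the mapped LMAC and adjust NPC
 * parsing for the 8 byte timestamp header prepended to received packets.
 */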
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes;
	 * if PTP is disabled then no shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;
	/* This flag is required to clean up CGX conf if app gets killed */
	pfvf->hw_rx_tstamp_en = enable;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

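/* Returns the MAC FIFO length of the first enabled CGX/RPM instance,
 * or 0 if none is enabled.
 */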
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
					      req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
						  &rsp->tx_pause,
						  &rsp->rx_pause);
	return 0;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
 * from its VFs as well. i.e. NIX rx/tx counters at the CGX port level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LF of a PF and all of its VF belongs to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

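/* Configure FEC mode on the mapped LMAC; OTX2_FEC_OFF requests from the PF
 * are treated as OTX2_FEC_NONE before being passed to the CGX driver.
 */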
int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return -ENXIO;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}