// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M
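/* Expanding M() over the MBOX_UP_CGX_MESSAGES list generates one
 * otx2_mbox_alloc_msg_<name>() helper per AF-to-PF "up" message type,
 * e.g. the otx2_mbox_alloc_msg_cgx_link_event() used by cgx_notify_pfs()
 * below.
 */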
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}
/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one pf mapped to a cgx lmac port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
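/* Example: cgx_id 1, lmac_id 2 is stored as 0x12; the high nibble holds
 * the CGX id and the low nibble the LMAC id. rvu_get_cgx_lmac_id()
 * performs the reverse split.
 */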
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}
/* Return the first enabled CGX instance; if none are enabled, return NULL */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}
/* Based on P2X connectivity find mapped NIX block for a PF */
static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
				  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
				  GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			pf++;
		}
	}
	return 0;
}
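/* Note: each mapped LMAC consumes one NPC pkind; its pfchan_map entry
 * records the owning PF (6 bits at bit position 16). The channel number
 * in the low bits is presumably filled in elsewhere once the interface
 * channel configuration is known.
 */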
static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);	/* don't leak the entry on failure */
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}
/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;

	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}
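/* GFP_ATOMIC is required in the allocation above because this callback
 * runs in interrupt context; the sleeping GFP_KERNEL variant is only
 * safe in process context, as in rvu_cgx_send_link_info().
 */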
static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* check if notification is enabled */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}
static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}
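/* cgx_lmac_postevent() (producer, atomic context) and
 * cgx_evhandler_task() (consumer, workqueue context) form a standard
 * deferred-work pair: the IRQ path only queues events, while the worker
 * drains the list and sends the potentially sleeping mbox notifications.
 */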
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}
static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		flush_workqueue(rvu->cgx_evh_wq);
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}
int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}
int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}
/* Most of the CGX configuration is restricted to the mapped PF only;
 * VFs of the mapped PF and other PFs are not allowed. This fn() checks
 * whether a PFFUNC is permitted to do the config or not.
 */
static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}
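/* pcifunc encodes both PF and function ids: a nonzero function part
 * (RVU_PFVF_FUNC_MASK) identifies the requester as a VF, which is why
 * the check above rejects it before even looking at the PF mapping.
 */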
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);

	return 0;
}
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}
static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}
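/* rsp is taken as a void pointer because the same walk serves both MAC
 * types; the stats-count comparison above selects whether a CGX- or
 * RPM-shaped response is filled. The two thin mbox handlers below just
 * forward their typed responses here.
 */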
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}
int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* copy 48 bit mac address to rsp->mac_addr */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}
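/* cgx_lmac_addr_get() returns the address in the low 48 bits of cfg,
 * most significant byte first: for 00:11:22:33:44:55 cfg is
 * 0x001122334455, so the loop extracts bytes from shift 40 down to 0.
 */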
int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	cgx_lmac_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;

	return 0;
}
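/* With PTP enabled the MAC prepends an 8-byte Rx timestamp to each
 * packet, so the NPC parser's start offset must be adjusted to match;
 * npc_config_ts_kpuaction() applies or removes that adjustment.
 */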
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}
int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}
int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}
int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}
u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}
int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
					      req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
						  &rsp->tx_pause,
						  &rsp->rx_pause);
	return 0;
}
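/* The same mbox message doubles as setter and getter: req->set selects
 * between programming pause-frame state and reporting the current
 * state back in rsp.
 */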
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}
/* Find the cumulative status of a NIX rx/tx counter for a PF's LF and
 * those of its VFs as well, i.e. the NIX rx/tx counters at the CGX port
 * level.
 */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LF of a PF and all of its VF belongs to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a lf is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			    ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}
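/* Masking with ~RVU_PFVF_FUNC_MASK compares only the PF part of each
 * LF owner's pcifunc, so LFs attached to the PF itself and to any of
 * its VFs all accumulate into the same per-port total.
 */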
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started hence nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped hence nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when first of all NIXLFs is started.
	 * Stop CGX when last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}
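/* cgx_users acts as a reference count across a PF and its VFs: the
 * port's MAC is enabled on the 0 -> 1 transition and disabled on the
 * 1 -> 0 transition, so traffic keeps flowing while any NIXLF in the
 * PF/VF group is still active.
 */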
int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}
int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return -ENXIO;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}
int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}