drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/pci.h>
11
12 #include "rvu.h"
13 #include "cgx.h"
14 #include "lmac_common.h"
15 #include "rvu_reg.h"
16 #include "rvu_trace.h"
17
18 struct cgx_evq_entry {
19         struct list_head evq_node;
20         struct cgx_link_event link_event;
21 };
22
23 #define M(_name, _id, _fn_name, _req_type, _rsp_type)                   \
24 static struct _req_type __maybe_unused                                  \
25 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)           \
26 {                                                                       \
27         struct _req_type *req;                                          \
28                                                                         \
29         req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(              \
30                 &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
31                 sizeof(struct _rsp_type));                              \
32         if (!req)                                                       \
33                 return NULL;                                            \
34         req->hdr.sig = OTX2_MBOX_REQ_SIG;                               \
35         req->hdr.id = _id;                                              \
36         trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));             \
37         return req;                                                     \
38 }
39
40 MBOX_UP_CGX_MESSAGES
41 #undef M
42
43 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
44 {
45         u8 cgx_id, lmac_id;
46         void *cgxd;
47
48         if (!is_pf_cgxmapped(rvu, pf))
49                 return false;
50
51         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
52         cgxd = rvu_cgx_pdata(cgx_id, rvu);
53
54         return  (cgx_features_get(cgxd) & feature);
55 }
56
57 /* Returns bitmap of mapped PFs */
58 static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
59 {
60         return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
61 }
62
63 int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
64 {
65         unsigned long pfmap;
66
67         pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
68
69         /* Assumes only one pf mapped to a cgx lmac port */
70         if (!pfmap)
71                 return -ENODEV;
72         else
73                 return find_first_bit(&pfmap, 16);
74 }
75
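/* Pack a (cgx_id, lmac_id) pair into one byte: CGX id in the upper nibble,
 * LMAC id in the lower nibble.
 */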
76 static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
77 {
78         return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
79 }
80
81 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
82 {
83         if (cgx_id >= rvu->cgx_cnt_max)
84                 return NULL;
85
86         return rvu->cgx_idmap[cgx_id];
87 }
88
89 /* Return the first enabled CGX instance; if none are enabled, return NULL */
90 void *rvu_first_cgx_pdata(struct rvu *rvu)
91 {
92         int first_enabled_cgx = 0;
93         void *cgxd = NULL;
94
95         for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
96                 cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
97                 if (cgxd)
98                         break;
99         }
100
101         return cgxd;
102 }
103
104 /* Based on P2X connectivity find mapped NIX block for a PF */
105 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
106                                   int cgx_id, int lmac_id)
107 {
108         struct rvu_pfvf *pfvf = &rvu->pf[pf];
109         u8 p2x;
110
111         p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
112         /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
113         pfvf->nix_blkaddr = BLKADDR_NIX0;
114         if (p2x == CMR_P2X_SEL_NIX1)
115                 pfvf->nix_blkaddr = BLKADDR_NIX1;
116 }
117
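/* Walk all CGX instances and map each enabled LMAC to an RVU PF, starting at
 * PF_CGXMAP_BASE; also reserve a pkind for the LMAC and record which NIX
 * block (NIX0/NIX1) the PF is connected to.
 */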
118 static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
119 {
120         struct npc_pkind *pkind = &rvu->hw->pkind;
121         int cgx_cnt_max = rvu->cgx_cnt_max;
122         int pf = PF_CGXMAP_BASE;
123         unsigned long lmac_bmap;
124         int size, free_pkind;
125         int cgx, lmac, iter;
126         int numvfs, hwvfs;
127
128         if (!cgx_cnt_max)
129                 return 0;
130
131         if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
132                 return -EINVAL;
133
134         /* Alloc map table
135          * An additional entry is required since PF id starts from 1 and
136          * hence entry at offset 0 is invalid.
137          */
138         size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
139         rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
140         if (!rvu->pf2cgxlmac_map)
141                 return -ENOMEM;
142
143         /* Initialize all entries with an invalid cgx and lmac id */
144         memset(rvu->pf2cgxlmac_map, 0xFF, size);
145
146         /* Reverse map table */
147         rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
148                                   cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
149                                   GFP_KERNEL);
150         if (!rvu->cgxlmac2pf_map)
151                 return -ENOMEM;
152
153         rvu->cgx_mapped_pfs = 0;
154         for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
155                 if (!rvu_cgx_pdata(cgx, rvu))
156                         continue;
157                 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
158                 for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
159                         lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
160                                               iter);
161                         rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
162                         rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
163                         free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
164                         pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
165                         rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
166                         rvu->cgx_mapped_pfs++;
167                         rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
168                         rvu->cgx_mapped_vfs += numvfs;
169                         pf++;
170                 }
171         }
172         return 0;
173 }
174
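/* Read the current link status of a CGX LMAC, queue it as an event and kick
 * the event worker so the mapped PF gets notified.
 */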
175 static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
176 {
177         struct cgx_evq_entry *qentry;
178         unsigned long flags;
179         int err;
180
181         qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
182         if (!qentry)
183                 return -ENOMEM;
184
185         /* Lock the event queue before we read the local link status */
186         spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
187         err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
188                                 &qentry->link_event.link_uinfo);
189         qentry->link_event.cgx_id = cgx_id;
190         qentry->link_event.lmac_id = lmac_id;
191         if (err) {
192                 kfree(qentry);
193                 goto skip_add;
194         }
195         list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
196 skip_add:
197         spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
198
199         /* start worker to process the events */
200         queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
201
202         return 0;
203 }
204
205 /* This is called from interrupt context and is expected to be atomic */
206 static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
207 {
208         struct cgx_evq_entry *qentry;
209         struct rvu *rvu = data;
210
211         /* post event to the event queue */
212         qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
213         if (!qentry)
214                 return -ENOMEM;
215         qentry->link_event = *event;
216         spin_lock(&rvu->cgx_evq_lock);
217         list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
218         spin_unlock(&rvu->cgx_evq_lock);
219
220         /* start worker to process the events */
221         queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);
222
223         return 0;
224 }
225
226 static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
227 {
228         struct cgx_link_user_info *linfo;
229         struct cgx_link_info_msg *msg;
230         unsigned long pfmap;
231         int err, pfid;
232
233         linfo = &event->link_uinfo;
234         pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
235
236         do {
237                 pfid = find_first_bit(&pfmap, 16);
238                 clear_bit(pfid, &pfmap);
239
240                 /* check if notification is enabled */
241                 if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
242                         dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
243                                  event->cgx_id, event->lmac_id,
244                                  linfo->link_up ? "UP" : "DOWN");
245                         continue;
246                 }
247
248                 /* Send mbox message to PF */
249                 msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
250                 if (!msg)
251                         continue;
252                 msg->link_info = *linfo;
253                 otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
254                 err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
255                 if (err)
256                         dev_warn(rvu->dev, "notification to pf %d failed\n",
257                                  pfid);
258         } while (pfmap);
259 }
260
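/* Worker that drains the CGX event queue and notifies the mapped PFs of each
 * link change.
 */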
261 static void cgx_evhandler_task(struct work_struct *work)
262 {
263         struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
264         struct cgx_evq_entry *qentry;
265         struct cgx_link_event *event;
266         unsigned long flags;
267
268         do {
269                 /* Dequeue an event */
270                 spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
271                 qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
272                                                   struct cgx_evq_entry,
273                                                   evq_node);
274                 if (qentry)
275                         list_del(&qentry->evq_node);
276                 spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
277                 if (!qentry)
278                         break; /* nothing more to process */
279
280                 event = &qentry->link_event;
281
282                 /* process event */
283                 cgx_notify_pfs(event, rvu);
284                 kfree(qentry);
285         } while (1);
286 }
287
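/* Set up the CGX event queue and its workqueue, then register the link
 * change callback for every enabled LMAC.
 */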
288 static int cgx_lmac_event_handler_init(struct rvu *rvu)
289 {
290         unsigned long lmac_bmap;
291         struct cgx_event_cb cb;
292         int cgx, lmac, err;
293         void *cgxd;
294
295         spin_lock_init(&rvu->cgx_evq_lock);
296         INIT_LIST_HEAD(&rvu->cgx_evq_head);
297         INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
298         rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
299         if (!rvu->cgx_evh_wq) {
300                 dev_err(rvu->dev, "alloc workqueue failed\n");
301                 return -ENOMEM;
302         }
303
304         cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
305         cb.data = rvu;
306
307         for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
308                 cgxd = rvu_cgx_pdata(cgx, rvu);
309                 if (!cgxd)
310                         continue;
311                 lmac_bmap = cgx_get_lmac_bmap(cgxd);
312                 for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
313                         err = cgx_lmac_evh_register(&cb, cgxd, lmac);
314                         if (err)
315                                 dev_err(rvu->dev,
316                                         "%d:%d handler register failed\n",
317                                         cgx, lmac);
318                 }
319         }
320
321         return 0;
322 }
323
324 static void rvu_cgx_wq_destroy(struct rvu *rvu)
325 {
326         if (rvu->cgx_evh_wq) {
327                 destroy_workqueue(rvu->cgx_evh_wq);
328                 rvu->cgx_evh_wq = NULL;
329         }
330 }
331
332 int rvu_cgx_init(struct rvu *rvu)
333 {
334         int cgx, err;
335         void *cgxd;
336
337         /* CGX port ids start from 0 and are not necessarily contiguous.
338          * Hence we allocate resources based on the maximum port id value.
339          */
340         rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
341         if (!rvu->cgx_cnt_max) {
342                 dev_info(rvu->dev, "No CGX devices found!\n");
343                 return -ENODEV;
344         }
345
346         rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
347                                       sizeof(void *), GFP_KERNEL);
348         if (!rvu->cgx_idmap)
349                 return -ENOMEM;
350
351         /* Initialize the cgxdata table */
352         for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
353                 rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
354
355         /* Map CGX LMAC interfaces to RVU PFs */
356         err = rvu_map_cgx_lmac_pf(rvu);
357         if (err)
358                 return err;
359
360         /* Register for CGX events */
361         err = cgx_lmac_event_handler_init(rvu);
362         if (err)
363                 return err;
364
365         mutex_init(&rvu->cgx_cfg_lock);
366
367         /* Ensure event handler registration is completed, before
368          * we turn on the links
369          */
370         mb();
371
372         /* Do link up for all CGX ports */
373         for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
374                 cgxd = rvu_cgx_pdata(cgx, rvu);
375                 if (!cgxd)
376                         continue;
377                 err = cgx_lmac_linkup_start(cgxd);
378                 if (err)
379                         dev_err(rvu->dev,
380                                 "Link up process failed to start on cgx %d\n",
381                                 cgx);
382         }
383
384         return 0;
385 }
386
387 int rvu_cgx_exit(struct rvu *rvu)
388 {
389         unsigned long lmac_bmap;
390         int cgx, lmac;
391         void *cgxd;
392
393         for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
394                 cgxd = rvu_cgx_pdata(cgx, rvu);
395                 if (!cgxd)
396                         continue;
397                 lmac_bmap = cgx_get_lmac_bmap(cgxd);
398                 for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
399                         cgx_lmac_evh_unregister(cgxd, lmac);
400         }
401
402         /* Ensure event handler unregister is completed */
403         mb();
404
405         rvu_cgx_wq_destroy(rvu);
406         return 0;
407 }
408
409 /* Most of the CGX configuration is restricted to the mapped PF only;
410  * VFs of the mapped PF and other PFs are not allowed. This fn() checks
411  * whether a PFFUNC is permitted to do the config or not.
412  */
413 inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
414 {
415         if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
416             !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
417                 return false;
418         return true;
419 }
420
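/* Enable or disable CGX receive backpressure (pause frame forwarding to NIX)
 * for the LMAC mapped to this PF.
 */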
421 void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
422 {
423         struct mac_ops *mac_ops;
424         u8 cgx_id, lmac_id;
425         void *cgxd;
426
427         if (!is_pf_cgxmapped(rvu, pf))
428                 return;
429
430         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
431         cgxd = rvu_cgx_pdata(cgx_id, rvu);
432
433         mac_ops = get_mac_ops(cgxd);
434         /* Set / clear CTL_BCK to control pause frame forwarding to NIX */
435         if (enable)
436                 mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
437         else
438                 mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
439 }
440
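/* Start or stop packet Rx/Tx on the CGX LMAC mapped to this PF */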
441 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
442 {
443         int pf = rvu_get_pf(pcifunc);
444         u8 cgx_id, lmac_id;
445
446         if (!is_cgx_config_permitted(rvu, pcifunc))
447                 return LMAC_AF_ERR_PERM_DENIED;
448
449         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
450
451         cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
452
453         return 0;
454 }
455
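/* Remove all DMAC filter entries owned by this PF's LMAC; the per-CGX filter
 * pool is shared equally among its LMACs.
 */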
456 void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
457 {
458         int pf = rvu_get_pf(pcifunc);
459         int i = 0, lmac_count = 0;
460         u8 max_dmac_filters;
461         u8 cgx_id, lmac_id;
462         void *cgx_dev;
463
464         if (!is_cgx_config_permitted(rvu, pcifunc))
465                 return;
466
467         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
468         cgx_dev = cgx_get_pdata(cgx_id);
469         lmac_count = cgx_get_lmac_cnt(cgx_dev);
470         max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
471
472         for (i = 0; i < max_dmac_filters; i++)
473                 cgx_lmac_addr_del(cgx_id, lmac_id, i);
474
475         /* cgx_lmac_addr_del() does not clear the entry at index 0,
476          * so it needs to be cleared explicitly
477          */
478         cgx_lmac_addr_reset(cgx_id, lmac_id);
479 }
480
481 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
482                                     struct msg_rsp *rsp)
483 {
484         rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
485         return 0;
486 }
487
488 int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
489                                    struct msg_rsp *rsp)
490 {
491         rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
492         return 0;
493 }
494
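/* Common Rx/Tx statistics collector for CGX and RPM MACs; the response
 * layout is chosen based on the MAC's stat counts.
 */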
495 static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
496                               void *rsp)
497 {
498         int pf = rvu_get_pf(req->hdr.pcifunc);
499         struct mac_ops *mac_ops;
500         int stat = 0, err = 0;
501         u64 tx_stat, rx_stat;
502         u8 cgx_idx, lmac;
503         void *cgxd;
504
505         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
506                 return LMAC_AF_ERR_PERM_DENIED;
507
508         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
509         cgxd = rvu_cgx_pdata(cgx_idx, rvu);
510         mac_ops = get_mac_ops(cgxd);
511
512         /* Rx stats */
513         while (stat < mac_ops->rx_stats_cnt) {
514                 err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
515                 if (err)
516                         return err;
517                 if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
518                         ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
519                 else
520                         ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
521                 stat++;
522         }
523
524         /* Tx stats */
525         stat = 0;
526         while (stat < mac_ops->tx_stats_cnt) {
527                 err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
528                 if (err)
529                         return err;
530                 if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
531                         ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
532                 else
533                         ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
534                 stat++;
535         }
536         return 0;
537 }
538
539 int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
540                                struct cgx_stats_rsp *rsp)
541 {
542         return rvu_lmac_get_stats(rvu, req, (void *)rsp);
543 }
544
545 int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
546                                struct rpm_stats_rsp *rsp)
547 {
548         return rvu_lmac_get_stats(rvu, req, (void *)rsp);
549 }
550
551 int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
552                                    struct msg_req *req,
553                                    struct cgx_fec_stats_rsp *rsp)
554 {
555         int pf = rvu_get_pf(req->hdr.pcifunc);
556         u8 cgx_idx, lmac;
557         void *cgxd;
558
559         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
560                 return LMAC_AF_ERR_PERM_DENIED;
561         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
562
563         cgxd = rvu_cgx_pdata(cgx_idx, rvu);
564         return cgx_get_fec_stats(cgxd, lmac, rsp);
565 }
566
567 int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
568                                       struct cgx_mac_addr_set_or_get *req,
569                                       struct cgx_mac_addr_set_or_get *rsp)
570 {
571         int pf = rvu_get_pf(req->hdr.pcifunc);
572         u8 cgx_id, lmac_id;
573
574         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
575                 return -EPERM;
576
577         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
578
579         cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
580
581         return 0;
582 }
583
584 int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
585                                       struct cgx_mac_addr_add_req *req,
586                                       struct cgx_mac_addr_add_rsp *rsp)
587 {
588         int pf = rvu_get_pf(req->hdr.pcifunc);
589         u8 cgx_id, lmac_id;
590         int rc = 0;
591
592         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
593                 return -EPERM;
594
595         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
596         rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
597         if (rc >= 0) {
598                 rsp->index = rc;
599                 return 0;
600         }
601
602         return rc;
603 }
604
605 int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
606                                       struct cgx_mac_addr_del_req *req,
607                                       struct msg_rsp *rsp)
608 {
609         int pf = rvu_get_pf(req->hdr.pcifunc);
610         u8 cgx_id, lmac_id;
611
612         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
613                 return -EPERM;
614
615         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
616         return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
617 }
618
619 int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
620                                              struct msg_req *req,
621                                              struct cgx_max_dmac_entries_get_rsp
622                                              *rsp)
623 {
624         int pf = rvu_get_pf(req->hdr.pcifunc);
625         u8 cgx_id, lmac_id;
626
627         /* If the msg is received from PFs (which are not mapped to CGX LMACs)
628          * or from a VF, then no DMAC filter entries are allocated at CGX level,
629          * so return zero.
630          */
631         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
632                 rsp->max_dmac_filters = 0;
633                 return 0;
634         }
635
636         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
637         rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
638         return 0;
639 }
640
641 int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
642                                       struct cgx_mac_addr_set_or_get *req,
643                                       struct cgx_mac_addr_set_or_get *rsp)
644 {
645         int pf = rvu_get_pf(req->hdr.pcifunc);
646         u8 cgx_id, lmac_id;
647         int rc = 0, i;
648         u64 cfg;
649
650         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
651                 return -EPERM;
652
653         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
654
655         rsp->hdr.rc = rc;
656         cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
657         /* copy 48 bit mac address to rsp->mac_addr */
658         for (i = 0; i < ETH_ALEN; i++)
659                 rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
660         return 0;
661 }
662
663 int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
664                                         struct msg_rsp *rsp)
665 {
666         u16 pcifunc = req->hdr.pcifunc;
667         int pf = rvu_get_pf(pcifunc);
668         u8 cgx_id, lmac_id;
669
670         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
671                 return -EPERM;
672
673         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
674
675         cgx_lmac_promisc_config(cgx_id, lmac_id, true);
676         return 0;
677 }
678
679 int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
680                                          struct msg_rsp *rsp)
681 {
682         int pf = rvu_get_pf(req->hdr.pcifunc);
683         u8 cgx_id, lmac_id;
684
685         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
686                 return -EPERM;
687
688         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
689
690         cgx_lmac_promisc_config(cgx_id, lmac_id, false);
691         return 0;
692 }
693
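/* Enable or disable PTP hardware timestamping on the mapped LMAC and tell
 * NPC about the 8 byte timestamp prepended to received packets.
 */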
694 static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
695 {
696         struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
697         int pf = rvu_get_pf(pcifunc);
698         struct mac_ops *mac_ops;
699         u8 cgx_id, lmac_id;
700         void *cgxd;
701
702         if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
703                 return 0;
704
705         /* This msg is expected only from PFs that are mapped to CGX LMACs;
706          * if received from any other PF/VF, simply ACK, nothing to do.
707          */
708         if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
709             !is_pf_cgxmapped(rvu, pf))
710                 return -ENODEV;
711
712         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
713         cgxd = rvu_cgx_pdata(cgx_id, rvu);
714
715         mac_ops = get_mac_ops(cgxd);
716         mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
717         /* If PTP is enabled then inform NPC that packets to be
718          * parsed by this PF will have their data shifted by 8 bytes;
719          * if PTP is disabled then no shift is required.
720          */
721         if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
722                 return -EINVAL;
723         /* This flag is required to clean up CGX conf if app gets killed */
724         pfvf->hw_rx_tstamp_en = enable;
725
726         return 0;
727 }
728
729 int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
730                                        struct msg_rsp *rsp)
731 {
732         if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
733                 return -EPERM;
734
735         return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
736 }
737
738 int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
739                                         struct msg_rsp *rsp)
740 {
741         return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
742 }
743
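/* Enable or disable link change notifications for a PF; when enabling, the
 * current link state is sent right away.
 */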
744 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
745 {
746         int pf = rvu_get_pf(pcifunc);
747         u8 cgx_id, lmac_id;
748
749         if (!is_cgx_config_permitted(rvu, pcifunc))
750                 return -EPERM;
751
752         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
753
754         if (en) {
755                 set_bit(pf, &rvu->pf_notify_bmap);
756                 /* Send the current link status to PF */
757                 rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
758         } else {
759                 clear_bit(pf, &rvu->pf_notify_bmap);
760         }
761
762         return 0;
763 }
764
765 int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
766                                           struct msg_rsp *rsp)
767 {
768         rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
769         return 0;
770 }
771
772 int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
773                                          struct msg_rsp *rsp)
774 {
775         rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
776         return 0;
777 }
778
779 int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
780                                       struct cgx_link_info_msg *rsp)
781 {
782         u8 cgx_id, lmac_id;
783         int pf, err;
784
785         pf = rvu_get_pf(req->hdr.pcifunc);
786
787         if (!is_pf_cgxmapped(rvu, pf))
788                 return -ENODEV;
789
790         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
791
792         err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
793                                 &rsp->link_info);
794         return err;
795 }
796
797 int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
798                                       struct msg_req *req,
799                                       struct cgx_features_info_msg *rsp)
800 {
801         int pf = rvu_get_pf(req->hdr.pcifunc);
802         u8 cgx_idx, lmac;
803         void *cgxd;
804
805         if (!is_pf_cgxmapped(rvu, pf))
806                 return 0;
807
808         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
809         cgxd = rvu_cgx_pdata(cgx_idx, rvu);
810         rsp->lmac_features = cgx_features_get(cgxd);
811
812         return 0;
813 }
814
815 u32 rvu_cgx_get_fifolen(struct rvu *rvu)
816 {
817         struct mac_ops *mac_ops;
818         u32 fifo_len;
819
820         mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
821         fifo_len = mac_ops ? mac_ops->fifo_len : 0;
822
823         return fifo_len;
824 }
825
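/* Enable or disable internal loopback on the LMAC mapped to this PF */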
826 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
827 {
828         int pf = rvu_get_pf(pcifunc);
829         struct mac_ops *mac_ops;
830         u8 cgx_id, lmac_id;
831
832         if (!is_cgx_config_permitted(rvu, pcifunc))
833                 return -EPERM;
834
835         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
836         mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
837
838         return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
839                                           lmac_id, en);
840 }
841
842 int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
843                                        struct msg_rsp *rsp)
844 {
845         rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
846         return 0;
847 }
848
849 int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
850                                         struct msg_rsp *rsp)
851 {
852         rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
853         return 0;
854 }
855
856 int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
857                                        struct cgx_pause_frm_cfg *req,
858                                        struct cgx_pause_frm_cfg *rsp)
859 {
860         int pf = rvu_get_pf(req->hdr.pcifunc);
861         struct mac_ops *mac_ops;
862         u8 cgx_id, lmac_id;
863         void *cgxd;
864
865         if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
866                 return 0;
867
868         /* This msg is expected only from PF/VFs that are mapped to CGX LMACs;
869          * if received from any other PF/VF, simply ACK, nothing to do.
870          */
871         if (!is_pf_cgxmapped(rvu, pf))
872                 return -ENODEV;
873
874         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
875         cgxd = rvu_cgx_pdata(cgx_id, rvu);
876         mac_ops = get_mac_ops(cgxd);
877
878         if (req->set)
879                 mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
880                                               req->tx_pause, req->rx_pause);
881         else
882                 mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
883                                                   &rsp->tx_pause,
884                                                   &rsp->rx_pause);
885         return 0;
886 }
887
888 int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
889                                            struct msg_rsp *rsp)
890 {
891         int pf = rvu_get_pf(req->hdr.pcifunc);
892         u8 cgx_id, lmac_id;
893
894         if (!is_pf_cgxmapped(rvu, pf))
895                 return LMAC_AF_ERR_PF_NOT_MAPPED;
896
897         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
898         return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
899 }
900
901 /* Find the cumulative value of NIX rx/tx counters from the LFs of a PF and
902  * those of its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
903  */
904 int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
905                            int index, int rxtxflag, u64 *stat)
906 {
907         struct rvu_block *block;
908         int blkaddr;
909         u16 pcifunc;
910         int pf, lf;
911
912         *stat = 0;
913
914         if (!cgxd || !rvu)
915                 return -EINVAL;
916
917         pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
918         if (pf < 0)
919                 return pf;
920
921         /* Assumes the LFs of a PF and all of its VFs belong to the same
922          * NIX block
923          */
924         pcifunc = pf << RVU_PFVF_PF_SHIFT;
925         blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
926         if (blkaddr < 0)
927                 return 0;
928         block = &rvu->hw->block[blkaddr];
929
930         for (lf = 0; lf < block->lf.max; lf++) {
931                 /* Check if a lf is attached to this PF or one of its VFs */
932                 if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
933                          ~RVU_PFVF_FUNC_MASK)))
934                         continue;
935                 if (rxtxflag == NIX_STATS_RX)
936                         *stat += rvu_read64(rvu, blkaddr,
937                                             NIX_AF_LFX_RX_STATX(lf, index));
938                 else
939                         *stat += rvu_read64(rvu, blkaddr,
940                                             NIX_AF_LFX_TX_STATX(lf, index));
941         }
942
943         return 0;
944 }
945
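/* Reference counted CGX start/stop: the LMAC is started when the first NIX LF
 * of a PF and its VFs comes up, and stopped when the last one goes down.
 */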
946 int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
947 {
948         struct rvu_pfvf *parent_pf, *pfvf;
949         int cgx_users, err = 0;
950
951         if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
952                 return 0;
953
954         parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
955         pfvf = rvu_get_pfvf(rvu, pcifunc);
956
957         mutex_lock(&rvu->cgx_cfg_lock);
958
959         if (start && pfvf->cgx_in_use)
960                 goto exit;  /* CGX is already started hence nothing to do */
961         if (!start && !pfvf->cgx_in_use)
962                 goto exit; /* CGX is already stopped hence nothing to do */
963
964         if (start) {
965                 cgx_users = parent_pf->cgx_users;
966                 parent_pf->cgx_users++;
967         } else {
968                 parent_pf->cgx_users--;
969                 cgx_users = parent_pf->cgx_users;
970         }
971
972         /* Start CGX when first of all NIXLFs is started.
973          * Stop CGX when last of all NIXLFs is stopped.
974          */
975         if (!cgx_users) {
976                 err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
977                                           start);
978                 if (err) {
979                         dev_err(rvu->dev, "Unable to %s CGX\n",
980                                 start ? "start" : "stop");
981                         /* Revert the usage count in case of error */
982                         parent_pf->cgx_users = start ? parent_pf->cgx_users  - 1
983                                                : parent_pf->cgx_users  + 1;
984                         goto exit;
985                 }
986         }
987         pfvf->cgx_in_use = start;
988 exit:
989         mutex_unlock(&rvu->cgx_cfg_lock);
990         return err;
991 }
992
993 int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
994                                        struct fec_mode *req,
995                                        struct fec_mode *rsp)
996 {
997         int pf = rvu_get_pf(req->hdr.pcifunc);
998         u8 cgx_id, lmac_id;
999
1000         if (!is_pf_cgxmapped(rvu, pf))
1001                 return -EPERM;
1002
1003         if (req->fec == OTX2_FEC_OFF)
1004                 req->fec = OTX2_FEC_NONE;
1005         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1006         rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
1007         return 0;
1008 }
1009
1010 int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
1011                                            struct cgx_fw_data *rsp)
1012 {
1013         int pf = rvu_get_pf(req->hdr.pcifunc);
1014         u8 cgx_id, lmac_id;
1015
1016         if (!rvu->fwdata)
1017                 return -ENXIO;
1018
1019         if (!is_pf_cgxmapped(rvu, pf))
1020                 return -EPERM;
1021
1022         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1023
1024         memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
1025                sizeof(struct cgx_lmac_fwdata_s));
1026         return 0;
1027 }
1028
1029 int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
1030                                        struct cgx_set_link_mode_req *req,
1031                                        struct cgx_set_link_mode_rsp *rsp)
1032 {
1033         int pf = rvu_get_pf(req->hdr.pcifunc);
1034         u8 cgx_idx, lmac;
1035         void *cgxd;
1036
1037         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1038                 return -EPERM;
1039
1040         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
1041         cgxd = rvu_cgx_pdata(cgx_idx, rvu);
1042         rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
1043         return 0;
1044 }
1045
1046 int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
1047                                         struct msg_rsp *rsp)
1048 {
1049         int pf = rvu_get_pf(req->hdr.pcifunc);
1050         u8 cgx_id, lmac_id;
1051
1052         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1053                 return LMAC_AF_ERR_PERM_DENIED;
1054
1055         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1056         return cgx_lmac_addr_reset(cgx_id, lmac_id);
1057 }
1058
1059 int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
1060                                          struct cgx_mac_addr_update_req *req,
1061                                          struct msg_rsp *rsp)
1062 {
1063         int pf = rvu_get_pf(req->hdr.pcifunc);
1064         u8 cgx_id, lmac_id;
1065
1066         if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
1067                 return LMAC_AF_ERR_PERM_DENIED;
1068
1069         rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1070         return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
1071 }