// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME        "Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
        [CGX_LINK_NONE] = 0,
        [CGX_LINK_10M] = 10,
        [CGX_LINK_100M] = 100,
        [CGX_LINK_1G] = 1000,
        [CGX_LINK_2HG] = 2500,
        [CGX_LINK_5G] = 5000,
        [CGX_LINK_10G] = 10000,
        [CGX_LINK_20G] = 20000,
        [CGX_LINK_25G] = 25000,
        [CGX_LINK_40G] = 40000,
        [CGX_LINK_50G] = 50000,
        [CGX_LINK_80G] = 80000,
        [CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
        [LMAC_MODE_SGMII] = "SGMII",
        [LMAC_MODE_XAUI] = "XAUI",
        [LMAC_MODE_RXAUI] = "RXAUI",
        [LMAC_MODE_10G_R] = "10G_R",
        [LMAC_MODE_40G_R] = "40G_R",
        [LMAC_MODE_QSGMII] = "QSGMII",
        [LMAC_MODE_25G_R] = "25G_R",
        [LMAC_MODE_50G_R] = "50G_R",
        [LMAC_MODE_100G_R] = "100G_R",
        [LMAC_MODE_USXGMII] = "USXGMII",
        [LMAC_MODE_USGMII] = "USGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
        { 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
               (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
        if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
                return false;
        return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper function to get the sequential index of an enabled LMAC
 * within a CGX
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
        int tmp, id = 0;

        for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                if (tmp == lmac_id)
                        break;
                id++;
        }

        return id;
}

struct mac_ops *get_mac_ops(void *cgxd)
{
        if (!cgxd)
                return cgxd;

        return ((struct cgx *)cgxd)->mac_ops;
}

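/* Per-LMAC CSR access: each LMAC's register block lives at
 * (lmac << lmac_offset) within the CGX/RPM MMIO region
 */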
void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
               offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
                     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

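/* Return the number of CGX/RPM blocks probed so far (highest id + 1) */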
int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        /* Software must not access disabled LMAC registers */
        if (!is_lmac_valid(cgx_dev, lmac_id))
                return;
        cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        /* Software must not access disabled LMAC registers */
        if (!is_lmac_valid(cgx_dev, lmac_id))
                return 0;

        return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -EINVAL;

        return cgx->cgx_id;
}

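/* Return which NIX block (P2X interface) this LMAC's traffic is mapped to,
 * as selected in the CMR config register
 */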
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

        return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events
 * are posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and could make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}

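/* Install the interface MAC address at this LMAC's reserved slot (entry 0
 * of its slice) in the shared DMAC filter CAM; the owning LMAC id is
 * encoded at bits 49+ of the CAM entry
 */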
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, id;
        u64 cfg;

        if (!lmac)
                return -ENODEV;

        /* access mac_ops to know csr_offset */
        mac_ops = cgx_dev->mac_ops;

        /* Copy the 6-byte MAC address into the low 48 bits */
        cfg = ether_addr_to_u64(mac_addr);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
                CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx = cgxd;

        if (!cgx || !is_lmac_valid(cgx, lmac_id))
                return 0;

        /* Get mac_ops to know csr offset */
        mac_ops = cgx->mac_ops;

        return cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx;

        if (!cgxd)
                return 0;

        cgx = cgxd;
        mac_ops = cgx->mac_ops;
        return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, idx;
        u64 cfg = 0;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Get available index where entry is to be installed */
        idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
        if (idx < 0)
                return idx;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + idx;

        cfg = ether_addr_to_u64(mac_addr);
        cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
        cfg |= ((u64)lmac_id << 49);
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

        if (is_multicast_ether_addr(mac_addr)) {
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE_CAM;
                lmac->mcast_filters_count++;
        } else if (!lmac->mcast_filters_count) {
                cfg |= CGX_DMAC_MCAST_MODE;
        }

        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return idx;
}

int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 index = 0, id;
        u64 cfg;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Restore index 0 to its default init value as done during
         * cgx_lmac_init
         */
        set_bit(0, lmac->mac_to_index_bmap.bmap);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg &= ~CGX_DMAC_CAM_ACCEPT;
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0, which is reserved for
 * the interface MAC address
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct mac_ops *mac_ops;
        struct lmac *lmac;
        u64 cfg;
        int id;

        lmac = lmac_pdata(lmac_id, cgx_dev);
        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* ensure index is already set */
        if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
                return -EINVAL;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
        cfg &= ~CGX_RX_DMAC_ADR_MASK;
        cfg |= ether_addr_to_u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
        return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 mac[ETH_ALEN];
        u64 cfg;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* Skip deletion for the reserved index, i.e. index 0 */
        if (index == 0)
                return 0;

        rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        /* Read MAC address to check whether it is ucast or mcast */
        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

        u64_to_ether_addr(cfg, mac);
        if (is_multicast_ether_addr(mac))
                lmac->mcast_filters_count--;

        if (!lmac->mcast_filters_count) {
                cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE;
                cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
        }

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

        if (lmac)
                return lmac->mac_to_index_bmap.max;

        return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index;
        u64 cfg;
        int id;

        /* Guard against a disabled/invalid LMAC */
        if (!lmac)
                return 0;

        mac_ops = cgx_dev->mac_ops;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
        return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u8 num_lmacs;
        u32 fifo_len;

        fifo_len = cgx->mac_ops->fifo_len;
        num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);

        switch (num_lmacs) {
        case 1:
                return fifo_len;
        case 2:
                return fifo_len / 2;
        case 3:
                /* LMAC0 gets half of the FIFO, the rest get 1/4th */
                if (lmac_id == 0)
                        return fifo_len / 2;
                return fifo_len / 4;
        case 4:
        default:
                return fifo_len / 4;
        }
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        lmac = lmac_pdata(lmac_id, cgx);
        if (lmac->lmac_type == LMAC_MODE_SGMII ||
            lmac->lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}

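/* Enable/disable promiscuous mode: when enabling, DMAC CAM filtering is
 * bypassed by disabling every CAM entry of this LMAC; when disabling,
 * CAM entries holding a valid address are re-enabled
 */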
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx);
        struct mac_ops *mac_ops;
        u16 max_dmac;
        int index, i;
        u64 cfg = 0;
        int id;

        if (!cgx || !lmac)
                return;

        max_dmac = lmac->mac_to_index_bmap.max;
        id = get_sequence_id_of_lmac(cgx, lmac_id);

        mac_ops = cgx->mac_ops;
        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~CGX_DMAC_CAM_ACCEPT;
                cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                        cgx_write(cgx, 0,
                                  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
                }
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
                                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                                cgx_write(cgx, 0,
                                          (CGXX_CMRX_RX_DMAC_CAM0 +
                                           index * 0x8),
                                          cfg);
                        }
                }
        }
}

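/* Read 802.3x pause frame status; returns without touching CSRs on
 * RPM devices
 */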
static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
                                         u8 *tx_pause, u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
        return 0;
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 rx_pause, tx_pause;
        bool is_pfc_enabled;
        struct lmac *lmac;
        u64 cfg;

        if (!cgx)
                return;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return;

        /* Pause frames are not enabled, just return */
        if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
                return;

        cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
        is_pfc_enabled = !rx_pause;

        if (enable) {
                if (!is_pfc_enabled) {
                        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                        cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                        cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
                } else {
                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
                        cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
                }
        } else {
                if (!is_pfc_enabled) {
                        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                        cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
                } else {
                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
                        cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
                }
        }
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}

u64 cgx_features_get(void *cgxd)
{
        return ((struct cgx *)cgxd)->hw_features;
}

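/* The number of FEC lanes to accumulate stats over depends on the LMAC
 * mode: 40G_R uses four lanes, 50G_R uses two for BASE-R FEC and one for
 * RS-FEC, the other R-modes use a single lane
 */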
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
        if (!linfo->fec)
                return 0;

        switch (linfo->lmac_type_id) {
        case LMAC_MODE_SGMII:
        case LMAC_MODE_XAUI:
        case LMAC_MODE_RXAUI:
        case LMAC_MODE_QSGMII:
                return 0;
        case LMAC_MODE_10G_R:
        case LMAC_MODE_25G_R:
        case LMAC_MODE_100G_R:
        case LMAC_MODE_USXGMII:
                return 1;
        case LMAC_MODE_40G_R:
                return 4;
        case LMAC_MODE_50G_R:
                if (linfo->fec == OTX2_FEC_BASER)
                        return 2;
                else
                        return 1;
        default:
                return 0;
        }
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
        int stats, fec_stats_count = 0;
        int corr_reg, uncorr_reg;
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
                return 0;

        fec_stats_count =
                cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
                corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
                uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
        } else {
                corr_reg = CGXX_SPUX_RSFEC_CORR;
                uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
        }
        for (stats = 0; stats < fec_stats_count; stats++) {
                rsp->fec_corr_blks +=
                        cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
                rsp->fec_uncorr_blks +=
                        cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
        }
        return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}

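/* Toggle DATA_PKT_TX_EN and return the previous TX enable state so the
 * caller can restore it later
 */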
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg, last;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        last = cfg;
        if (enable)
                cfg |= DATA_PKT_TX_EN;
        else
                cfg &= ~DATA_PKT_TX_EN;

        if (cfg != last)
                cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
                                     u8 tx_pause, u8 rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        if (tx_pause) {
                cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
        } else {
                cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
                cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        }
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
        return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return;

        if (enable) {
                /* Set pause time and interval */
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));

                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);

                cfg = cgx_read(cgx, lmac_id,
                               CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));
        }

        /* ALL pause frames received are completely ignored */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
        cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

        /* Disable pause frames transmission */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
        cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);

        /* Disable all PFC classes by default */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
        cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}

int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
                       int pfvf_idx)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        if (!rx_pause)
                clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
        else
                set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);

        if (!tx_pause)
                clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
        else
                set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);

        /* check if other pfvfs are using flow control */
        if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
                dev_warn(&cgx->pdev->dev,
                         "Receive Flow control disable not permitted as it's used by other PFVFs\n");
                return -EPERM;
        }

        if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
                dev_warn(&cgx->pdev->dev,
                         "Transmit Flow control disable not permitted as it's used by other PFVFs\n");
                return -EPERM;
        }

        return 0;
}

int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
                        u8 rx_pause, u16 pfc_en)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        /* Nothing to do if tx pause is requested without any traffic classes */
        if (tx_pause && !pfc_en)
                return 0;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
        pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);

        if (rx_pause) {
                cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
                        CGXX_SMUX_CBFC_CTL_BCK_EN |
                        CGXX_SMUX_CBFC_CTL_DRP_EN);
        } else {
                cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
                         CGXX_SMUX_CBFC_CTL_BCK_EN |
                         CGXX_SMUX_CBFC_CTL_DRP_EN);
        }

        if (tx_pause) {
                cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
                cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
        } else {
                cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
                cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
        }

        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);

        /* Write source MAC address which will be filled into PFC packet */
        cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
        cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);

        return 0;
}

int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
                             u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);

        *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
        *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);

        return 0;
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (enable) {
                /* Enable inbound PTP timestamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                /* Disable inbound PTP timestamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}

/* CGX Firmware interface low level support */
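/* Issue a synchronous firmware command: write the request to
 * CGX_COMMAND_REG with ownership handed to firmware, then sleep until
 * cgx_fwi_event_handler() copies the response into lmac->resp and wakes
 * us up, or CGX_CMD_TIMEOUT expires
 */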
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
                        cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
                err = LMAC_AF_ERR_CMD_TIMEOUT;
                goto unlock;
        }

        /* we have a valid command response */
        smp_rmb(); /* Ensure the latest updates are visible */
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);

        /* Check for valid response */
        if (!err) {
                if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                        return -EIO;
                else
                        return 0;
        }

        return err;
}

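/* Map an ethtool SPEED_* value to the firmware CGX_LINK_* encoding */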
static int cgx_link_usertable_index_map(int speed)
{
        switch (speed) {
        case SPEED_10:
                return CGX_LINK_10M;
        case SPEED_100:
                return CGX_LINK_100M;
        case SPEED_1000:
                return CGX_LINK_1G;
        case SPEED_2500:
                return CGX_LINK_2HG;
        case SPEED_5000:
                return CGX_LINK_5G;
        case SPEED_10000:
                return CGX_LINK_10G;
        case SPEED_20000:
                return CGX_LINK_20G;
        case SPEED_25000:
                return CGX_LINK_25G;
        case SPEED_40000:
                return CGX_LINK_40G;
        case SPEED_50000:
                return CGX_LINK_50G;
        case 80000: /* ethtool.h does not define SPEED_80000 */
                return CGX_LINK_80G;
        case SPEED_100000:
                return CGX_LINK_100G;
        case SPEED_UNKNOWN:
                return CGX_LINK_NONE;
        }
        return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
                         u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
        /* Fill in default values in case the user did not pass
         * valid parameters
         */
        if (args->duplex == DUPLEX_UNKNOWN)
                args->duplex = duplex;
        if (args->speed == SPEED_UNKNOWN)
                args->speed = speed;
        if (args->an == AUTONEG_UNKNOWN)
                args->an = autoneg;
        args->mode = mode;
        args->ports = 0;
}

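/* Translate an ethtool link-mode bit into firmware speed/duplex/AN/mode
 * arguments for a MODE_CHANGE request
 */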
static void otx2_map_ethtool_link_modes(u64 bitmask,
                                        struct cgx_set_link_mode_args *args)
{
        switch (bitmask) {
        case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
                set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
                set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
                set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
                set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
                set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
                set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
                break;
        case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
                break;
        case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
                break;
        case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
                break;
        case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
                set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
                break;
        case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
                set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
                break;
        case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
                break;
        case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
                break;
        case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
                break;
        case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
                break;
        case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
                break;
        case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
                break;
        case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
                break;
        case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
                break;
        case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
                break;
        case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
                break;
        case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
                break;
        case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
                break;
        case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
                break;
        case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
                break;
        default:
                set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
                break;
        }
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
        linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
        linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);

        if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
                dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
                        linfo->lmac_type_id, cgx->cgx_id, lmac_id);
                strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type));
                return;
        }

        strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id],
                sizeof(linfo->lmac_type));
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* update the local copy of link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
                return;

        /* Ensure callback doesn't get unregistered until we finish it */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);
        if (id == CGX_CMD_LINK_BRING_UP ||
            id == CGX_CMD_LINK_BRING_DOWN ||
            id == CGX_CMD_MODE_CHANGE)
                return true;
        else
                return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
                return true;
        else
                return false;
}

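/* Firmware interface interrupt handler: acknowledges the event posted by
 * firmware in CGX_EVENT_REG, dispatching command responses to the sleeping
 * command thread and asynchronous link events to the registered callback
 */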
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        u64 event, offset, clear_bit;
        struct lmac *lmac = data;
        struct cgx *cgx;

        cgx = lmac->cgx;

        /* Clear SW_INT for RPM and CMR_INT for CGX */
        offset     = cgx->mac_ops->int_register;
        clear_bit  = cgx->mac_ops->int_ena_bit;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten
                 */
                lmac->resp = event;
                /* Ensure response is updated before thread context starts */
                smp_wmb();

                /* There won't be separate events for link changes initiated
                 * from software; hence report the command responses as events
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release thread waiting for completion */
                lmac->cmd_pend = false;
                wake_up(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Any new event or command response will be posted by firmware
         * only after the current status is acked.
         * Ack the interrupt register as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

        return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}

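/* Query firmware (via the first enabled LMAC) for the base address of the
 * shared firmware data region
 */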
int cgx_get_fwdata_base(u64 *base)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int first_lmac;
        int err;

        cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
        if (!cgx)
                return -ENXIO;

        first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
        if (!err)
                *base = FIELD_GET(RESP_FWD_BASE, resp);

        return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
                      int cgx_id, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        if (args.mode)
                otx2_map_ethtool_link_modes(args.mode, &args);
        if (!args.speed && args.duplex && !args.an)
                return -EINVAL;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
        req = FIELD_SET(CMDMODECHANGE_SPEED,
                        cgx_link_usertable_index_map(args.speed), req);
        req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
        req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
        req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
        req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
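
/* Set the FEC mode via firmware and return the FEC mode actually applied */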
int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int err = 0;

        cgx = cgx_get_pdata(cgx_id);
        if (!cgx)
                return -ENXIO;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
        req = FIELD_SET(CMDSETFEC, fec, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
        if (err)
                return err;

        cgx->lmac_idmap[lmac_id]->link_info.fec =
                        FIELD_GET(RESP_LINKSTAT_FEC, resp);
        return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable) {
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
                /* On CN10K, firmware offloads link bring-up/down operations
                 * to the ECP. On OcteonTX2, link operations are handled by
                 * the firmware itself, which can cause mbox errors, so
                 * configure the maximum time firmware polls for the link
                 * as 1000 ms.
                 */
                if (!is_dev_rpm(cgx))
                        req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);

        } else {
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
        }
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
        int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
        u64 req = 0;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
        return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
        struct device *dev = &cgx->pdev->dev;
        int major_ver, minor_ver;
        u64 resp;
        int err;

        if (!cgx->lmac_count)
                return 0;

        err = cgx_fwi_read_version(&resp, cgx);
        if (err)
                return err;

        major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
        minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
        dev_dbg(dev, "Firmware command interface version = %d.%d\n",
                major_ver, minor_ver);
        if (major_ver != CGX_FIRMWARE_MAJOR_VER)
                return -EIO;
        else
                return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
        struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
        struct device *dev = &cgx->pdev->dev;
        int i, err;

        /* Do link up for all the enabled lmacs */
        for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                err = cgx_fwi_link_change(cgx, i, true);
                if (err)
                        dev_info(dev, "cgx port %d:%d Link up command failed\n",
                                 cgx->cgx_id, i);
        }
}

int cgx_lmac_linkup_start(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

        return 0;
}

int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        /* Resetting PFC related CSRs */
        cfg = 0xff;
        cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);

        if (pf_req_flr)
                cgx_lmac_internal_loopback(cgxd, lmac_id, false);
        return 0;
}

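/* Request (or free, when req_free is set) the firmware-interface IRQ for
 * this LMAC and enable it in hardware
 */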
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
				   int cnt, bool req_free)
{
	struct mac_ops *mac_ops = cgx->mac_ops;
	u64 offset, ena_bit;
	unsigned int irq;
	int err;

	irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
				  cnt * mac_ops->irq_offset);
	offset   = mac_ops->int_set_reg;
	ena_bit  = mac_ops->int_ena_bit;

	if (req_free) {
		free_irq(irq, lmac);
		return 0;
	}

	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
	if (err)
		return err;

	/* Enable interrupt */
	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
	return 0;
}

int cgx_get_nr_lmacs(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_bmap;
}

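/* Set up software state for every LMAC on this CGX: discover which LMAC
 * IDs are populated, allocate per-LMAC bookkeeping (DMAC filter and
 * flow-control PF/VF bitmaps), install the firmware-event interrupt and
 * enable pause-frame handling. Called once from probe; on failure the
 * caller unwinds fully initialised LMACs via cgx_lmac_exit().
 */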
static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	u64 lmac_list;
	int i, err;

	/* lmac_list specifies which LMACs are enabled:
	 * when bit 'n' is set, LMAC 'n' is enabled.
	 */
	if (cgx->mac_ops->non_contiguous_serdes_lane) {
		if (is_dev_rpm2(cgx))
			lmac_list =
				cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
		else
			lmac_list =
				cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
	}

	if (cgx->lmac_count > cgx->max_lmac_per_mac)
		cgx->lmac_count = cgx->max_lmac_per_mac;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kasprintf(GFP_KERNEL, "cgx_fwi_%d_%d",
				       cgx->cgx_id, i);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}
		if (cgx->mac_ops->non_contiguous_serdes_lane) {
			lmac->lmac_id = __ffs64(lmac_list);
			lmac_list &= ~BIT_ULL(lmac->lmac_id);
		} else {
			lmac->lmac_id = i;
		}

		lmac->cgx = cgx;
		lmac->mac_to_index_bmap.max =
				cgx->mac_ops->dmac_filter_count /
				cgx->lmac_count;

		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
		if (err)
			goto err_name_free;

		/* Reserve first entry for default MAC address */
		set_bit(0, lmac->mac_to_index_bmap.bmap);

		lmac->rx_fc_pfvf_bmap.max = 128;
		err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
		if (err)
			goto err_dmac_bmap_free;

		lmac->tx_fc_pfvf_bmap.max = 128;
		err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
		if (err)
			goto err_rx_fc_bmap_free;

		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
		if (err)
			goto err_bitmap_free;

		/* Add reference */
		cgx->lmac_idmap[lmac->lmac_id] = lmac;
		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
		lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
	}

	return cgx_lmac_verify_fwi_version(cgx);

err_bitmap_free:
	rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
err_rx_fc_bmap_free:
	rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
err_dmac_bmap_free:
	rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

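/* Tear down everything cgx_lmac_init() set up. Only LMACs whose bit made
 * it into lmac_bmap are visited, so this is safe to call when init failed
 * partway through.
 */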
static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

	/* Free all LMAC related resources */
	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
		/* Free all the bitmaps allocated in cgx_lmac_init() */
		kfree(lmac->mac_to_index_bmap.bmap);
		kfree(lmac->rx_fc_pfvf_bmap.bmap);
		kfree(lmac->tx_fc_pfvf_bmap.bmap);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

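/* Cache hardware constants (RX FIFO depth, maximum LMACs per MAC) from
 * CGX_CONST and advertise the feature set, which differs between CGX
 * (OcteonTx2) and RPM (CN10K) silicon.
 */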
static void cgx_populate_features(struct cgx *cgx)
{
	u64 cfg;

	cfg = cgx_read(cgx, 0, CGX_CONST);
	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);

	if (is_dev_rpm(cgx))
		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
	else
		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}

static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
{
	if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
	    is_dev_rpm2(cgx))
		return 0x80;

	return 0x60;
}

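/* Register/stat layout and callbacks for CGX hardware. RPM devices get
 * their own mac_ops table from rpm_get_mac_ops() in probe instead.
 */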
static struct mac_ops cgx_mac_ops = {
	.name		=	"cgx",
	.csr_offset	=	0,
	.lmac_offset	=	18,
	.int_register	=	CGXX_CMRX_INT,
	.int_set_reg	=	CGXX_CMRX_INT_ENA_W1S,
	.irq_offset	=	9,
	.int_ena_bit	=	FW_CGX_INT,
	.lmac_fwi	=	CGX_LMAC_FWI,
	.non_contiguous_serdes_lane = false,
	.rx_stats_cnt	=	9,
	.tx_stats_cnt	=	18,
	.dmac_filter_count =	32,
	.get_nr_lmacs	=	cgx_get_nr_lmacs,
	.get_lmac_type	=	cgx_get_lmac_type,
	.lmac_fifo_len	=	cgx_get_lmac_fifo_len,
	.mac_lmac_intl_lbk =	cgx_lmac_internal_loopback,
	.mac_get_rx_stats  =	cgx_get_rx_stats,
	.mac_get_tx_stats  =	cgx_get_tx_stats,
	.get_fec_stats	   =	cgx_get_fec_stats,
	.mac_enadis_rx_pause_fwding =	cgx_lmac_enadis_rx_pause_fwding,
	.mac_get_pause_frm_status =	cgx_lmac_get_pause_frm_status,
	.mac_enadis_pause_frm =		cgx_lmac_enadis_pause_frm,
	.mac_pause_frm_config =		cgx_lmac_pause_frm_config,
	.mac_enadis_ptp_config =	cgx_lmac_ptp_config,
	.mac_rx_tx_enable =		cgx_lmac_rx_tx_enable,
	.mac_tx_enable =		cgx_lmac_tx_enable,
	.pfc_config =			cgx_lmac_pfc_config,
	.mac_get_pfc_frm_cfg   =	cgx_lmac_get_pfc_frm_cfg,
	.mac_reset   =			cgx_lmac_reset,
};

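/* Probe one CGX/RPM PCI function: map its CSR BAR, derive the CGX
 * instance number from the BAR base address, allocate all MSI-X vectors
 * the device advertises, create the link-up workqueue and initialise
 * every LMAC.
 */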
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	/* Use mac_ops to get MAC specific features */
	if (is_dev_rpm(cgx))
		cgx->mac_ops = rpm_get_mac_ops(cgx);
	else
		cgx->mac_ops = &cgx_mac_ops;

	cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Derive the CGX instance number from the CSR BAR base address
	 * before it is used in any log messages below.
	 */
	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	if (!cgx->lmac_count) {
		dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n",
			   cgx->cgx_id);
		err = -EOPNOTSUPP;
		goto err_release_regions;
	}

	nvec = pci_msix_vec_count(cgx->pdev);
	if (nvec < 0) {
		err = nvec;
		goto err_release_regions;
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	/* Init wq for processing linkup requests */
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd\n");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

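/* Undo cgx_probe(). The NULL check is defensive: every failure path in
 * probe clears drvdata, and remove only runs after a successful probe.
 */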
static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};