/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "nic_reg.h"
#include "nic.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-BGX"
#define DRV_VERSION	"1.0"

struct lmac {
	/* leading members (bgx back-pointer, MAC address, link flags) elided */
	int			lmacid; /* ID within BGX */
	int			lmacid_bd; /* ID on board */
	struct net_device	netdev;
	struct phy_device	*phydev;
	unsigned int		last_duplex;
	unsigned int		last_link;
	unsigned int		last_speed;
	struct delayed_work	dwork;
	struct workqueue_struct	*check_link;
};

struct bgx {
	/* other members (bgx_id, lmac_count, lmac_type, qlm_mode, lane_to_sds,
	 * use_training, pdev) elided
	 */
	struct lmac		lmac[MAX_LMAC_PER_BGX];
	void __iomem		*reg_base;
};

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count; /* Total no of LMACs in system */

static int bgx_xaui_check_link(struct lmac *lmac);

/* Supported devices */
static const struct pci_device_id bgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, bgx_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations, which in this case are redundant and only
 * add overhead.
 */

/* Register read/write APIs */
static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	return readq_relaxed(addr);
}

static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val, addr);
}

static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
{
	void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;

	writeq_relaxed(val | readq_relaxed(addr), addr);
}
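
/* Poll a per-LMAC CSR until the bits in @mask are cleared (@zero == true)
 * or set (@zero == false), sleeping 1-2 ms between reads; a non-zero return
 * means the condition was not met before the poll gave up.
 */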
static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
	reg_val = bgx_reg_read(bgx, lmac, reg);
	if (zero && !(reg_val & mask))
	if (!zero && (reg_val & mask))
	usleep_range(1000, 2000);

/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
EXPORT_SYMBOL(bgx_get_map);

/* Return number of LMAC configured for this BGX */
int bgx_get_lmac_count(int node, int bgx_idx)
	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	return bgx->lmac_count;
EXPORT_SYMBOL(bgx_get_lmac_count);

/* Returns the current link status of LMAC */
void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
	struct bgx_link_status *link = (struct bgx_link_status *)status;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	return bgx->lmac[lmacid].mac;
EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	ether_addr_copy(bgx->lmac[lmacid].mac, mac);
EXPORT_SYMBOL(bgx_set_lmac_mac);
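
/* Fold the PHY-reported link state into the SGMII/GMII port configuration:
 * CMRX_CFG is saved and restored around the update while duplex, speed,
 * slot time and the PCS sampling point are reprogrammed from lmac->last_*.
 */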
static void bgx_sgmii_change_link_state(struct lmac *lmac)
	struct bgx *bgx = lmac->bgx;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
	port_cfg &= ~GMI_PORT_CFG_DUPLEX;
	port_cfg |= (lmac->last_duplex << 2);

	misc_ctl |= PCS_MISC_CTL_GMX_ENO;

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
				      BGX_GMP_GMI_TXX_BURST, 8192);
		break;

	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
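
/* PHY library link-change callback: snapshot the phydev state into the lmac,
 * update link_up, and reprogram the MAC (SGMII) or re-run the XAUI link
 * check as appropriate.
 */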
static void bgx_lmac_handler(struct net_device *netdev)
	struct lmac *lmac = container_of(netdev, struct lmac, netdev);
	struct phy_device *phydev = lmac->phydev;
	int link_changed = 0;

	if (!phydev->link && lmac->last_link)
	    (lmac->last_duplex != phydev->duplex ||
	     lmac->last_link != phydev->link ||
	     lmac->last_speed != phydev->speed)) {

	lmac->last_link = phydev->link;
	lmac->last_speed = phydev->speed;
	lmac->last_duplex = phydev->duplex;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	bgx_sgmii_change_link_state(lmac);
	bgx_xaui_check_link(lmac);
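
/* RX/TX statistics are read directly from the per-LMAC CMR STAT CSRs;
 * @idx selects which counter within the block.
 */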
u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
EXPORT_SYMBOL(bgx_get_rx_stats);

u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
EXPORT_SYMBOL(bgx_get_tx_stats);
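
/* Invalidate every DMAC CAM filter entry that was programmed for this LMAC */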
static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
			 (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
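
/* SGMII/1000BASE-X bring-up: program GMI thresholds and the jabber limit,
 * reset the PCS and wait for autonegotiation to complete.
 */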
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
			 PCS_MRX_STATUS_AN_CPT, false)) {
		dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
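
/* Bring-up path shared by the XAUI/RXAUI/XLAUI/XFI and 10G/40G KR modes:
 * reset the SPU, optionally arm link training (KR), clear stale interrupts,
 * disable FEC and autonegotiation, then program the SMU TX/RX limits.
 */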
static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
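
/* Re-validate an XAUI-class link: wait for the SPU to leave reset, for block
 * lock/lane alignment and for SMU RX/TX to settle; latched receive faults
 * restart training when it is enabled, and the SPU receiver is re-enabled
 * once everything checks out. Returns non-zero if the link could not be
 * brought up.
 */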
static int bgx_xaui_check_link(struct lmac *lmac)
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BR_STATUS_BLK_LOCK not completed\n");
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			dev_err(&bgx->pdev->dev,
				"SPU_BX_STATUS_RX_ALIGN not completed\n");

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
			 SMU_RX_CTL_STATUS, true)) {
		dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU RX not idle\n");

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		dev_err(&bgx->pdev->dev, "SMU TX not idle\n");

	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		dev_err(&bgx->pdev->dev, "Receive fault\n");

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
			 SPU_STATUS1_RCV_LNK, false)) {
		dev_err(&bgx->pdev->dev, "SPU receive link down\n");

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
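
/* Delayed-work handler: sample the latched SPU receive-link status every two
 * seconds, fold any change into lmac->link_up/last_speed/last_duplex and
 * re-run the full link check when the state flips.
 */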
static void bgx_poll_for_link(struct work_struct *work)
	lmac = container_of(work, struct lmac, dwork.work);

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
		       BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
		     SPU_STATUS1_RCV_LNK, false);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		bgx_xaui_check_link(lmac);

	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
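
/* Per-LMAC bring-up: run the SGMII or XAUI init path, enable TX padding and
 * FCS insertion, turn on packet RX/TX, then either attach the PHY
 * (SGMII-class modes) or start the link-polling workqueue for the serial
 * modes.
 */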
static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
	lmac = &bgx->lmac[lmacid];

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		if (bgx_lmac_sgmii_init(bgx, lmacid))
	if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
	cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);

	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		lmac->phydev->dev_flags = 0;

		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
				       PHY_INTERFACE_MODE_SGMII))

		phy_start_aneg(lmac->phydev);

	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
	if (!lmac->check_link)
	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
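
/* Per-LMAC teardown: stop and destroy the link-poll workqueue, disable the
 * MAC, flush its DMAC filters and detach the PHY if one was attached.
 */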
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
	lmac = &bgx->lmac[lmacid];
	if (lmac->check_link) {
		/* Destroy work queue */
		cancel_delayed_work(&lmac->dwork);
		flush_workqueue(lmac->check_link);
		destroy_workqueue(lmac->check_link);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
		phy_disconnect(lmac->phydev);
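
/* Translate the detected QLM mode into lmac_count, lmac_type, the
 * lane-to-serdes mapping and the link-training flag.
 */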
static void bgx_set_num_ports(struct bgx *bgx)
	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;

	/* If the low-level firmware has programmed an LMAC count based on the
	 * board type, use it; otherwise keep the default static values.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	bgx->lmac_count = lmac_count;
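
/* One-time BGX block initialization: check BIST status, program each LMAC's
 * type and lane mapping, set the backpressure channel masks, and clear all
 * DMAC filtering and NCSI steering rules.
 */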
static void bgx_init_hw(struct bgx *bgx)
	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			bgx->lane_to_sds = 0x0e;
			bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
				      (bgx->lmac_type << 8) | bgx->lane_to_sds);
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			      (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
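
/* Infer the QLM mode from the LMAC0 type (and, for the KR modes, the
 * training-enable bit) that low-level firmware has already programmed.
 */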
static void bgx_get_qlm_mode(struct bgx *bgx)
	struct device *dev = &bgx->pdev->dev;

	/* Read LMAC0 type to figure out QLM mode
	 * This is configured by low level firmware
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
		   SPU_PMD_CRTL_TRAIN_EN;

	bgx->qlm_mode = QLM_MODE_SGMII;
	dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_XAUI_1X4;
	dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
	dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_XFI_4X1;
	dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
	dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
	dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
	dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
	bgx->qlm_mode = QLM_MODE_SGMII;
	dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);

#ifdef CONFIG_ACPI

static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
					    "mac-address", mac, ETH_ALEN);
	if (!is_valid_ether_addr(mac)) {

	memcpy(dst, mac, ETH_ALEN);

/* Currently only sets the MAC address. */
static acpi_status bgx_acpi_register_phy(acpi_handle handle,
					 u32 lvl, void *context, void **rv)
	struct bgx *bgx = context;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))

	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);

	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);

	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
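
/* Walk the ACPI namespace looking for the node named "BGX<n>" that matches
 * this device, then run bgx_acpi_register_phy() on its children.
 */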
static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
				     void *context, void **ret_val)
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct bgx *bgx = context;

	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
		pr_warn("Invalid link device\n");

	if (strncmp(string.pointer, bgx_sel, 4))

	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
			    bgx_acpi_register_phy, NULL, bgx, NULL);

	kfree(string.pointer);
	return AE_CTRL_TERMINATE;

static int bgx_init_acpi_phy(struct bgx *bgx)
	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);

#else

static int bgx_init_acpi_phy(struct bgx *bgx)

#endif /* CONFIG_ACPI */

#if IS_ENABLED(CONFIG_OF_MDIO)
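
/* Device-tree variant: walk the children of the "bgx<n>" node to pick up
 * each LMAC's PHY phandle and MAC address.
 */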
static int bgx_init_of_phy(struct bgx *bgx)
	struct device_node *np;
	struct device_node *np_child;

	/* Get BGX node from DT */
	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
	np = of_find_node_by_name(NULL, bgx_sel);

	for_each_child_of_node(np, np_child) {
		struct device_node *phy_np = of_parse_phandle(np_child,
		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);

		mac = of_get_mac_address(np_child);
		ether_addr_copy(bgx->lmac[lmac].mac, mac);

		SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
		bgx->lmac[lmac].lmacid = lmac;
		if (lmac == MAX_LMAC_PER_BGX)

#else

static int bgx_init_of_phy(struct bgx *bgx)

#endif /* CONFIG_OF_MDIO */
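
/* Entry point for PHY/MAC discovery: dispatches to the ACPI or the
 * device-tree variant above.
 */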
static int bgx_init_phy(struct bgx *bgx)
	return bgx_init_acpi_phy(bgx);

	return bgx_init_of_phy(bgx);
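
/* PCI probe: map the CSR BAR, derive the BGX index from the BAR address and
 * the NUMA node, detect the QLM mode, discover PHYs/MAC addresses, then
 * enable every LMAC.
 */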
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct device *dev = &pdev->dev;
	struct bgx *bgx = NULL;

	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);

	pci_set_drvdata(pdev, bgx);

	err = pci_enable_device(pdev);
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);

	err = pci_request_regions(pdev, DRV_NAME);
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;

	/* MAP configuration registers */
	bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!bgx->reg_base) {
		dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
		goto err_release_regions;

	bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
	bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);

	/* Enable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		err = bgx_lmac_enable(bgx, lmac);
			dev_err(dev, "BGX%d failed to enable lmac%d\n",

	bgx_vnic[bgx->bgx_id] = NULL;
err_release_regions:
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
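
/* PCI remove: disable every LMAC, clear this device's bgx_vnic[] slot and
 * release the PCI resources.
 */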
static void bgx_remove(struct pci_dev *pdev)
	struct bgx *bgx = pci_get_drvdata(pdev);

	/* Disable all LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++)
		bgx_lmac_disable(bgx, lmac);

	bgx_vnic[bgx->bgx_id] = NULL;
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

static struct pci_driver bgx_driver = {
	.name = DRV_NAME,
	.id_table = bgx_id_table,
	.probe = bgx_probe,
	.remove = bgx_remove,
};

static int __init bgx_init_module(void)
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&bgx_driver);

static void __exit bgx_cleanup_module(void)
	pci_unregister_driver(&bgx_driver);

module_init(bgx_init_module);
module_exit(bgx_cleanup_module);