// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for BCM963xx builtin Ethernet mac
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>

#include <bcm63xx_dev_enet.h>
#include "bcm63xx_enet.h"

static char bcm_enet_driver_name[] = "bcm63xx_enet";

static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");

/* io registers memory shared between all devices */
static void __iomem *bcm_enet_shared_base[3];
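
/*
 * The three shared windows map, in order: the global DMA controller
 * registers, the per-channel configuration registers and the
 * per-channel state ram. The enet_dma_*, enet_dmac_* and enet_dmas_*
 * helpers below each address one of them.
 */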

/*
 * io helpers to access mac registers
 */
static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enet_writel(struct bcm_enet_priv *priv,
			       u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

/*
 * io helpers to access switch registers
 */
static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(priv->base + off);
}

static inline void enetsw_writel(struct bcm_enet_priv *priv,
				 u32 val, u32 off)
{
	bcm_writel(val, priv->base + off);
}

static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readw(priv->base + off);
}

static inline void enetsw_writew(struct bcm_enet_priv *priv,
				 u16 val, u32 off)
{
	bcm_writew(val, priv->base + off);
}

static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readb(priv->base + off);
}

static inline void enetsw_writeb(struct bcm_enet_priv *priv,
				 u8 val, u32 off)
{
	bcm_writeb(val, priv->base + off);
}

/* io helpers to access shared registers */
static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
{
	return bcm_readl(bcm_enet_shared_base[0] + off);
}

static inline void enet_dma_writel(struct bcm_enet_priv *priv,
				   u32 val, u32 off)
{
	bcm_writel(val, bcm_enet_shared_base[0] + off);
}

static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[1] +
		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[1] +
		bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
}

static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
{
	return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
				    u32 val, u32 off, int chan)
{
	bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
}

/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
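
/*
 * A single MIIDATA word carries a whole clause-22 MDIO frame: the
 * opcode, the PHY address, the register number, the 0x2 turnaround
 * bits and, for writes, the 16-bit payload in the low half, as built
 * by the two callbacks below.
 */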

/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}

/*
 * MII internal write callback
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	(void)do_mdio_op(priv, tmp);
	return 0;
}

/*
 * MII read callback from phylib
 */
static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
				     int regnum)
{
	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
}

/*
 * MII write callback from phylib
 */
static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
				      int regnum, u16 value)
{
	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
}

/*
 * MII read callback from mii core
 */
static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
				  int regnum)
{
	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
}

/*
 * MII write callback from mii core
 */
static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
				    int regnum, int value)
{
	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
}
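
/*
 * Rx descriptor ownership protocol: setting DMADESC_OWNER_MASK hands
 * a descriptor to the hardware, which clears the bit once it has
 * filled the buffer. The wmb() in the refill loop below ensures the
 * buffer address is visible before ownership is transferred.
 */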

/*
 * refill rx queue
 */
static int bcm_enet_refill_rx(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		if (!priv->rx_skb[desc_idx]) {
			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
			if (!skb)
				break;
			priv->rx_skb[desc_idx] = skb;
			p = dma_map_single(&priv->pdev->dev, skb->data,
					   priv->rx_skb_size,
					   DMA_FROM_DEVICE);
			desc->address = p;
		}

		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
			priv->rx_dirty_desc = 0;
		} else {
			priv->rx_dirty_desc++;
		}
		wmb();
		desc->len_stat = len_stat;

		priv->rx_desc_count++;

		/* tell dma engine we allocated one buffer */
		if (priv->dma_has_sram)
			enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
		else
			enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
	}

	/* If rx ring is still empty, set a timer to try allocating
	 * again at a later time. */
	if (priv->rx_desc_count == 0 && netif_running(dev)) {
		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
		priv->rx_timeout.expires = jiffies + HZ;
		add_timer(&priv->rx_timeout);
	}

	return 0;
}

/*
 * timer callback to defer refill rx queue in case we're OOM
 */
static void bcm_enet_refill_rx_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout);
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
	bcm_enet_refill_rx(dev);
	spin_unlock(&priv->rx_lock);
}
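
/*
 * Reception uses a copybreak strategy: frames shorter than the
 * "copybreak" module parameter are copied into a small fresh skb so
 * the full-size rx buffer can be handed straight back to the ring,
 * while larger frames are passed up as-is and their slot is refilled
 * with a new buffer later.
 */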

/*
 * extract packet from rx queue
 */
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;
	processed = 0;

	/* don't scan ring further than number of refilled
	 * rx descriptors */
	if (budget > priv->rx_desc_count)
		budget = priv->rx_desc_count;

	do {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;
		int desc_idx;
		u32 len_stat;
		unsigned int len;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

		/* make sure we actually read the descriptor status at
		 * each loop */
		rmb();

		len_stat = desc->len_stat;

		/* break if dma ownership belongs to hw */
		if (len_stat & DMADESC_OWNER_MASK)
			break;

		processed++;
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
		if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
		    (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
			dev->stats.rx_dropped++;
			continue;
		}

		/* recycle packet if it's marked as bad */
		if (!priv->enet_is_sw &&
		    unlikely(len_stat & DMADESC_ERR_MASK)) {
			dev->stats.rx_errors++;

			if (len_stat & DMADESC_OVSIZE_MASK)
				dev->stats.rx_length_errors++;
			if (len_stat & DMADESC_CRC_MASK)
				dev->stats.rx_crc_errors++;
			if (len_stat & DMADESC_UNDER_MASK)
				dev->stats.rx_frame_errors++;
			if (len_stat & DMADESC_OV_MASK)
				dev->stats.rx_fifo_errors++;
			continue;
		}

		/* valid packet */
		skb = priv->rx_skb[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
			struct sk_buff *nskb;

			nskb = napi_alloc_skb(&priv->napi, len);
			if (!nskb) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
			}

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
			memcpy(nskb->data, skb->data, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
			skb = nskb;
		} else {
			dma_unmap_single(&priv->pdev->dev, desc->address,
					 priv->rx_skb_size, DMA_FROM_DEVICE);
			priv->rx_skb[desc_idx] = NULL;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		netif_receive_skb(skb);

	} while (--budget > 0);

	if (processed || !priv->rx_desc_count) {
		bcm_enet_refill_rx(dev);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
				 ENETDMAC_CHANCFG, priv->rx_chan);
	}

	return processed;
}
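
/*
 * Tx reclaim runs from NAPI context while start_xmit can run
 * concurrently with bottom halves disabled, so tx_lock is taken and
 * released around each descriptor rather than around the whole loop.
 * With "force" set (device teardown), buffers are reclaimed even if
 * the DMA still owns the descriptor.
 */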

/*
 * try to or force reclaim of transmitted buffers
 */
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
	int released;

	priv = netdev_priv(dev);
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
		struct bcm_enet_desc *desc;
		struct sk_buff *skb;

		/* We run in a bh and fight against start_xmit, which
		 * is called with bh disabled */
		spin_lock(&priv->tx_lock);

		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];

		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
			spin_unlock(&priv->tx_lock);
			break;
		}

		/* ensure other field of the descriptor were not read
		 * before we checked ownership */
		rmb();

		skb = priv->tx_skb[priv->tx_dirty_desc];
		priv->tx_skb[priv->tx_dirty_desc] = NULL;
		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
				 DMA_TO_DEVICE);

		priv->tx_dirty_desc++;
		if (priv->tx_dirty_desc == priv->tx_ring_size)
			priv->tx_dirty_desc = 0;
		priv->tx_desc_count++;

		spin_unlock(&priv->tx_lock);

		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

		dev_kfree_skb(skb);
		released++;
	}

	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

	return released;
}
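
/*
 * NAPI poll sequence: acknowledge the rx/tx channel interrupts,
 * reclaim finished tx buffers, drain the rx ring up to the budget,
 * and only unmask the interrupts again once the ring is empty and
 * the poll has completed.
 */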

/*
 * poll func, called by network core
 */
static int bcm_enet_poll(struct napi_struct *napi, int budget)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	int rx_work_done;

	priv = container_of(napi, struct bcm_enet_priv, napi);
	dev = priv->net_dev;

	/* ack interrupts */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* reclaim sent skb */
	bcm_enet_tx_reclaim(dev, 0);

	spin_lock(&priv->rx_lock);
	rx_work_done = bcm_enet_receive_queue(dev, budget);
	spin_unlock(&priv->rx_lock);

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	napi_complete_done(napi, rx_work_done);

	/* restore rx/tx interrupt */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	return rx_work_done;
}

/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}

/*
 * rx/tx dma interrupt handler
 */
static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;

	dev = dev_id;
	priv = netdev_priv(dev);

	/* mask rx/tx interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

/*
 * tx request callback
 */
static netdev_tx_t
bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct bcm_enet_desc *desc;
	u32 len_stat;
	netdev_tx_t ret;

	priv = netdev_priv(dev);

	/* lock against tx reclaim */
	spin_lock(&priv->tx_lock);

	/* make sure the tx hw queue is not full, should not happen
	 * since we stop queue before it's the case */
	if (unlikely(!priv->tx_desc_count)) {
		netif_stop_queue(dev);
		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
			"available?\n");
		ret = NETDEV_TX_BUSY;
		goto out_unlock;
	}

	/* pad small packets sent on a switch device */
	if (priv->enet_is_sw && skb->len < 64) {
		int needed = 64 - skb->len;
		char *data;

		if (unlikely(skb_tailroom(skb) < needed)) {
			struct sk_buff *nskb;

			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
			if (!nskb) {
				ret = NETDEV_TX_BUSY;
				goto out_unlock;
			}
			dev_kfree_skb(skb);
			skb = nskb;
		}
		data = skb_put_zero(skb, needed);
	}

	/* point to the next available desc */
	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
	priv->tx_skb[priv->tx_curr_desc] = skb;

	/* fill descriptor */
	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);

	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
	len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
		DMADESC_APPEND_CRC |
		DMADESC_OWNER_MASK;

	priv->tx_curr_desc++;
	if (priv->tx_curr_desc == priv->tx_ring_size) {
		priv->tx_curr_desc = 0;
		len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
	}
	priv->tx_desc_count--;

	/* dma might be already polling, make sure we update desc
	 * fields in correct order */
	wmb();
	desc->len_stat = len_stat;
	wmb();

	/* kick tx dma */
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->tx_chan);

	/* stop queue if no more desc available */
	if (!priv->tx_desc_count)
		netif_stop_queue(dev);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ret = NETDEV_TX_OK;

out_unlock:
	spin_unlock(&priv->tx_lock);
	return ret;
}
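
/*
 * The MAC filters unicast traffic through four "perfect match"
 * register pairs: PML holds the low four bytes of the address and
 * PMH the top two plus a data-valid bit. Entry 0 carries the
 * interface address; the remaining three are reused for multicast
 * filtering below.
 */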

/*
 * Change the interface's mac address.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv;
	struct sockaddr *addr = p;
	u32 val;

	priv = netdev_priv(dev);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, val, ENET_PML_REG(0));

	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	val |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, val, ENET_PMH_REG(0));

	return 0;
}

/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}

/*
 * set mac duplex parameters
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 val;

	val = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		val |= ENET_TXCTL_FD_MASK;
	else
		val &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, val, ENET_TXCTL_REG);
}

/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	if (!priv->dma_has_sram)
		return;

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
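
/*
 * Pause resolution: when the PHY reports that both link partners
 * advertised pause, flow control is enabled in both directions; when
 * pause autonegotiation is disabled, the user's manual rx/tx settings
 * are applied instead; otherwise flow control stays off.
 */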

/*
 * link changed callback (from phylib)
 */
static void bcm_enet_adjust_phy_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct phy_device *phydev;
	int status_changed;

	priv = netdev_priv(dev);
	phydev = dev->phydev;
	status_changed = 0;

	if (priv->old_link != phydev->link) {
		status_changed = 1;
		priv->old_link = phydev->link;
	}

	/* reflect duplex change in mac configuration */
	if (phydev->link && phydev->duplex != priv->old_duplex) {
		bcm_enet_set_duplex(priv,
				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
		status_changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	/* enable flow control if remote advertise it (trust phylib to
	 * check that duplex is full) */
	if (phydev->link && phydev->pause != priv->old_pause) {
		int rx_pause_en, tx_pause_en;

		if (phydev->pause) {
			/* pause was advertised by lpa and us */
			rx_pause_en = 1;
			tx_pause_en = 1;
		} else if (!priv->pause_auto) {
			/* pause setting overridden by user */
			rx_pause_en = priv->pause_rx;
			tx_pause_en = priv->pause_tx;
		} else {
			rx_pause_en = 0;
			tx_pause_en = 0;
		}

		status_changed = 1;
		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);

		priv->old_pause = phydev->pause;
	}

	if (status_changed) {
		pr_info("%s: link %s", dev->name, phydev->link ?
			"UP" : "DOWN");
		if (phydev->link)
			pr_cont(" - %d/%s - flow control %s", phydev->speed,
				DUPLEX_FULL == phydev->duplex ? "full" : "half",
				phydev->pause == 1 ? "rx&tx" : "off");

		pr_cont("\n");
	}
}

/*
 * link changed callback (if phylib is not used)
 */
static void bcm_enet_adjust_link(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcm_enet_set_duplex(priv, priv->force_duplex_full);
	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
	netif_carrier_on(dev);

	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
		dev->name,
		priv->force_speed_100 ? 100 : 10,
		priv->force_duplex_full ? "full" : "half",
		priv->pause_rx ? "rx" : "off",
		priv->pause_tx ? "tx" : "off");
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mii_bus->id, priv->phy_id);

		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phy_support_sym_pause(phydev);
		phy_set_max_speed(phydev, SPEED_100);
		phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx,
				  priv->pause_auto);

		phy_attached_info(phydev);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
	} else {
		phydev = NULL;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
			  dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMA_BUFALLOC_REG(priv->rx_chan));
	else
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				 ENETDMAC_BUFALLOC, priv->rx_chan);

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, priv->rx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->rx_chan);
		enet_dmas_writel(priv, priv->tx_desc_dma,
				 ENETDMAS_RSTART_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, priv->rx_desc_dma,
				 ENETDMAC_RSTART, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_desc_dma,
				 ENETDMAC_RSTART, priv->tx_chan);
	}

	/* clear remaining state ram for rx & tx channel */
	if (priv->dma_has_sram) {
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
		enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
	} else {
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
	}

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	if (priv->dma_has_sram) {
		val = priv->rx_ring_size / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
		val = (priv->rx_ring_size * 2) / 3;
		enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
	} else {
		enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
		enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
		enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
	}

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_ENABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	if (priv->dma_has_sram)
		enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, priv->dma_chan_en_mask,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_chan_int_mask,
			 ENETDMAC_IRMASK, priv->tx_chan);

	if (priv->has_phy)
		phy_start(phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	if (priv->has_phy)
		phy_disconnect(phydev);

	return ret;
}

/*
 * disable mac
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * disable dma in given channel
 */
static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
{
	int limit;

	enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);

	limit = 1000;
	do {
		u32 val;

		val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
		if (!(val & ENETDMAC_CHANCFG_EN_MASK))
			break;
		udelay(1);
	} while (limit--);
}

/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* make sure no mib update is scheduled */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy)
		phy_disconnect(dev->phydev);

	return 0;
}

/*
 * ethtool callbacks
 */
struct bcm_enet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int mib_reg;
};

#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),	\
		     offsetof(struct bcm_enet_priv, m)
#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),	\
		     offsetof(struct net_device_stats, m)
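
/*
 * Each table entry records the size and offset of its backing field,
 * so the stats code can fetch u32 or u64 counters from either
 * netdev->stats or the private mib copy without per-field accessors.
 */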

static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
};

#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)

static const u32 unused_mib_regs[] = {
	ETH_MIB_TX_ALL_OCTETS,
	ETH_MIB_TX_ALL_PKTS,
	ETH_MIB_RX_ALL_OCTETS,
	ETH_MIB_RX_ALL_PKTS,
};

static void bcm_enet_get_drvinfo(struct net_device *netdev,
				 struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}

static int bcm_enet_get_sset_count(struct net_device *netdev,
				   int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENET_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enet_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static void update_mib_counters(struct bcm_enet_priv *priv)
{
	int i;

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 val;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			continue;

		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64))
			*(u64 *)p += val;
		else
			*(u32 *)p += val;
	}

	/* also empty unused mib counters to make sure mib counter
	 * overflow interrupt is cleared */
	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
}

static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}

static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);

	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enet_gstrings_stats[i];
		if (s->mib_reg == -1)
			p = (char *)&netdev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
	mutex_unlock(&priv->mib_update_lock);
}

static int bcm_enet_nway_reset(struct net_device *dev)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy)
		return phy_ethtool_nway_reset(dev);

	return -EOPNOTSUPP;
}

static int bcm_enet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;
	u32 supported, advertising;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;

		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	} else {
		cmd->base.autoneg = 0;
		cmd->base.speed = (priv->force_speed_100) ?
			SPEED_100 : SPEED_10;
		cmd->base.duplex = (priv->force_duplex_full) ?
			DUPLEX_FULL : DUPLEX_HALF;
		supported = ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;
		advertising = 0;
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			cmd->link_modes.advertising, advertising);
		cmd->base.port = PORT_MII;
	}
	return 0;
}

static int bcm_enet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_ethtool_ksettings_set(dev->phydev, cmd);
	} else {
		if (cmd->base.autoneg ||
		    (cmd->base.speed != SPEED_100 &&
		     cmd->base.speed != SPEED_10) ||
		    cmd->base.port != PORT_MII)
			return -EINVAL;

		priv->force_speed_100 =
			(cmd->base.speed == SPEED_100) ? 1 : 0;
		priv->force_duplex_full =
			(cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;

		if (netif_running(dev))
			bcm_enet_adjust_link(dev);
		return 0;
	}
}

static void bcm_enet_get_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}

static int bcm_enet_set_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enet_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enet_open(dev);
		if (err)
			dev_close(dev);
		else
			bcm_enet_set_multicast_list(dev);
	}
	return 0;
}

static void bcm_enet_get_pauseparam(struct net_device *dev,
				    struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	ecmd->autoneg = priv->pause_auto;
	ecmd->rx_pause = priv->pause_rx;
	ecmd->tx_pause = priv->pause_tx;
}

static int bcm_enet_set_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *ecmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	if (priv->has_phy) {
		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
			/* asymmetric pause mode not supported,
			 * actually possible but integrated PHY has RO
			 * asym_pause bit */
			return -EINVAL;
		}
	} else {
		/* no pause autoneg on direct mii connection */
		if (ecmd->autoneg)
			return -EINVAL;
	}

	priv->pause_auto = ecmd->autoneg;
	priv->pause_rx = ecmd->rx_pause;
	priv->pause_tx = ecmd->tx_pause;

	return 0;
}

static const struct ethtool_ops bcm_enet_ethtool_ops = {
	.get_strings		= bcm_enet_get_strings,
	.get_sset_count		= bcm_enet_get_sset_count,
	.get_ethtool_stats	= bcm_enet_get_ethtool_stats,
	.nway_reset		= bcm_enet_nway_reset,
	.get_drvinfo		= bcm_enet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= bcm_enet_get_ringparam,
	.set_ringparam		= bcm_enet_set_ringparam,
	.get_pauseparam		= bcm_enet_get_pauseparam,
	.set_pauseparam		= bcm_enet_set_pauseparam,
	.get_link_ksettings	= bcm_enet_get_link_ksettings,
	.set_link_ksettings	= bcm_enet_set_link_ksettings,
};

static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->has_phy) {
		if (!dev->phydev)
			return -ENODEV;
		return phy_mii_ioctl(dev->phydev, rq, cmd);
	} else {
		struct mii_if_info mii;

		mii.dev = dev;
		mii.mdio_read = bcm_enet_mdio_read_mii;
		mii.mdio_write = bcm_enet_mdio_write_mii;
		mii.phy_id = 0;
		mii.phy_id_mask = 0x3f;
		mii.reg_num_mask = 0x1f;
		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
	}
}

/*
 * adjust mtu, can't be called while device is running
 */
static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	int actual_mtu = new_mtu;

	if (netif_running(dev))
		return -EBUSY;

	/* add ethernet header + vlan tag size */
	actual_mtu += VLAN_ETH_HLEN;

	/*
	 * setup maximum size before we get overflow mark in
	 * descriptor, note that this will not prevent reception of
	 * big frames, they will be split into multiple buffers
	 * anyway
	 */
	priv->hw_mtu = actual_mtu;

	/*
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

	dev->mtu = new_mtu;
	return 0;
}
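
/*
 * Worked example, assuming the default 1500 byte MTU and the default
 * dma_maxburst of 16 (a 64 byte alignment unit): hw_mtu = 1500 +
 * VLAN_ETH_HLEN (18) = 1518, and rx_skb_size = ALIGN(1518 + 4, 64) =
 * 1536.
 */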

/*
 * preinit hardware to allow mii operation while device is down
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}

static const struct net_device_ops bcm_enet_ops = {
	.ndo_open		= bcm_enet_open,
	.ndo_stop		= bcm_enet_stop,
	.ndo_start_xmit		= bcm_enet_start_xmit,
	.ndo_set_mac_address	= bcm_enet_set_mac_address,
	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
	.ndo_do_ioctl		= bcm_enet_ioctl,
	.ndo_change_mtu		= bcm_enet_change_mtu,
};

/*
 * allocate netdevice, request register memory and register device.
 */
static int bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	int i, ret;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;

	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_desc_shift = pd->dma_desc_shift;
		priv->rx_chan = pd->rx_chan;
		priv->tx_chan = pd->tx_chan;
	}

	if (priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_disable_clk_mac;
		}
		ret = clk_prepare_enable(priv->phy_clk);
		if (ret)
			goto out_disable_clk_mac;
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {
		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation return 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {
		/* run platform code to initialize PHY device */
		if (pd && pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	dev->ethtool_ops = &bcm_enet_ethtool_ops;
	/* MTU range: 46 - 2028 */
	dev->min_mtu = ETH_ZLEN - ETH_HLEN;
	dev->max_mtu = BCMENET_MAX_MTU - VLAN_ETH_HLEN;
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	if (priv->mii_bus)
		mdiobus_free(priv->mii_bus);

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	clk_disable_unprepare(priv->phy_clk);

out_disable_clk_mac:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/*
 * exit func, stops hardware and unregisters netdevice
 */
static int bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		mdiobus_unregister(priv->mii_bus);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		pd = dev_get_platdata(&pdev->dev);
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* disable hw block clocks */
	clk_disable_unprepare(priv->phy_clk);
	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enet_driver = {
	.probe	= bcm_enet_probe,
	.remove	= bcm_enet_remove,
	.driver	= {
		.name	= "bcm63xx_enet",
		.owner	= THIS_MODULE,
	},
};

/*
 * switch mii access callbacks
 */
static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
				int ext, int phy_id, int location)
{
	u32 reg;
	int ret;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_RD_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
	return ret;
}

static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
				  int ext, int phy_id, int location,
				  uint16_t data)
{
	u32 reg;

	spin_lock_bh(&priv->enetsw_mdio_lock);
	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);

	reg = ENETSW_MDIOC_WR_MASK |
		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
		(location << ENETSW_MDIOC_REG_SHIFT);

	if (ext)
		reg |= ENETSW_MDIOC_EXT_MASK;

	reg |= data;

	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
	udelay(50);
	spin_unlock_bh(&priv->enetsw_mdio_lock);
}

static inline int bcm_enet_port_is_rgmii(int portid)
{
	return portid >= ENETSW_RGMII_PORT0;
}
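
/*
 * The switch variant has no phylib attachment; a 1 Hz timer polls
 * BMSR on every used port, mirrors link state into the port override
 * registers and recomputes speed/duplex from the advertise/LPA
 * registers whenever a link comes up.
 */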

/*
 * enet sw PHY polling
 */
static void swphy_poll_timer(struct timer_list *t)
{
	struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll);
	unsigned int i;

	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		int val, j, up, advertise, lpa, speed, duplex, media;
		int external_phy = bcm_enet_port_is_rgmii(i);
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (port->bypass_link)
			continue;

		/* dummy read to clear */
		for (j = 0; j < 2; j++)
			val = bcmenet_sw_mdio_read(priv, external_phy,
						   port->phy_id, MII_BMSR);

		if (val == 0xffff)
			continue;

		up = (val & BMSR_LSTATUS) ? 1 : 0;
		if (!(up ^ priv->sw_port_link[i]))
			continue;

		priv->sw_port_link[i] = up;

		/* link changed */
		if (!up) {
			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
				 port->name);
			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
				      ENETSW_PORTOV_REG(i));
			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
				      ENETSW_PTCTRL_TXDIS_MASK,
				      ENETSW_PTCTRL_REG(i));
			continue;
		}

		advertise = bcmenet_sw_mdio_read(priv, external_phy,
						 port->phy_id, MII_ADVERTISE);

		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
					   MII_LPA);

		/* figure out media and duplex from advertise and LPA values */
		media = mii_nway_result(lpa & advertise);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;

		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
			speed = 100;
		else
			speed = 10;

		if (val & BMSR_ESTATEN) {
			advertise = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_CTRL1000);

			lpa = bcmenet_sw_mdio_read(priv, external_phy,
						port->phy_id, MII_STAT1000);

			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
			    && lpa & (LPA_1000FULL | LPA_1000HALF)) {
				speed = 1000;
				duplex = (lpa & LPA_1000FULL);
			}
		}

		dev_info(&priv->pdev->dev,
			 "link UP on %s, %dMbps, %s-duplex\n",
			 port->name, speed, duplex ? "full" : "half");

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		if (speed == 1000)
			override |= ENETSW_IMPOV_1000_MASK;
		else if (speed == 100)
			override |= ENETSW_IMPOV_100_MASK;
		if (duplex)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	priv->swphy_poll.expires = jiffies + HZ;
	add_timer(&priv->swphy_poll);
}

/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enetsw_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i, ret;
	unsigned int size;
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* mask all interrupts and request them */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  0, dev->name, dev);
	if (ret)
		goto out_freeirq;

	if (priv->irq_tx != -1) {
		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
				  0, dev->name, dev);
		if (ret)
			goto out_freeirq_rx;
	}

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* disable all ports */
	for (i = 0; i < priv->num_ports; i++) {
		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
			      ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
			      ENETSW_PTCTRL_TXDIS_MASK,
			      ENETSW_PTCTRL_REG(i));

		priv->sw_port_link[i] = 0;
	}

	/* reset mib */
	val = enetsw_readb(priv, ENETSW_GMCR_REG);
	val |= ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);
	val &= ~ENETSW_GMCR_RST_MIB_MASK;
	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
	mdelay(1);

	/* force CPU port state */
	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);

	/* enable switch forward engine */
	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
	val |= ENETSW_SWMODE_FWD_EN_MASK;
	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);

	/* enable jumbo on all ports */
	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dmas_writel(priv, priv->rx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->rx_chan);
	enet_dmas_writel(priv, priv->tx_desc_dma,
			 ENETDMAS_RSTART_REG, priv->tx_chan);

	/* clear remaining state ram for rx & tx channel */
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);

	/* set dma maximum burst len */
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->rx_chan);
	enet_dmac_writel(priv, priv->dma_maxburst,
			 ENETDMAC_MAXBURST, priv->tx_chan);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
			 ENETDMAC_CHANCFG, priv->rx_chan);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IR, priv->tx_chan);

	/* make sure we enable napi before rx interrupt */
	napi_enable(&priv->napi);

	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
			 ENETDMAC_IRMASK, priv->tx_chan);

	netif_carrier_on(dev);
	netif_start_queue(dev);

	/* apply override config for bypass_link ports here. */
	for (i = 0; i < priv->num_ports; i++) {
		struct bcm63xx_enetsw_port *port;
		u8 override;

		port = &priv->used_ports[i];
		if (!port->used)
			continue;

		if (!port->bypass_link)
			continue;

		override = ENETSW_PORTOV_ENABLE_MASK |
			ENETSW_PORTOV_LINKUP_MASK;

		switch (port->force_speed) {
		case 1000:
			override |= ENETSW_IMPOV_1000_MASK;
			break;
		case 100:
			override |= ENETSW_IMPOV_100_MASK;
			break;
		case 10:
			break;
		default:
			pr_warn("invalid forced speed on port %s: assume 10\n",
				port->name);
			break;
		}

		if (port->force_duplex_full)
			override |= ENETSW_IMPOV_FDX_MASK;

		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
	}

	/* start phy polling timer */
	timer_setup(&priv->swphy_poll, swphy_poll_timer, 0);
	mod_timer(&priv->swphy_poll, jiffies);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	return ret;
}

/* stop callback */
static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	if (priv->irq_tx != -1)
		free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);

	return 0;
}

/* try to sort out phy external status by walking the used_port field
 * in the bcm_enet_priv structure. in case the phy address is not
 * assigned to any physical port on the switch, assume it is external
 * (and yell at the user).
 */
static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
{
	int i;

	for (i = 0; i < priv->num_ports; ++i) {
		if (!priv->used_ports[i].used)
			continue;
		if (priv->used_ports[i].phy_id == phy_id)
			return bcm_enet_port_is_rgmii(i);
	}

	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
		    phy_id);
	return 1;
}

/* can't use bcmenet_sw_mdio_read directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
				    int location)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	return bcmenet_sw_mdio_read(priv,
				    bcm_enetsw_phy_is_external(priv, phy_id),
				    phy_id, location);
}

/* can't use bcmenet_sw_mdio_write directly as we need to sort out
 * external/internal status of the given phy_id first.
 */
static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
				      int location, int val)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);
	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
			      phy_id, location, val);
}
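
/* mii-lib does the actual SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG handling;
 * we only provide the mdio accessors and the phy addressing masks.
 */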
static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_if_info mii;

	mii.dev = dev;
	mii.mdio_read = bcm_enetsw_mii_mdio_read;
	mii.mdio_write = bcm_enetsw_mii_mdio_write;
	mii.phy_id = 0;
	mii.phy_id_mask = 0x3f;
	mii.reg_num_mask = 0x1f;
	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
}

static const struct net_device_ops bcm_enetsw_ops = {
	.ndo_open = bcm_enetsw_open,
	.ndo_stop = bcm_enetsw_stop,
	.ndo_start_xmit = bcm_enet_start_xmit,
	.ndo_change_mtu = bcm_enet_change_mtu,
	.ndo_do_ioctl = bcm_enetsw_ioctl,
};
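
/* statistics exposed through "ethtool -S"; entries with mib_reg == -1
 * are software counters taken from netdev->stats, the others are read
 * from the switch MIB registers.
 */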
static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
	{ "rx_packets", DEV_STAT(rx_packets), -1 },
	{ "tx_packets", DEV_STAT(tx_packets), -1 },
	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
	{ "rx_errors", DEV_STAT(rx_errors), -1 },
	{ "tx_errors", DEV_STAT(tx_errors), -1 },
	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },

	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
	  ETHSW_MIB_RX_1024_1522 },
	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
	  ETHSW_MIB_RX_1523_2047 },
	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
	  ETHSW_MIB_RX_2048_4095 },
	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
	  ETHSW_MIB_RX_4096_8191 },
	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
	  ETHSW_MIB_RX_8192_9728 },
	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
	{ "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },

	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
};

#define BCM_ENETSW_STATS_LEN	\
	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))

static void bcm_enetsw_get_strings(struct net_device *netdev,
				   u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_enetsw_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static int bcm_enetsw_get_sset_count(struct net_device *netdev,
				     int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_ENETSW_STATS_LEN;
	default:
		return -EINVAL;
	}
}

static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
				   struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
	strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
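
/* two passes: the first snapshots the hardware MIB counters into priv
 * (a 64-bit counter is spread over two consecutive 32-bit MIB registers,
 * low word first), the second copies software (mib_reg == -1) and
 * hardware counters into the data[] array handed back to ethtool.
 */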
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
					 struct ethtool_stats *stats,
					 u64 *data)
{
	struct bcm_enet_priv *priv;
	int i;

	priv = netdev_priv(netdev);

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		u32 lo, hi;
		char *p;
		int reg;

		s = &bcm_enetsw_gstrings_stats[i];

		reg = s->mib_reg;
		if (reg == -1)
			continue;

		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
		p = (char *)priv + s->stat_offset;

		if (s->sizeof_stat == sizeof(u64)) {
			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
			*(u64 *)p = ((u64)hi << 32 | lo);
		} else {
			*(u32 *)p = lo;
		}
	}

	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
		const struct bcm_enet_stats *s;
		char *p;

		s = &bcm_enetsw_gstrings_stats[i];

		if (s->mib_reg == -1)
			p = (char *)&netdev->stats + s->stat_offset;
		else
			p = (char *)priv + s->stat_offset;

		data[i] = (s->sizeof_stat == sizeof(u64)) ?
			*(u64 *)p : *(u32 *)p;
	}
}

static void bcm_enetsw_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;

	priv = netdev_priv(dev);

	/* rx/tx ring is actually only limited by memory */
	ering->rx_max_pending = 8192;
	ering->tx_max_pending = 8192;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->rx_pending = priv->rx_ring_size;
	ering->tx_pending = priv->tx_ring_size;
}
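
/* changing the ring sizes requires a full stop/open cycle: the new
 * rx_pending/tx_pending values only take effect when the rings are
 * re-allocated by bcm_enetsw_open().
 */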
static int bcm_enetsw_set_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct bcm_enet_priv *priv;
	int was_running;

	priv = netdev_priv(dev);

	was_running = 0;
	if (netif_running(dev)) {
		bcm_enetsw_stop(dev);
		was_running = 1;
	}

	priv->rx_ring_size = ering->rx_pending;
	priv->tx_ring_size = ering->tx_pending;

	if (was_running) {
		int err;

		err = bcm_enetsw_open(dev);
		if (err)
			dev_close(dev);
	}
	return 0;
}

static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
	.get_strings = bcm_enetsw_get_strings,
	.get_sset_count = bcm_enetsw_get_sset_count,
	.get_ethtool_stats = bcm_enetsw_get_ethtool_stats,
	.get_drvinfo = bcm_enetsw_get_drvinfo,
	.get_ringparam = bcm_enetsw_get_ringparam,
	.set_ringparam = bcm_enetsw_set_ringparam,
};

/* allocate netdevice, request register memory and register device. */
static int bcm_enetsw_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enetsw_platform_data *pd;
	struct resource *res_mem;
	int ret, irq_rx, irq_tx;

	if (!bcm_enet_shared_base[0])
		return -EPROBE_DEFER;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_rx = platform_get_irq(pdev, 0);
	irq_tx = platform_get_irq(pdev, 1);
	if (!res_mem || irq_rx < 0)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);

	/* initialize default and fetch platform data */
	priv->enet_is_sw = true;
	priv->irq_rx = irq_rx;
	priv->irq_tx = irq_tx;
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		memcpy(priv->used_ports, pd->used_ports,
		       sizeof(pd->used_ports));
		priv->num_ports = pd->num_ports;
		priv->dma_has_sram = pd->dma_has_sram;
		priv->dma_chan_en_mask = pd->dma_chan_en_mask;
		priv->dma_chan_int_mask = pd->dma_chan_int_mask;
		priv->dma_chan_width = pd->dma_chan_width;
	}
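
	/* reuse the MTU handler to derive the rx buffer sizing from the
	 * default MTU before any ring is allocated.
	 */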
	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
		goto out;

	priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out;
	}

	priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw");
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out;
	}
	ret = clk_prepare_enable(priv->mac_clk);
	if (ret)
		goto out;

	priv->rx_chan = 0;
	priv->tx_chan = 1;
	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0);

	/* register netdevice */
	dev->netdev_ops = &bcm_enetsw_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&priv->enetsw_mdio_lock);

	ret = register_netdev(dev);
	if (ret)
		goto out_disable_clk;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->mac_clk);
out:
	free_netdev(dev);
	return ret;
}

/* exit func, stops hardware and unregisters netdevice */
static int bcm_enetsw_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	clk_disable_unprepare(priv->mac_clk);

	free_netdev(dev);
	return 0;
}

struct platform_driver bcm63xx_enetsw_driver = {
	.probe	= bcm_enetsw_probe,
	.remove	= bcm_enetsw_remove,
	.driver	= {
		.name	= "bcm63xx_enetsw",
		.owner	= THIS_MODULE,
	},
};

/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
	void __iomem *p[3];
	unsigned int i;

	memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));

	for (i = 0; i < 3; i++) {
		p[i] = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(p[i]))
			return PTR_ERR(p[i]);
	}

	memcpy(bcm_enet_shared_base, p, sizeof(bcm_enet_shared_base));

	return 0;
}

static int bcm_enet_shared_remove(struct platform_device *pdev)
{
	return 0;
}

/* this "shared" driver is needed because both macs share a single
 * address space
 */
struct platform_driver bcm63xx_enet_shared_driver = {
	.probe	= bcm_enet_shared_probe,
	.remove	= bcm_enet_shared_remove,
	.driver	= {
		.name	= "bcm63xx_enet_shared",
		.owner	= THIS_MODULE,
	},
};
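
/* register the shared driver first so its register space is normally
 * mapped before the mac drivers probe; the mac probes return
 * -EPROBE_DEFER until bcm_enet_shared_base is populated.
 */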
static struct platform_driver * const drivers[] = {
	&bcm63xx_enet_shared_driver,
	&bcm63xx_enet_driver,
	&bcm63xx_enetsw_driver,
};

static int __init bcm_enet_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit bcm_enet_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(bcm_enet_init);
module_exit(bcm_enet_exit);

MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");