// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
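/* Illustrative sketch (not part of the driver): how a ring whose end is
 * marked by a wrap bit, as described above, can be walked without a
 * separate ring-size counter. The names my_bd and MY_WRAP are
 * hypothetical; the driver itself uses struct rxbd8/txbd8 and the
 * RXBD_WRAP/TXBD_WRAP flags.
 *
 *	struct my_bd { u16 status; u16 length; u32 bufptr; };
 *
 *	static struct my_bd *my_next_bd(struct my_bd *bd, struct my_bd *base)
 *	{
 *		// The last descriptor carries the wrap bit, so the ring
 *		// end is a property of the descriptor itself.
 *		return (bd->status & MY_WRAP) ? base : bd + 1;
 *	}
 */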
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/net_tstamp.h>

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>
#endif
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}
static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		if (priv->poll_mode == GFAR_SQ_POLLING)
			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
		else /* GFAR_MQ_POLLING */
			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (likely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255. This means that the 3 most significant bits of the
 * hash index select which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31 - whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
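/* Worked example (illustrative, made-up CRC value): with the extended
 * hash (width = 9), a CRC result of 0x9a000000 gives
 *	whichreg = 0x9a000000 >> 28          = 9   (hash_regs[9], i.e. gaddr1)
 *	whichbit = (0x9a000000 >> 23) & 0x1f = 20  (IBM bit 20)
 * so the write above sets bit (31 - 20) = 11 counted from the LSB,
 * i.e. value = 1 << 11 = 0x800, in the tenth hash register.
 */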
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}
static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;

		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask, IMASK_DEFAULT);
	}
}
static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}
static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		u32 rxq_mask, txq_mask;
		int ret;

		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);

		ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask);
		if (!ret) {
			grp->rx_bit_map = rxq_mask ?
			rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask);
		if (!ret) {
			grp->tx_bit_map = txq_mask ?
			txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
		}

		if (priv->poll_mode == GFAR_SQ_POLLING) {
			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		}
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);
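	/* Example (illustrative): a device-tree bit map of 0x80 means
	 * "queue 0" in the hardware's MSB-first convention; bitrev8(0x80)
	 * returns 0x01, so the for_each_set_bit() walks below see bit 0
	 * set and yield queue index 0, as intended.
	 */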
	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}
static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const void *mac_addr;
	int err = 0, i;
	phy_interface_t interface;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode, poll_mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2")) {
		mode = MQ_MG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	} else {
		mode = SQ_SG_MODE;
		poll_mode = GFAR_SQ_POLLING;
	}

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		if (poll_mode == GFAR_SQ_POLLING) {
			num_tx_qs = num_grps; /* one txq per int group */
			num_rx_qs = num_grps; /* one rxq per int group */
		} else { /* GFAR_MQ_POLLING */
			u32 tx_queues, rx_queues;
			int ret;

			/* parse the num of HW tx and rx queues */
			ret = of_property_read_u32(np, "fsl,num_tx_queues",
						   &tx_queues);
			num_tx_qs = ret ? 1 : tx_queues;

			ret = of_property_read_u32(np, "fsl,num_rx_queues",
						   &rx_queues);
			num_rx_qs = ret ? 1 : rx_queues;
		}
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;
	priv->poll_mode = poll_mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
				goto err_grp_init;
			}
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);
	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);
	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (!IS_ERR(mac_addr)) {
		ether_addr_copy(dev->dev_addr, mac_addr);
	} else {
		eth_hw_addr_random(dev);
		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
	}

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	/* Use PHY connection type from the DT node if one is specified there.
	 * rgmii-id really needs to be specified. Other types can be
	 * detected by hardware
	 */
	err = of_get_phy_mode(np, &interface);
	if (!err)
		priv->interface = interface;
	else
		priv->interface = gfar_get_interface(dev);

	if (of_find_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_get_property(np, "fsl,wake-on-filer", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P2010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;
	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}
/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	int i;
	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

	dev_kfree_skb(rx_queue->skb);

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;

		if (!rxb->page)
			continue;

		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);
		rxb->page = NULL;
	}

	kfree(rx_queue->rx_buff);
	rx_queue->rx_buff = NULL;
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_buff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}
void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}
static void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int i = 0;
	u32 tempval;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	netif_trans_update(priv->ndev); /* prevent tx timeout */
}
static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
		__free_page(page);

		return false;
	}

	rxb->dma = addr;
	rxb->page = page;
	rxb->page_offset = 0;

	return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
	struct gfar_extra_stats *estats = &priv->extra_stats;

	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
}
static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt)
{
	struct rxbd8 *bdp;
	struct gfar_rx_buff *rxb;
	int i;

	i = rx_queue->next_to_use;
	bdp = &rx_queue->rx_bd_base[i];
	rxb = &rx_queue->rx_buff[i];

	while (alloc_cnt--) {
		/* try reuse page */
		if (unlikely(!rxb->page)) {
			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
				gfar_rx_alloc_err(rx_queue);
				break;
			}
		}

		/* Setup the new RxBD */
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

		/* Update to the next pointer */
		bdp++;
		rxb++;

		if (unlikely(++i == rx_queue->rx_ring_size)) {
			i = 0;
			bdp = rx_queue->rx_bd_base;
			rxb = rx_queue->rx_buff;
		}
	}

	rx_queue->next_to_use = i;
	rx_queue->next_to_alloc = i;
}
static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	/* force link state update after mac reset */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phy_start(ndev->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;
			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack! Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}
			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}
			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}
/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip. We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register. We assume
 * that the TBIPA register is valid. Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
	 * everything for us? Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
		return;
	}

	/* Single clk mode, mii mode off (for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct gfar_private *priv = netdev_priv(dev);
	phy_interface_t interface = priv->interface;
	struct phy_device *phydev;
	struct ethtool_eee edata;

	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array),
			       mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	/* Remove any features not supported by the controller */
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	/* Add support for flow control */
	phy_support_asym_pause(phydev);

	/* disable EEE autoneg, EEE not supported by eTSEC */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phy_ethtool_set_eee(phydev, &edata);

	return 0;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload. We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}
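/* Worked example (illustrative): for an untagged IPv4/TCP frame with
 * checksum offload only (fcb_length = GMAC_FCB_LEN = 8), the FCB was
 * pushed in front of the Ethernet header, so skb_network_offset() is
 * 8 + ETH_HLEN = 8 + 14 = 22, giving l3os = 22 - 8 = 14; l4os is the
 * IP header length, typically 20 when no IP options are present.
 */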
static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}
/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}
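/* Illustrative reading (hypothetical address): an FCB starting at an
 * address ending in 0x19 gives 0x19 % 0x20 = 0x19 > 0x18, i.e. the FCB
 * begins in the last 7 bytes of a 32-byte window, so gfar_start_xmit()
 * falls back to skb_checksum_help() instead of the hardware offload.
 */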
/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	skb_frag_t *frag;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len) {
		if (unlikely(skb_cow_head(skb, fcb_len))) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = be32_to_cpu(txbdp->lstatus);

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
		goto dma_map_err;

	txbdp_start->bufPtr = cpu_to_be32(bufaddr);

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (likely(!nr_frags)) {
		if (likely(!do_tstamp))
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		u32 lstatus_start = lstatus;

		/* Place the fragment addresses and lengths into the TxBDs */
		frag = &skb_shinfo(skb)->frags[0];
		for (i = 0; i < nr_frags; i++, frag++) {
			unsigned int size;

			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			size = skb_frag_size(frag);

			lstatus = be32_to_cpu(txbdp->lstatus) | size |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
						   size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
				goto dma_map_err;

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = cpu_to_be32(bufaddr);
			txbdp->lstatus = cpu_to_be32(lstatus);
		}

		lstatus = lstatus_start;
	}

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);

		bufaddr = be32_to_cpu(txbdp_start->bufPtr);
		bufaddr += fcb_len;

		lstatus_ts |= BD_LFLAG(TXBD_READY) |
			      (skb_headlen(skb) - fcb_len);
		if (!nr_frags)
			lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

		txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
		txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;

		/* Setup tx hardware time stamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	gfar_wmb();

	txbdp_start->lstatus = cpu_to_be32(lstatus);

	gfar_wmb(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow.
	 */
	spin_lock_bh(&tx_queue->txlock);
	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);
	spin_unlock_bh(&tx_queue->txlock);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	return NETDEV_TX_OK;

dma_map_err:
	txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
	if (do_tstamp)
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	for (i = 0; i < nr_frags; i++) {
		lstatus = be32_to_cpu(txbdp->lstatus);
		if (!(lstatus & BD_LFLAG(TXBD_READY)))
			break;

		lstatus &= ~BD_LFLAG(TXBD_READY);
		txbdp->lstatus = cpu_to_be32(lstatus);
		bufaddr = be32_to_cpu(txbdp->bufPtr);
		dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
			       DMA_TO_DEVICE);
		txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
	}
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gfar_private *priv = netdev_priv(dev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	if (dev->flags & IFF_UP)
		stop_gfar(dev);

	dev->mtu = new_mtu;

	if (dev->flags & IFF_UP)
		startup_gfar(dev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);

	return 0;
}

static void reset_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	reset_gfar(priv->ndev);
}

static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			priv->hwts_rx_en = 0;
			reset_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			priv->hwts_rx_en = 1;
			reset_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	config.flags = 0;
	config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = (priv->hwts_rx_en ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_set(dev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return gfar_hwtstamp_get(dev, rq);

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		bool do_tstamp;

		do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			    priv->hwts_tx_en;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(do_tstamp))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = be32_to_cpu(lbdp->lstatus);

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(do_tstamp)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = be16_to_cpu(next->length) +
				 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = be16_to_cpu(bdp->length);

		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
				 buflen, DMA_TO_DEVICE);

		if (unlikely(do_tstamp)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
					  ~0x7UL);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			gfar_clear_txbd_status(bdp);
			bdp = next;
		}

		gfar_clear_txbd_status(bdp);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
				       be16_to_cpu(bdp->length),
				       DMA_TO_DEVICE);
			gfar_clear_txbd_status(bdp);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock(&tx_queue->txlock);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock(&tx_queue->txlock);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (tx_queue->num_txbdfree &&
	    netif_tx_queue_stopped(txq) &&
	    !(test_bit(GFAR_DOWN, &priv->state)))
		netif_wake_subqueue(priv->ndev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
2293 static void count_errors(u32 lstatus, struct net_device *ndev)
2295 struct gfar_private *priv = netdev_priv(ndev);
2296 struct net_device_stats *stats = &ndev->stats;
2297 struct gfar_extra_stats *estats = &priv->extra_stats;
2299 /* If the packet was truncated, none of the other errors matter */
2300 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2301 stats->rx_length_errors++;
2303 atomic64_inc(&estats->rx_trunc);
2307 /* Count the errors, if there were any */
2308 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2309 stats->rx_length_errors++;
2311 if (lstatus & BD_LFLAG(RXBD_LARGE))
2312 atomic64_inc(&estats->rx_large);
2314 atomic64_inc(&estats->rx_short);
2316 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2317 stats->rx_frame_errors++;
2318 atomic64_inc(&estats->rx_nonoctet);
2320 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2321 atomic64_inc(&estats->rx_crcerr);
2322 stats->rx_crc_errors++;
2324 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2325 atomic64_inc(&estats->rx_overrun);
2326 stats->rx_over_errors++;
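/* Rx interrupt handler: acknowledge filer (FGPI) events, then mask Rx
 * interrupts and defer frame processing to the group's Rx NAPI context.
 */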
2330 static irqreturn_t gfar_receive(int irq, void *grp_id)
2332 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2333 unsigned long flags;
2336 ievent = gfar_read(&grp->regs->ievent);
2338 if (unlikely(ievent & IEVENT_FGPI)) {
2339 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2343 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2344 spin_lock_irqsave(&grp->grplock, flags);
2345 imask = gfar_read(&grp->regs->imask);
2346 imask &= IMASK_RX_DISABLED;
2347 gfar_write(&grp->regs->imask, imask);
2348 spin_unlock_irqrestore(&grp->grplock, flags);
2349 __napi_schedule(&grp->napi_rx);
2351 /* Clear IEVENT, so interrupts aren't called again
2352 * because of the packets that have already arrived.
2354 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2360 /* Interrupt Handler for Transmit complete */
2361 static irqreturn_t gfar_transmit(int irq, void *grp_id)
2363 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2364 unsigned long flags;
2367 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2368 spin_lock_irqsave(&grp->grplock, flags);
2369 imask = gfar_read(&grp->regs->imask);
2370 imask &= IMASK_TX_DISABLED;
2371 gfar_write(&grp->regs->imask, imask);
2372 spin_unlock_irqrestore(&grp->grplock, flags);
2373 __napi_schedule(&grp->napi_tx);
2375 /* Clear IEVENT, so interrupts aren't called again
2376 * because of the packets that have already arrived.
2378 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2384 static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2385 struct sk_buff *skb, bool first)
2387 int size = lstatus & BD_LENGTH_MASK;
2388 struct page *page = rxb->page;
2390 if (likely(first)) {
2393 /* the last fragment's length contains the full frame length */
2394 if (lstatus & BD_LFLAG(RXBD_LAST))
2395 size -= skb->len;
2397 WARN(size < 0, "gianfar: rx fragment size underflow");
2401 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2402 rxb->page_offset + RXBUF_ALIGNMENT,
2403 size, GFAR_RXB_TRUESIZE);
2406 /* try reuse page */
2407 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2410 /* change offset to the other half */
2411 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
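/* Each page serves as two alternating half-page buffers; flipping
 * the offset lets the other half be posted to hardware while the
 * stack may still hold a reference to this one.
 */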
2418 static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
2419 struct gfar_rx_buff *old_rxb)
2421 struct gfar_rx_buff *new_rxb;
2422 u16 nta = rxq->next_to_alloc;
2424 new_rxb = &rxq->rx_buff[nta];
2426 /* find next buf that can reuse a page */
2428 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2430 /* copy page reference */
2431 *new_rxb = *old_rxb;
2433 /* sync for use by the device */
2434 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2435 old_rxb->page_offset,
2436 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
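/* Consume the buffer at next_to_clean: build a new skb around the
 * first buffer of a frame, or attach the buffer to the frame's
 * existing skb, then recycle or unmap the backing page.
 */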
2439 static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
2440 u32 lstatus, struct sk_buff *skb)
2442 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2443 struct page *page = rxb->page;
2447 void *buff_addr = page_address(page) + rxb->page_offset;
2449 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2450 if (unlikely(!skb)) {
2451 gfar_rx_alloc_err(rx_queue);
2454 skb_reserve(skb, RXBUF_ALIGNMENT);
2458 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2459 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2461 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2462 /* reuse the free half of the page */
2463 gfar_reuse_rx_page(rx_queue, rxb);
2465 /* page cannot be reused, unmap it */
2466 dma_unmap_page(rx_queue->dev, rxb->dma,
2467 PAGE_SIZE, DMA_FROM_DEVICE);
2470 /* clear rxb content */
2476 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2478 /* If valid headers were found, and valid sums
2479 * were verified, then we tell the kernel that no
2480 * checksumming is necessary. Otherwise, the checksum state is left at CHECKSUM_NONE and the stack must verify it.
2482 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2483 (RXFCB_CIP | RXFCB_CTU))
2484 skb->ip_summed = CHECKSUM_UNNECESSARY;
2486 skb_checksum_none_assert(skb);
2489 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2490 static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2492 struct gfar_private *priv = netdev_priv(ndev);
2493 struct rxfcb *fcb = NULL;
2495 /* the FCB, if present, sits at the beginning of the buffer */
2496 fcb = (struct rxfcb *)skb->data;
2498 /* Remove the FCB from the skb
2499 * Remove the padded bytes, if there are any
2501 if (priv->uses_rxfcb)
2502 skb_pull(skb, GMAC_FCB_LEN);
2504 /* Get receive timestamp from the skb */
2505 if (priv->hwts_rx_en) {
2506 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2507 u64 *ns = (u64 *) skb->data;
2509 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2510 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2514 skb_pull(skb, priv->padding);
2516 /* Trim off the FCS */
2517 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2519 if (ndev->features & NETIF_F_RXCSUM)
2520 gfar_rx_checksum(skb, fcb);
2522 /* We need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2523 * Even if vlan rx accel is disabled, on some chips
2524 * RXFCB_VLN is pseudo randomly set.
2526 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2527 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2528 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2529 be16_to_cpu(fcb->vlctl));
2532 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2533 * until the budget/quota has been reached. Returns the number of frames handled.
2536 static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2539 struct net_device *ndev = rx_queue->ndev;
2540 struct gfar_private *priv = netdev_priv(ndev);
2543 struct sk_buff *skb = rx_queue->skb;
2544 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2545 unsigned int total_bytes = 0, total_pkts = 0;
2547 /* Get the first full descriptor */
2548 i = rx_queue->next_to_clean;
2550 while (rx_work_limit--) {
2553 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2554 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2558 bdp = &rx_queue->rx_bd_base[i];
2559 lstatus = be32_to_cpu(bdp->lstatus);
2560 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2563 /* lost RXBD_LAST descriptor due to overrun */
2565 if (skb && (lstatus & BD_LFLAG(RXBD_FIRST))) {
2566 /* discard faulty buffer */
2569 rx_queue->stats.rx_dropped++;
2571 /* can continue normally */
2574 /* order rx buffer descriptor reads */
2577 /* fetch next to clean buffer from the ring */
2578 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2585 if (unlikely(++i == rx_queue->rx_ring_size))
2588 rx_queue->next_to_clean = i;
2590 /* fetch next buffer if not the last in frame */
2591 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2594 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2595 count_errors(lstatus, ndev);
2597 /* discard faulty buffer */
2600 rx_queue->stats.rx_dropped++;
2604 gfar_process_frame(ndev, skb);
2606 /* Increment the number of packets */
2608 total_bytes += skb->len;
2610 skb_record_rx_queue(skb, rx_queue->qindex);
2612 skb->protocol = eth_type_trans(skb, ndev);
2614 /* Send the packet up the stack */
2615 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2620 /* Store incomplete frames for completion */
2621 rx_queue->skb = skb;
2623 rx_queue->stats.rx_packets += total_pkts;
2624 rx_queue->stats.rx_bytes += total_bytes;
2627 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2629 /* Update Last Free RxBD pointer for LFC */
2630 if (unlikely(priv->tx_actual_en)) {
2631 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2633 gfar_write(rx_queue->rfbptr, bdp_dma);
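/* NAPI Rx poll callback for single-queue (SQ) polling mode: process up
 * to @budget frames on the group's single Rx queue, then re-enable Rx
 * interrupts once the ring has been drained.
 */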
2639 static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2641 struct gfar_priv_grp *gfargrp =
2642 container_of(napi, struct gfar_priv_grp, napi_rx);
2643 struct gfar __iomem *regs = gfargrp->regs;
2644 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2647 /* Clear IEVENT, so interrupts aren't called again
2648 * because of the packets that have already arrived
2650 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2652 work_done = gfar_clean_rx_ring(rx_queue, budget);
2654 if (work_done < budget) {
2656 napi_complete_done(napi, work_done);
2657 /* Clear the halt bit in RSTAT */
2658 gfar_write(&regs->rstat, gfargrp->rstat);
2660 spin_lock_irq(&gfargrp->grplock);
2661 imask = gfar_read(&regs->imask);
2662 imask |= IMASK_RX_DEFAULT;
2663 gfar_write(&regs->imask, imask);
2664 spin_unlock_irq(&gfargrp->grplock);
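/* NAPI Tx poll callback for single-queue polling mode: Tx reclaim runs
 * to completion (the budget does not apply to Tx work) before Tx
 * interrupts are re-enabled.
 */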
2670 static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2672 struct gfar_priv_grp *gfargrp =
2673 container_of(napi, struct gfar_priv_grp, napi_tx);
2674 struct gfar __iomem *regs = gfargrp->regs;
2675 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2678 /* Clear IEVENT, so interrupts aren't called again
2679 * because of the packets that have already arrived
2681 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2683 /* run Tx cleanup to completion */
2684 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2685 gfar_clean_tx_ring(tx_queue);
2687 napi_complete(napi);
2689 spin_lock_irq(&gfargrp->grplock);
2690 imask = gfar_read(&regs->imask);
2691 imask |= IMASK_TX_DEFAULT;
2692 gfar_write(&regs->imask, imask);
2693 spin_unlock_irq(&gfargrp->grplock);
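/* NAPI Rx poll callback for multi-queue groups: the budget is split
 * evenly among the queues flagged active in RSTAT[RXF]; interrupts are
 * re-enabled only once every active queue has been fully drained.
 */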
2698 static int gfar_poll_rx(struct napi_struct *napi, int budget)
2700 struct gfar_priv_grp *gfargrp =
2701 container_of(napi, struct gfar_priv_grp, napi_rx);
2702 struct gfar_private *priv = gfargrp->priv;
2703 struct gfar __iomem *regs = gfargrp->regs;
2704 struct gfar_priv_rx_q *rx_queue = NULL;
2705 int work_done = 0, work_done_per_q = 0;
2706 int i, budget_per_q = 0;
2707 unsigned long rstat_rxf;
2710 /* Clear IEVENT, so interrupts aren't called again
2711 * because of the packets that have already arrived
2713 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2715 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2717 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2719 budget_per_q = budget / num_act_queues;
2721 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2722 /* skip queue if not active */
2723 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2726 rx_queue = priv->rx_queue[i];
2728 gfar_clean_rx_ring(rx_queue, budget_per_q);
2729 work_done += work_done_per_q;
2731 /* finished processing this queue */
2732 if (work_done_per_q < budget_per_q) {
2733 /* clear active queue hw indication */
2734 gfar_write(&regs->rstat,
2735 RSTAT_CLEAR_RXF0 >> i);
2738 if (!num_act_queues)
2743 if (!num_act_queues) {
2745 napi_complete_done(napi, work_done);
2747 /* Clear the halt bit in RSTAT */
2748 gfar_write(&regs->rstat, gfargrp->rstat);
2750 spin_lock_irq(&gfargrp->grplock);
2751 imask = gfar_read(&regs->imask);
2752 imask |= IMASK_RX_DEFAULT;
2753 gfar_write(&regs->imask, imask);
2754 spin_unlock_irq(&gfargrp->grplock);
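/* NAPI Tx poll callback for multi-queue groups: reclaim completed
 * frames on each Tx queue of the group, then re-enable Tx interrupts.
 */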
2760 static int gfar_poll_tx(struct napi_struct *napi, int budget)
2762 struct gfar_priv_grp *gfargrp =
2763 container_of(napi, struct gfar_priv_grp, napi_tx);
2764 struct gfar_private *priv = gfargrp->priv;
2765 struct gfar __iomem *regs = gfargrp->regs;
2766 struct gfar_priv_tx_q *tx_queue = NULL;
2767 int has_tx_work = 0;
2770 /* Clear IEVENT, so interrupts aren't called again
2771 * because of the packets that have already arrived
2773 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2775 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2776 tx_queue = priv->tx_queue[i];
2777 /* run Tx cleanup to completion */
2778 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2779 gfar_clean_tx_ring(tx_queue);
2786 napi_complete(napi);
2788 spin_lock_irq(&gfargrp->grplock);
2789 imask = gfar_read(&regs->imask);
2790 imask |= IMASK_TX_DEFAULT;
2791 gfar_write(&regs->imask, imask);
2792 spin_unlock_irq(&gfargrp->grplock);
2798 /* GFAR error interrupt handler */
2799 static irqreturn_t gfar_error(int irq, void *grp_id)
2801 struct gfar_priv_grp *gfargrp = grp_id;
2802 struct gfar __iomem *regs = gfargrp->regs;
2803 struct gfar_private *priv = gfargrp->priv;
2804 struct net_device *dev = priv->ndev;
2806 /* Save ievent for future reference */
2807 u32 events = gfar_read(&regs->ievent);
2810 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2812 /* Magic Packet is not an error. */
2813 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2814 (events & IEVENT_MAG))
2815 events &= ~IEVENT_MAG;
2818 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2820 "error interrupt (ievent=0x%08x imask=0x%08x)\n",
2821 events, gfar_read(®s->imask));
2823 /* Update the error counters */
2824 if (events & IEVENT_TXE) {
2825 dev->stats.tx_errors++;
2827 if (events & IEVENT_LC)
2828 dev->stats.tx_window_errors++;
2829 if (events & IEVENT_CRL)
2830 dev->stats.tx_aborted_errors++;
2831 if (events & IEVENT_XFUN) {
2832 netif_dbg(priv, tx_err, dev,
2833 "TX FIFO underrun, packet dropped\n");
2834 dev->stats.tx_dropped++;
2835 atomic64_inc(&priv->extra_stats.tx_underrun);
2837 schedule_work(&priv->reset_task);
2839 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2841 if (events & IEVENT_BSY) {
2842 dev->stats.rx_over_errors++;
2843 atomic64_inc(&priv->extra_stats.rx_bsy);
2845 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
2846 gfar_read(&regs->rstat));
2848 if (events & IEVENT_BABR) {
2849 dev->stats.rx_errors++;
2850 atomic64_inc(&priv->extra_stats.rx_babr);
2852 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2854 if (events & IEVENT_EBERR) {
2855 atomic64_inc(&priv->extra_stats.eberr);
2856 netif_dbg(priv, rx_err, dev, "bus error\n");
2858 if (events & IEVENT_RXC)
2859 netif_dbg(priv, rx_status, dev, "control frame\n");
2861 if (events & IEVENT_BABT) {
2862 atomic64_inc(&priv->extra_stats.tx_babt);
2863 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2868 /* The interrupt handler for devices with one interrupt */
2869 static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2871 struct gfar_priv_grp *gfargrp = grp_id;
2873 /* Save ievent for future reference */
2874 u32 events = gfar_read(&gfargrp->regs->ievent);
2876 /* Check for reception */
2877 if (events & IEVENT_RX_MASK)
2878 gfar_receive(irq, grp_id);
2880 /* Check for transmit completion */
2881 if (events & IEVENT_TX_MASK)
2882 gfar_transmit(irq, grp_id);
2884 /* Check for errors */
2885 if (events & IEVENT_ERR_MASK)
2886 gfar_error(irq, grp_id);
2891 #ifdef CONFIG_NET_POLL_CONTROLLER
2892 /* Polling 'interrupt' - used by things like netconsole to send skbs
2893 * without having to re-enable interrupts. It's not called while
2894 * the interrupt routine is executing.
2896 static void gfar_netpoll(struct net_device *dev)
2898 struct gfar_private *priv = netdev_priv(dev);
2901 /* If the device has multiple interrupts, run tx/rx */
2902 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2903 for (i = 0; i < priv->num_grps; i++) {
2904 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2906 disable_irq(gfar_irq(grp, TX)->irq);
2907 disable_irq(gfar_irq(grp, RX)->irq);
2908 disable_irq(gfar_irq(grp, ER)->irq);
2909 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2910 enable_irq(gfar_irq(grp, ER)->irq);
2911 enable_irq(gfar_irq(grp, RX)->irq);
2912 enable_irq(gfar_irq(grp, TX)->irq);
2915 for (i = 0; i < priv->num_grps; i++) {
2916 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2918 disable_irq(gfar_irq(grp, TX)->irq);
2919 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2920 enable_irq(gfar_irq(grp, TX)->irq);
2926 static void free_grp_irqs(struct gfar_priv_grp *grp)
2928 free_irq(gfar_irq(grp, TX)->irq, grp);
2929 free_irq(gfar_irq(grp, RX)->irq, grp);
2930 free_irq(gfar_irq(grp, ER)->irq, grp);
2933 static int register_grp_irqs(struct gfar_priv_grp *grp)
2935 struct gfar_private *priv = grp->priv;
2936 struct net_device *dev = priv->ndev;
2939 /* If the device has multiple interrupts, register for
2940 * them. Otherwise, only register for the one
2942 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2943 /* Install our interrupt handlers for Error,
2944 * Transmit, and Receive
2946 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2947 gfar_irq(grp, ER)->name, grp);
2949 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2950 gfar_irq(grp, ER)->irq);
2954 enable_irq_wake(gfar_irq(grp, ER)->irq);
2956 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2957 gfar_irq(grp, TX)->name, grp);
2959 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2960 gfar_irq(grp, TX)->irq);
2963 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2964 gfar_irq(grp, RX)->name, grp);
2966 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2967 gfar_irq(grp, RX)->irq);
2970 enable_irq_wake(gfar_irq(grp, RX)->irq);
2973 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2974 gfar_irq(grp, TX)->name, grp);
2976 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2977 gfar_irq(grp, TX)->irq);
2980 enable_irq_wake(gfar_irq(grp, TX)->irq);
2986 free_irq(gfar_irq(grp, TX)->irq, grp);
2988 free_irq(gfar_irq(grp, ER)->irq, grp);
2994 static void gfar_free_irq(struct gfar_private *priv)
2999 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3000 for (i = 0; i < priv->num_grps; i++)
3001 free_grp_irqs(&priv->gfargrp[i]);
3003 for (i = 0; i < priv->num_grps; i++)
3004 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq, &priv->gfargrp[i]);
3009 static int gfar_request_irq(struct gfar_private *priv)
3013 for (i = 0; i < priv->num_grps; i++) {
3014 err = register_grp_irqs(&priv->gfargrp[i]);
3016 for (j = 0; j < i; j++)
3017 free_grp_irqs(&priv->gfargrp[j]);
3025 /* Called when something needs to use the ethernet device
3026 * Returns 0 for success.
3028 static int gfar_enet_open(struct net_device *dev)
3030 struct gfar_private *priv = netdev_priv(dev);
3033 err = init_phy(dev);
3037 err = gfar_request_irq(priv);
3041 err = startup_gfar(dev);
3048 /* Stops the kernel queue, and halts the controller */
3049 static int gfar_close(struct net_device *dev)
3051 struct gfar_private *priv = netdev_priv(dev);
3053 cancel_work_sync(&priv->reset_task);
3056 /* Disconnect from the PHY */
3057 phy_disconnect(dev->phydev);
3059 gfar_free_irq(priv);
3064 /* Clears each of the exact match registers to zero, so they
3065 * don't interfere with normal reception
3067 static void gfar_clear_exact_match(struct net_device *dev)
3070 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
3072 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
3073 gfar_set_mac_for_addr(dev, idx, zero_arr);
3076 /* Update the hash table based on the current list of multicast
3077 * addresses we subscribe to. Also, change the promiscuity of
3078 * the device based on the flags (this function is called
3079 * whenever dev->flags is changed)
3081 static void gfar_set_multi(struct net_device *dev)
3083 struct netdev_hw_addr *ha;
3084 struct gfar_private *priv = netdev_priv(dev);
3085 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3088 if (dev->flags & IFF_PROMISC) {
3089 /* Set RCTRL to PROM */
3090 tempval = gfar_read(&regs->rctrl);
3091 tempval |= RCTRL_PROM;
3092 gfar_write(&regs->rctrl, tempval);
3094 /* Set RCTRL to not PROM */
3095 tempval = gfar_read(&regs->rctrl);
3096 tempval &= ~(RCTRL_PROM);
3097 gfar_write(&regs->rctrl, tempval);
3100 if (dev->flags & IFF_ALLMULTI) {
3101 /* Set the hash to rx all multicast frames */
3102 gfar_write(&regs->igaddr0, 0xffffffff);
3103 gfar_write(&regs->igaddr1, 0xffffffff);
3104 gfar_write(&regs->igaddr2, 0xffffffff);
3105 gfar_write(&regs->igaddr3, 0xffffffff);
3106 gfar_write(&regs->igaddr4, 0xffffffff);
3107 gfar_write(&regs->igaddr5, 0xffffffff);
3108 gfar_write(&regs->igaddr6, 0xffffffff);
3109 gfar_write(&regs->igaddr7, 0xffffffff);
3110 gfar_write(&regs->gaddr0, 0xffffffff);
3111 gfar_write(&regs->gaddr1, 0xffffffff);
3112 gfar_write(&regs->gaddr2, 0xffffffff);
3113 gfar_write(&regs->gaddr3, 0xffffffff);
3114 gfar_write(&regs->gaddr4, 0xffffffff);
3115 gfar_write(&regs->gaddr5, 0xffffffff);
3116 gfar_write(&regs->gaddr6, 0xffffffff);
3117 gfar_write(&regs->gaddr7, 0xffffffff);
3122 /* zero out the hash */
3123 gfar_write(&regs->igaddr0, 0x0);
3124 gfar_write(&regs->igaddr1, 0x0);
3125 gfar_write(&regs->igaddr2, 0x0);
3126 gfar_write(&regs->igaddr3, 0x0);
3127 gfar_write(&regs->igaddr4, 0x0);
3128 gfar_write(&regs->igaddr5, 0x0);
3129 gfar_write(&regs->igaddr6, 0x0);
3130 gfar_write(&regs->igaddr7, 0x0);
3131 gfar_write(&regs->gaddr0, 0x0);
3132 gfar_write(&regs->gaddr1, 0x0);
3133 gfar_write(&regs->gaddr2, 0x0);
3134 gfar_write(&regs->gaddr3, 0x0);
3135 gfar_write(&regs->gaddr4, 0x0);
3136 gfar_write(&regs->gaddr5, 0x0);
3137 gfar_write(&regs->gaddr6, 0x0);
3138 gfar_write(&regs->gaddr7, 0x0);
3140 /* If we have extended hash tables, we need to
3141 * clear the exact match registers to prepare for
3144 if (priv->extended_hash) {
3145 em_num = GFAR_EM_NUM + 1;
3146 gfar_clear_exact_match(dev);
3153 if (netdev_mc_empty(dev))
3156 /* Parse the list, and set the appropriate bits */
3157 netdev_for_each_mc_addr(ha, dev) {
3159 gfar_set_mac_for_addr(dev, idx, ha->addr);
3162 gfar_set_hash_for_addr(dev, ha->addr);
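/* Soft-reset the MAC and reprogram it to its configured state:
 * frame-length limits, offloads, hash/exact-match filters, station
 * address and interrupt coalescing. Also used on the restore path.
 */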
3167 void gfar_mac_reset(struct gfar_private *priv)
3169 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3172 /* Reset MAC layer */
3173 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3175 /* We need to delay at least 3 TX clocks */
3178 /* the soft reset bit is not self-resetting, so we need to
3179 * clear it before resuming normal operation
3181 gfar_write(&regs->maccfg1, 0);
3185 gfar_rx_offload_en(priv);
3187 /* Initialize the max receive frame/buffer lengths */
3188 gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
3189 gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3191 /* Initialize the Minimum Frame Length Register */
3192 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3194 /* Initialize MACCFG2. */
3195 tempval = MACCFG2_INIT_SETTINGS;
3197 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
3198 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
3199 * and by checking RxBD[LG] and discarding larger than MAXFRM.
3201 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3202 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3204 gfar_write(&regs->maccfg2, tempval);
3206 /* Clear mac addr hash registers */
3207 gfar_write(&regs->igaddr0, 0);
3208 gfar_write(&regs->igaddr1, 0);
3209 gfar_write(&regs->igaddr2, 0);
3210 gfar_write(&regs->igaddr3, 0);
3211 gfar_write(&regs->igaddr4, 0);
3212 gfar_write(&regs->igaddr5, 0);
3213 gfar_write(&regs->igaddr6, 0);
3214 gfar_write(&regs->igaddr7, 0);
3216 gfar_write(&regs->gaddr0, 0);
3217 gfar_write(&regs->gaddr1, 0);
3218 gfar_write(&regs->gaddr2, 0);
3219 gfar_write(&regs->gaddr3, 0);
3220 gfar_write(&regs->gaddr4, 0);
3221 gfar_write(&regs->gaddr5, 0);
3222 gfar_write(&regs->gaddr6, 0);
3223 gfar_write(&regs->gaddr7, 0);
3225 if (priv->extended_hash)
3226 gfar_clear_exact_match(priv->ndev);
3228 gfar_mac_rx_config(priv);
3230 gfar_mac_tx_config(priv);
3232 gfar_set_mac_address(priv->ndev);
3234 gfar_set_multi(priv->ndev);
3236 /* clear ievent and imask before configuring coalescing */
3237 gfar_ints_disable(priv);
3239 /* Configure the coalescing support */
3240 gfar_configure_coalescing_all(priv);
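/* One-time controller setup at probe time: stop the DMA engine in case
 * firmware left it running, reset the MAC, clear the RMON MIB block if
 * present, and program stashing attributes, FIFO thresholds and (on
 * multi-group devices) the interrupt steering registers.
 */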
3243 static void gfar_hw_init(struct gfar_private *priv)
3245 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3248 /* Stop the DMA engine now, in case it was running before
3249 * (The firmware could have used it, and left it running).
3253 gfar_mac_reset(priv);
3255 /* Zero out the rmon mib registers if it has them */
3256 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3257 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
3259 /* Mask off the CAM interrupts */
3260 gfar_write(&regs->rmon.cam1, 0xffffffff);
3261 gfar_write(&regs->rmon.cam2, 0xffffffff);
3264 /* Initialize ECNTRL */
3265 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3267 /* Set the extraction length and index */
3268 attrs = ATTRELI_EL(priv->rx_stash_size) |
3269 ATTRELI_EI(priv->rx_stash_index);
3271 gfar_write(&regs->attreli, attrs);
3273 /* Start with defaults, and add stashing
3274 * depending on driver parameters
3276 attrs = ATTR_INIT_SETTINGS;
3278 if (priv->bd_stash_en)
3279 attrs |= ATTR_BDSTASH;
3281 if (priv->rx_stash_size != 0)
3282 attrs |= ATTR_BUFSTASH;
3284 gfar_write(&regs->attr, attrs);
3287 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
3288 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
3289 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3291 /* Program the interrupt steering regs, only for MG devices */
3292 if (priv->num_grps > 1)
3293 gfar_write_isrg(priv);
3296 static const struct net_device_ops gfar_netdev_ops = {
3297 .ndo_open = gfar_enet_open,
3298 .ndo_start_xmit = gfar_start_xmit,
3299 .ndo_stop = gfar_close,
3300 .ndo_change_mtu = gfar_change_mtu,
3301 .ndo_set_features = gfar_set_features,
3302 .ndo_set_rx_mode = gfar_set_multi,
3303 .ndo_tx_timeout = gfar_timeout,
3304 .ndo_do_ioctl = gfar_ioctl,
3305 .ndo_get_stats = gfar_get_stats,
3306 .ndo_change_carrier = fixed_phy_change_carrier,
3307 .ndo_set_mac_address = gfar_set_mac_addr,
3308 .ndo_validate_addr = eth_validate_addr,
3309 #ifdef CONFIG_NET_POLL_CONTROLLER
3310 .ndo_poll_controller = gfar_netpoll,
3314 /* Set up the ethernet device structure, private data,
3315 * and anything else we need before we start
3317 static int gfar_probe(struct platform_device *ofdev)
3319 struct device_node *np = ofdev->dev.of_node;
3320 struct net_device *dev = NULL;
3321 struct gfar_private *priv = NULL;
3324 err = gfar_of_init(ofdev, &dev);
3329 priv = netdev_priv(dev);
3331 priv->ofdev = ofdev;
3332 priv->dev = &ofdev->dev;
3333 SET_NETDEV_DEV(dev, &ofdev->dev);
3335 INIT_WORK(&priv->reset_task, gfar_reset_task);
3337 platform_set_drvdata(ofdev, priv);
3339 gfar_detect_errata(priv);
3341 /* Set the dev->base_addr to the gfar reg region */
3342 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3344 /* Fill in the dev structure */
3345 dev->watchdog_timeo = TX_TIMEOUT;
3346 /* MTU range: 50 - 9586 */
3349 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3350 dev->netdev_ops = &gfar_netdev_ops;
3351 dev->ethtool_ops = &gfar_ethtool_ops;
3353 /* Register a NAPI context for each interrupt group */
3354 for (i = 0; i < priv->num_grps; i++) {
3355 if (priv->poll_mode == GFAR_SQ_POLLING) {
3356 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3357 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
3358 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
3359 gfar_poll_tx_sq, 2);
3361 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3362 gfar_poll_rx, GFAR_DEV_WEIGHT);
3363 netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, gfar_poll_tx, 2);
3368 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3369 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
3371 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3372 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3375 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3376 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3377 NETIF_F_HW_VLAN_CTAG_RX;
3378 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3381 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3383 gfar_init_addr_hash_table(priv);
3385 /* Insert receive time stamps into the padding alignment bytes,
3386 * plus 2 bytes of padding to ensure proper CPU alignment.
3388 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3389 priv->padding = 8 + DEFAULT_PADDING;
3391 if (dev->features & NETIF_F_IP_CSUM ||
3392 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3393 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
3395 /* Initializing some of the rx/tx queue level parameters */
3396 for (i = 0; i < priv->num_tx_queues; i++) {
3397 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3398 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3399 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3400 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3403 for (i = 0; i < priv->num_rx_queues; i++) {
3404 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3405 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3406 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3409 /* Always enable rx filer if available */
3410 priv->rx_filer_enable =
3411 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3412 /* Enable most messages by default */
3413 priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
3414 /* use priority h/w tx queue scheduling for single queue devices */
3415 if (priv->num_tx_queues == 1)
3416 priv->prio_sched_en = 1;
3418 set_bit(GFAR_DOWN, &priv->state);
3422 /* Carrier starts down, phylib will bring it up */
3423 netif_carrier_off(dev);
3425 err = register_netdev(dev);
3428 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3432 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3433 priv->wol_supported |= GFAR_WOL_MAGIC;
3435 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3436 priv->rx_filer_enable)
3437 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3439 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3441 /* fill out IRQ number and name fields */
3442 for (i = 0; i < priv->num_grps; i++) {
3443 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3444 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3445 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3446 dev->name, "_g", '0' + i, "_tx");
3447 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3448 dev->name, "_g", '0' + i, "_rx");
3449 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3450 dev->name, "_g", '0' + i, "_er");
3452 strcpy(gfar_irq(grp, TX)->name, dev->name);
3455 /* Initialize the filer table */
3456 gfar_init_filer_table(priv);
3458 /* Print out the device info */
3459 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3461 /* Even more device info helps when determining which kernel
3462 * provided which set of benchmarks.
3464 netdev_info(dev, "Running with NAPI enabled\n");
3465 for (i = 0; i < priv->num_rx_queues; i++)
3466 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3467 i, priv->rx_queue[i]->rx_ring_size);
3468 for (i = 0; i < priv->num_tx_queues; i++)
3469 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3470 i, priv->tx_queue[i]->tx_ring_size);
3475 if (of_phy_is_fixed_link(np))
3476 of_phy_deregister_fixed_link(np);
3477 unmap_group_regs(priv);
3478 gfar_free_rx_queues(priv);
3479 gfar_free_tx_queues(priv);
3480 of_node_put(priv->phy_node);
3481 of_node_put(priv->tbi_node);
3482 free_gfar_dev(priv);
3486 static int gfar_remove(struct platform_device *ofdev)
3488 struct gfar_private *priv = platform_get_drvdata(ofdev);
3489 struct device_node *np = ofdev->dev.of_node;
3491 of_node_put(priv->phy_node);
3492 of_node_put(priv->tbi_node);
3494 unregister_netdev(priv->ndev);
3496 if (of_phy_is_fixed_link(np))
3497 of_phy_deregister_fixed_link(np);
3499 unmap_group_regs(priv);
3500 gfar_free_rx_queues(priv);
3501 gfar_free_tx_queues(priv);
3502 free_gfar_dev(priv);
3509 static void __gfar_filer_disable(struct gfar_private *priv)
3511 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3514 temp = gfar_read(&regs->rctrl);
3515 temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
3516 gfar_write(&regs->rctrl, temp);
3519 static void __gfar_filer_enable(struct gfar_private *priv)
3521 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3524 temp = gfar_read(&regs->rctrl);
3525 temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
3526 gfar_write(&regs->rctrl, temp);
3529 /* Filer rules implementing wol capabilities */
3530 static void gfar_filer_config_wol(struct gfar_private *priv)
3535 __gfar_filer_disable(priv);
3537 /* clear the filer table, reject any packet by default */
3538 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3539 for (i = 0; i <= MAX_FILER_IDX; i++)
3540 gfar_write_filer(priv, i, rqfcr, 0);
3543 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3544 /* unicast packet, accept it */
3545 struct net_device *ndev = priv->ndev;
3546 /* get the default rx queue index */
3547 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
3548 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3549 (ndev->dev_addr[1] << 8) | ndev->dev_addr[2];
3552 rqfcr = (qindex << 10) | RQFCR_AND |
3553 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3555 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3557 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3558 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5];
3560 rqfcr = (qindex << 10) | RQFCR_GPI |
3561 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3562 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
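/* The two rules above match the station address in halves: the first
 * (RQFCR_AND, chained with the next rule) compares the upper three
 * address bytes via DAH, the second compares the lower three via DAL
 * and raises the Filer General Purpose Interrupt as the wake event.
 */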
3565 __gfar_filer_enable(priv);
3568 static void gfar_filer_restore_table(struct gfar_private *priv)
3573 __gfar_filer_disable(priv);
3575 for (i = 0; i <= MAX_FILER_IDX; i++) {
3576 rqfcr = priv->ftp_rqfcr[i];
3577 rqfpr = priv->ftp_rqfpr[i];
3578 gfar_write_filer(priv, i, rqfcr, rqfpr);
3581 __gfar_filer_enable(priv);
3584 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
3585 static void gfar_start_wol_filer(struct gfar_private *priv)
3587 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3591 /* Enable Rx hw queues */
3592 gfar_write(&regs->rqueue, priv->rqueue);
3594 /* Initialize DMACTRL to have WWR and WOP */
3595 tempval = gfar_read(&regs->dmactrl);
3596 tempval |= DMACTRL_INIT_SETTINGS;
3597 gfar_write(&regs->dmactrl, tempval);
3599 /* Make sure we aren't stopped */
3600 tempval = gfar_read(&regs->dmactrl);
3601 tempval &= ~DMACTRL_GRS;
3602 gfar_write(&regs->dmactrl, tempval);
3604 for (i = 0; i < priv->num_grps; i++) {
3605 regs = priv->gfargrp[i].regs;
3606 /* Clear RHLT, so that the DMA starts polling now */
3607 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3608 /* enable the Filer General Purpose Interrupt */
3609 gfar_write(&regs->imask, IMASK_FGPI);
3613 tempval = gfar_read(&regs->maccfg1);
3614 tempval |= MACCFG1_RX_EN;
3615 gfar_write(&regs->maccfg1, tempval);
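/* PM suspend handler: detach the interface and halt traffic; if
 * Wake-on-LAN is configured, leave the Rx block armed for Magic Packet
 * or filer-based unicast wake-up, otherwise stop the PHY.
 */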
3618 static int gfar_suspend(struct device *dev)
3620 struct gfar_private *priv = dev_get_drvdata(dev);
3621 struct net_device *ndev = priv->ndev;
3622 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3624 u16 wol = priv->wol_opts;
3626 if (!netif_running(ndev))
3630 netif_tx_lock(ndev);
3631 netif_device_detach(ndev);
3632 netif_tx_unlock(ndev);
3636 if (wol & GFAR_WOL_MAGIC) {
3637 /* Enable interrupt on Magic Packet */
3638 gfar_write(&regs->imask, IMASK_MAG);
3640 /* Enable Magic Packet mode */
3641 tempval = gfar_read(&regs->maccfg2);
3642 tempval |= MACCFG2_MPEN;
3643 gfar_write(&regs->maccfg2, tempval);
3645 /* re-enable the Rx block */
3646 tempval = gfar_read(&regs->maccfg1);
3647 tempval |= MACCFG1_RX_EN;
3648 gfar_write(&regs->maccfg1, tempval);
3650 } else if (wol & GFAR_WOL_FILER_UCAST) {
3651 gfar_filer_config_wol(priv);
3652 gfar_start_wol_filer(priv);
3655 phy_stop(ndev->phydev);
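/* PM resume counterpart: undo the wake-up configuration, restore the
 * filer table if wake-on-filer was used, restart the PHY and reattach
 * the interface.
 */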
3661 static int gfar_resume(struct device *dev)
3663 struct gfar_private *priv = dev_get_drvdata(dev);
3664 struct net_device *ndev = priv->ndev;
3665 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3667 u16 wol = priv->wol_opts;
3669 if (!netif_running(ndev))
3672 if (wol & GFAR_WOL_MAGIC) {
3673 /* Disable Magic Packet mode */
3674 tempval = gfar_read(&regs->maccfg2);
3675 tempval &= ~MACCFG2_MPEN;
3676 gfar_write(&regs->maccfg2, tempval);
3678 } else if (wol & GFAR_WOL_FILER_UCAST) {
3679 /* need to stop rx only, tx is already down */
3681 gfar_filer_restore_table(priv);
3684 phy_start(ndev->phydev);
3689 netif_device_attach(ndev);
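/* Hibernation restore: unlike resume, hardware state is assumed to be
 * lost, so the BD rings and MAC are fully re-initialized before the
 * interface is reattached.
 */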
3695 static int gfar_restore(struct device *dev)
3697 struct gfar_private *priv = dev_get_drvdata(dev);
3698 struct net_device *ndev = priv->ndev;
3700 if (!netif_running(ndev)) {
3701 netif_device_attach(ndev);
3706 gfar_init_bds(ndev);
3708 gfar_mac_reset(priv);
3710 gfar_init_tx_rx_base(priv);
3716 priv->oldduplex = -1;
3719 phy_start(ndev->phydev);
3721 netif_device_attach(ndev);
3727 static const struct dev_pm_ops gfar_pm_ops = {
3728 .suspend = gfar_suspend,
3729 .resume = gfar_resume,
3730 .freeze = gfar_suspend,
3731 .thaw = gfar_resume,
3732 .restore = gfar_restore,
3735 #define GFAR_PM_OPS (&gfar_pm_ops)
3739 #define GFAR_PM_OPS NULL
3743 static const struct of_device_id gfar_match[] =
3747 .compatible = "gianfar",
3750 .compatible = "fsl,etsec2",
3754 MODULE_DEVICE_TABLE(of, gfar_match);
3756 /* Structure for a device driver */
3757 static struct platform_driver gfar_driver = {
3759 .name = "fsl-gianfar",
3761 .of_match_table = gfar_match,
3763 .probe = gfar_probe,
3764 .remove = gfar_remove,
3767 module_platform_driver(gfar_driver);