// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>

/* Descriptor ring sizes */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)

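/* Hardware buffer descriptor, matching the AXI DMA scatter-gather
 * descriptor layout: every DMA address is split into _lo/_hi 32-bit
 * halves so the same structure serves 32-bit and 64-bit physical
 * addressing, and the sw_id_offset words are repurposed to stash the
 * pointer of the sk_buff backing each RX buffer.
 */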
struct nixge_hw_dma_bd {
	u32 next_lo;
	u32 next_hi;
	u32 phys_lo;
	u32 phys_hi;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset_lo;
	u32 sw_id_offset_hi;
	u32 reserved6;
};

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	do { \
		(bd)->field##_lo = lower_32_bits((addr)); \
		(bd)->field##_hi = upper_32_bits((addr)); \
	} while (0)
#else
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	((bd)->field##_lo = lower_32_bits((addr)))
#endif

#define nixge_hw_dma_bd_set_phys(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), phys, (addr))

#define nixge_hw_dma_bd_set_next(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), next, (addr))

#define nixge_hw_dma_bd_set_offset(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
#else
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((bd)->field##_lo)
#endif

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
				     dma_addr_t addr)
{
	writel(lower_32_bits(addr), priv->dma_regs + offset);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
#endif
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

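/* Undo everything nixge_hw_dma_bd_init() set up: unmap and free the
 * RX buffers, then free both descriptor rings. Safe to call on a
 * partially initialized ring, which is why the init error path uses it.
 */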
static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	dma_addr_t phys_addr;
	struct sk_buff *skb;
	int i;

	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
							     phys);

			dma_unmap_single(ndev->dev.parent, phys_addr,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);

			skb = (struct sk_buff *)(uintptr_t)
				nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
							 sw_id_offset);
			dev_kfree_skb(skb);
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

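/* Allocate the TX/RX descriptor rings and the RX skb pool, chain each
 * ring into a circle through the descriptors' next pointers, program
 * interrupt coalescing, and finally start both DMA channels.
 */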
static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t phys;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					   &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					   &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
					 priv->tx_bd_p +
					 sizeof(*priv->tx_bd_v) *
					 ((i + 1) % TX_BD_NUM));
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
					 priv->rx_bd_p
					 + sizeof(*priv->rx_bd_v) *
					 ((i + 1) % RX_BD_NUM));

		skb = netdev_alloc_skb_ip_align(ndev,
						NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
		phys = dma_map_single(ndev->dev.parent, skb->data,
				      NIXGE_MAX_JUMBO_FRAME_SIZE,
				      DMA_FROM_DEVICE);

		nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);

		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (cr & ~XAXIDMA_COALESCE_MASK) |
	     (priv->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT);
	/* Update the delay timer count */
	cr = (cr & ~XAXIDMA_DELAY_MASK) |
	     (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT);
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (cr & ~XAXIDMA_COALESCE_MASK) |
	     (priv->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT);
	/* Update the delay timer count */
	cr = (cr & ~XAXIDMA_DELAY_MASK) |
	     (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT);
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
				 (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

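/* Reset one AXI DMA channel via its control register offset. Per the
 * comment below, resetting the DMA also resets the NIXGE Ethernet
 * core, so pending transfers are flushed; we poll until the hardware
 * clears the reset bit again.
 */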
static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

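/* Release one TX bookkeeping slot: head data is unmapped as a single
 * mapping, fragments as page mappings, and the skb itself is freed
 * once its last descriptor has completed.
 */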
static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;

	return 0;
}

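/* Transmit path: the linear head of the skb goes into one descriptor
 * flagged TXSOF, each page fragment into a follow-up descriptor, and
 * the last one gets TXEOF added. Writing the tail pointer register
 * hands the chain to the hardware. Illustrative descriptor usage for
 * a two-fragment skb:
 *
 *   BD[t]   cntrl = headlen | TXSOF
 *   BD[t+1] cntrl = frag0 size
 *   BD[t+2] cntrl = frag1 size | TXEOF   <- tail pointer written here
 */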
static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p, cur_phys;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_OK;
	}

	cur_phys = dma_map_single(ndev->dev.parent, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_phys))
		goto drop;
	nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys))
			goto frag_err;
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;

frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

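/* Receive path: walk completed RX descriptors up to the NAPI budget,
 * hand each filled skb to the stack via GRO, then immediately refill
 * the descriptor with a freshly mapped skb and advance the tail
 * pointer so the hardware can keep receiving.
 */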
static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0, cur_phys = 0;
	u32 packets = 0;
	u32 length;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(uintptr_t)
			nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent,
				 nixge_hw_dma_bd_get_addr(cur_p, phys),
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_phys = dma_map_single(ndev->dev.parent, new_skb->data,
					  NIXGE_MAX_JUMBO_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

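/* NAPI poll callback: if the budget was not exhausted, complete NAPI
 * and re-enable RX interrupts unless more completions arrived in the
 * meantime, in which case NAPI is rescheduled instead.
 */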
static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_reschedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci],
						phys);

		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs; NAPI will poll from here on */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		if (napi_schedule_prep(&priv->napi))
			__napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
						phys);
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= ~XAXIDMA_IRQ_ALL_MASK;
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static void nixge_dma_err_handler(unsigned long data)
{
	struct nixge_priv *lp = (struct nixge_priv *)data;
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		nixge_hw_dma_bd_set_phys(cur_p, 0);
		cur_p->cntrl = 0;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, 0);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (cr & ~XAXIDMA_COALESCE_MASK) |
	     (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT);
	/* Update the delay timer count */
	cr = (cr & ~XAXIDMA_DELAY_MASK) |
	     (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT);
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (cr & ~XAXIDMA_COALESCE_MASK) |
	     (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT);
	/* Update the delay timer count */
	cr = (cr & ~XAXIDMA_DELAY_MASK) |
	     (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT);
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
				 (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

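/* ndo_open: reset the datapath, connect and start the PHY, set up the
 * DMA error tasklet and NAPI, then request the TX and RX interrupts
 * before opening the transmit queue. The error path unwinds in
 * reverse order.
 */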
static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
		     (unsigned long)priv);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & ~XAXIDMA_CR_RUNSTOP_MASK);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & ~XAXIDMA_CR_RUNSTOP_MASK);

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	     NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

static s32 __nixge_hw_set_mac_address(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_LSB,
			     (ndev->dev_addr[2]) << 24 |
			     (ndev->dev_addr[3] << 16) |
			     (ndev->dev_addr[4] << 8) |
			     (ndev->dev_addr[5] << 0));

	nixge_ctrl_write_reg(priv, NIXGE_REG_MAC_MSB,
			     (ndev->dev_addr[1] | (ndev->dev_addr[0] << 8)));

	return 0;
}

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open		= nixge_open,
	.ndo_stop		= nixge_stop,
	.ndo_start_xmit		= nixge_start_xmit,
	.ndo_change_mtu		= nixge_change_mtu,
	.ndo_set_mac_address	= nixge_net_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strlcpy(ed->driver, "nixge", sizeof(ed->driver));
	strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int nixge_ethtools_get_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int nixge_ethtools_set_coalesce(struct net_device *ndev,
				       struct ethtool_coalesce *ecoalesce)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_coalesce_usecs ||
	    ecoalesce->rx_coalesce_usecs_irq ||
	    ecoalesce->rx_max_coalesced_frames_irq ||
	    ecoalesce->tx_coalesce_usecs ||
	    ecoalesce->tx_coalesce_usecs_irq ||
	    ecoalesce->tx_max_coalesced_frames_irq ||
	    ecoalesce->stats_block_coalesce_usecs ||
	    ecoalesce->use_adaptive_rx_coalesce ||
	    ecoalesce->use_adaptive_tx_coalesce ||
	    ecoalesce->pkt_rate_low ||
	    ecoalesce->rx_coalesce_usecs_low ||
	    ecoalesce->rx_max_coalesced_frames_low ||
	    ecoalesce->tx_coalesce_usecs_low ||
	    ecoalesce->tx_max_coalesced_frames_low ||
	    ecoalesce->pkt_rate_high ||
	    ecoalesce->rx_coalesce_usecs_high ||
	    ecoalesce->rx_max_coalesced_frames_high ||
	    ecoalesce->tx_coalesce_usecs_high ||
	    ecoalesce->tx_max_coalesced_frames_high ||
	    ecoalesce->rate_sample_interval)
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

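/* Only frame-count based coalescing is supported. Illustrative use
 * (not taken from this driver's documentation), with the interface
 * down:
 *
 *   ethtool -C <iface> rx-frames 32 tx-frames 32
 *
 * which maps onto coalesce_count_rx/coalesce_count_tx above.
 */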
static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Enable identification LED override */
		ctrl |= NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.get_drvinfo		= nixge_ethtools_get_drvinfo,
	.get_coalesce		= nixge_ethtools_get_coalesce,
	.set_coalesce		= nixge_ethtools_set_coalesce,
	.set_phys_id		= nixge_ethtools_set_phys_id,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
};

static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
	}

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	if (reg & MII_ADDR_C45) {
		device = (reg >> 16) & 0x1f;

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err) {
			dev_err(priv->dev, "timeout setting address");
			return err;
		}

		tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
			| NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	} else {
		device = reg & 0x1f;

		tmp = NIXGE_MDIO_CLAUSE22 |
			NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
			NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
		nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

		err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
					      !status, 10, 1000);
		if (err)
			dev_err(priv->dev, "timeout setting write command");
	}

	return err;
}

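/* Register the MDIO bus that lives behind the MAC's control window.
 * The read/write ops above speak both Clause 22 and Clause 45; the
 * C45 case issues a separate address cycle before the data access.
 */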
static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read;
	bus->write = nixge_mdio_write;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return NULL;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct nixge_priv *priv;
	struct net_device *ndev;
	struct resource *dmares;
	u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (mac_addr && is_valid_ether_addr(mac_addr)) {
		ether_addr_copy(ndev->dev_addr, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);

	dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		return PTR_ERR(priv->dma_regs);
	}
	priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq\n");
		return priv->tx_irq;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq\n");
		return priv->rx_irq;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	err = nixge_mdio_setup(priv, pdev->dev.of_node);
	if (err) {
		netdev_err(ndev, "error registering mdio bus");
		goto free_netdev;
	}

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		err = -ENODEV;
		goto unregister_mdio;
	}

	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!priv->phy_node) {
		netdev_err(ndev, "could not find \"phy-handle\" property\n");
		err = -ENODEV;
		goto unregister_mdio;
	}

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto unregister_mdio;
	}

	return 0;

unregister_mdio:
	mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static int nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);

	return 0;
}

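/* A minimal, illustrative device-tree node for this driver. Apart
 * from the compatible string, the "tx"/"rx" interrupt names and the
 * "address" nvmem cell name (all used by the probe code above), the
 * values below are examples only:
 *
 *	ethernet@40000000 {
 *		compatible = "ni,xge-enet-2.00";
 *		reg = <0x40000000 0x6000>;
 *		interrupt-names = "rx", "tx";
 *		interrupts = <0 29 4>, <0 30 4>;
 *		nvmem-cells = <&eth1_addr>;
 *		nvmem-cell-names = "address";
 *		phy-mode = "xgmii";
 *		phy-handle = <&ethernet_phy1>;
 *	};
 */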
/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= of_match_ptr(nixge_dt_ids),
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");