1 /* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
15 #include <linux/of_device.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/mfd/syscon.h>
19 #include <linux/regmap.h>
20 #include <linux/clk.h>
21 #include <linux/if_vlan.h>
22 #include <linux/reset.h>
23 #include <linux/tcp.h>
25 #include "mtk_eth_soc.h"
27 static int mtk_msg_level = -1;
28 module_param_named(msg_level, mtk_msg_level, int, 0);
29 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
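/* MTK_ETHTOOL_STAT() pairs a counter name with its offset, counted in u64
 * words, inside struct mtk_hw_stats; the table below uses it to drive the
 * ethtool statistics callbacks.
 */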
31 #define MTK_ETHTOOL_STAT(x) { #x, \
32 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
34 /* strings used by ethtool */
35 static const struct mtk_ethtool_stats {
36 char str[ETH_GSTRING_LEN];
37 u32 offset;
38 } mtk_ethtool_stats[] = {
39 MTK_ETHTOOL_STAT(tx_bytes),
40 MTK_ETHTOOL_STAT(tx_packets),
41 MTK_ETHTOOL_STAT(tx_skip),
42 MTK_ETHTOOL_STAT(tx_collisions),
43 MTK_ETHTOOL_STAT(rx_bytes),
44 MTK_ETHTOOL_STAT(rx_packets),
45 MTK_ETHTOOL_STAT(rx_overflow),
46 MTK_ETHTOOL_STAT(rx_fcs_errors),
47 MTK_ETHTOOL_STAT(rx_short_errors),
48 MTK_ETHTOOL_STAT(rx_long_errors),
49 MTK_ETHTOOL_STAT(rx_checksum_errors),
50 MTK_ETHTOOL_STAT(rx_flow_control_packets),
53 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
55 __raw_writel(val, eth->base + reg);
58 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
60 return __raw_readl(eth->base + reg);
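/* Poll the PHY indirect access control register (MTK_PHY_IAC) until the
 * busy bit clears; callers treat a non-zero return as a timeout, which is
 * also reported with dev_err().
 */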
63 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
65 unsigned long t_start = jiffies;
68 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
70 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
75 dev_err(eth->dev, "mdio: MDIO timeout\n");
79 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
80 u32 phy_register, u32 write_data)
82 if (mtk_mdio_busy_wait(eth))
87 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
88 (phy_register << PHY_IAC_REG_SHIFT) |
89 (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
92 if (mtk_mdio_busy_wait(eth))
98 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
102 if (mtk_mdio_busy_wait(eth))
105 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
106 (phy_reg << PHY_IAC_REG_SHIFT) |
107 (phy_addr << PHY_IAC_ADDR_SHIFT),
110 if (mtk_mdio_busy_wait(eth))
113 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
118 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
119 int phy_reg, u16 val)
121 struct mtk_eth *eth = bus->priv;
123 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
126 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
128 struct mtk_eth *eth = bus->priv;
130 return _mtk_mdio_read(eth, phy_addr, phy_reg);
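/* PHY adjust_link callback: rebuild the MAC control word from the negotiated
 * speed and duplex, resolve pause frames with mii_resolve_flowctrl_fdx(),
 * write the result to MTK_MAC_MCR and update the carrier state.
 */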
133 static void mtk_phy_link_adjust(struct net_device *dev)
135 struct mtk_mac *mac = netdev_priv(dev);
136 u16 lcl_adv = 0, rmt_adv = 0;
138 u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
139 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
140 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
143 switch (mac->phy_dev->speed) {
145 mcr |= MAC_MCR_SPEED_1000;
148 mcr |= MAC_MCR_SPEED_100;
152 if (mac->phy_dev->link)
153 mcr |= MAC_MCR_FORCE_LINK;
155 if (mac->phy_dev->duplex) {
156 mcr |= MAC_MCR_FORCE_DPX;
158 if (mac->phy_dev->pause)
159 rmt_adv = LPA_PAUSE_CAP;
160 if (mac->phy_dev->asym_pause)
161 rmt_adv |= LPA_PAUSE_ASYM;
163 if (mac->phy_dev->advertising & ADVERTISED_Pause)
164 lcl_adv |= ADVERTISE_PAUSE_CAP;
165 if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
166 lcl_adv |= ADVERTISE_PAUSE_ASYM;
168 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
170 if (flowctrl & FLOW_CTRL_TX)
171 mcr |= MAC_MCR_FORCE_TX_FC;
172 if (flowctrl & FLOW_CTRL_RX)
173 mcr |= MAC_MCR_FORCE_RX_FC;
175 netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
176 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
177 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
180 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
182 if (mac->phy_dev->link)
183 netif_carrier_on(dev);
185 netif_carrier_off(dev);
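/* Read the PHY address and interface mode from the device-tree node and
 * attach the PHY to the MAC's netdev with mtk_phy_link_adjust() as the
 * link change handler.
 */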
188 static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
189 struct device_node *phy_node)
191 const __be32 *_addr = NULL;
192 struct phy_device *phydev;
195 _addr = of_get_property(phy_node, "reg", NULL);
197 if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
198 pr_err("%s: invalid phy address\n", phy_node->name);
201 addr = be32_to_cpu(*_addr);
202 phy_mode = of_get_phy_mode(phy_node);
204 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
208 phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
209 mtk_phy_link_adjust, 0, phy_mode);
211 dev_err(eth->dev, "could not connect to PHY\n");
216 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
217 mac->id, phydev_name(phydev), phydev->phy_id,
220 mac->phy_dev = phydev;
225 static int mtk_phy_connect(struct mtk_mac *mac)
227 struct mtk_eth *eth = mac->hw;
228 struct device_node *np;
231 np = of_parse_phandle(mac->of_node, "phy-handle", 0);
232 if (!np && of_phy_is_fixed_link(mac->of_node))
233 if (!of_phy_register_fixed_link(mac->of_node))
234 np = of_node_get(mac->of_node);
238 switch (of_get_phy_mode(np)) {
239 case PHY_INTERFACE_MODE_RGMII_TXID:
240 case PHY_INTERFACE_MODE_RGMII_RXID:
241 case PHY_INTERFACE_MODE_RGMII_ID:
242 case PHY_INTERFACE_MODE_RGMII:
245 case PHY_INTERFACE_MODE_MII:
248 case PHY_INTERFACE_MODE_REVMII:
251 case PHY_INTERFACE_MODE_RMII:
260 /* put the gmac into the right mode */
261 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
262 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
263 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
264 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
266 mtk_phy_connect_node(eth, mac, np);
267 mac->phy_dev->autoneg = AUTONEG_ENABLE;
268 mac->phy_dev->speed = 0;
269 mac->phy_dev->duplex = 0;
271 if (of_phy_is_fixed_link(mac->of_node))
272 mac->phy_dev->supported |=
273 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
275 mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
276 SUPPORTED_Asym_Pause;
277 mac->phy_dev->advertising = mac->phy_dev->supported |
279 phy_start_aneg(mac->phy_dev);
287 dev_err(eth->dev, "invalid phy_mode\n");
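/* Register an MDIO bus backed by the "mdio-bus" child node so that PHYs
 * described under it in the device tree can be probed.
 */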
291 static int mtk_mdio_init(struct mtk_eth *eth)
293 struct device_node *mii_np;
296 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
298 dev_err(eth->dev, "no %s child node found", "mdio-bus");
302 if (!of_device_is_available(mii_np)) {
307 eth->mii_bus = mdiobus_alloc();
313 eth->mii_bus->name = "mdio";
314 eth->mii_bus->read = mtk_mdio_read;
315 eth->mii_bus->write = mtk_mdio_write;
316 eth->mii_bus->priv = eth;
317 eth->mii_bus->parent = eth->dev;
319 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
320 err = of_mdiobus_register(eth->mii_bus, mii_np);
327 mdiobus_free(eth->mii_bus);
335 static void mtk_mdio_cleanup(struct mtk_eth *eth)
340 mdiobus_unregister(eth->mii_bus);
341 of_node_put(eth->mii_bus->dev.of_node);
342 mdiobus_free(eth->mii_bus);
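/* The QDMA interrupt mask register is shared by the TX and RX paths, so
 * mask updates are done read-modify-write under irq_lock.
 */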
345 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
350 spin_lock_irqsave(&eth->irq_lock, flags);
351 val = mtk_r32(eth, MTK_QDMA_INT_MASK);
352 mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
353 spin_unlock_irqrestore(&eth->irq_lock, flags);
356 static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
361 spin_lock_irqsave(&eth->irq_lock, flags);
362 val = mtk_r32(eth, MTK_QDMA_INT_MASK);
363 mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
364 spin_unlock_irqrestore(&eth->irq_lock, flags);
367 static int mtk_set_mac_address(struct net_device *dev, void *p)
369 int ret = eth_mac_addr(dev, p);
370 struct mtk_mac *mac = netdev_priv(dev);
371 const char *macaddr = dev->dev_addr;
376 spin_lock_bh(&mac->hw->page_lock);
377 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
378 MTK_GDMA_MAC_ADRH(mac->id));
379 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
380 (macaddr[4] << 8) | macaddr[5],
381 MTK_GDMA_MAC_ADRL(mac->id));
382 spin_unlock_bh(&mac->hw->page_lock);
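/* Fold the hardware MIB counters, starting at MTK_GDM1_TX_GBCNT plus this
 * MAC's register offset, into the software mtk_hw_stats under the
 * u64_stats syncp so readers see consistent 64-bit values.
 */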
387 void mtk_stats_update_mac(struct mtk_mac *mac)
389 struct mtk_hw_stats *hw_stats = mac->hw_stats;
390 unsigned int base = MTK_GDM1_TX_GBCNT;
393 base += hw_stats->reg_offset;
395 u64_stats_update_begin(&hw_stats->syncp);
397 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
398 stats = mtk_r32(mac->hw, base + 0x04);
400 hw_stats->rx_bytes += (stats << 32);
401 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
402 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
403 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
404 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
405 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
406 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
407 hw_stats->rx_flow_control_packets +=
408 mtk_r32(mac->hw, base + 0x24);
409 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
410 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
411 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
412 stats = mtk_r32(mac->hw, base + 0x34);
414 hw_stats->tx_bytes += (stats << 32);
415 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
416 u64_stats_update_end(&hw_stats->syncp);
419 static void mtk_stats_update(struct mtk_eth *eth)
423 for (i = 0; i < MTK_MAC_COUNT; i++) {
424 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
426 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
427 mtk_stats_update_mac(eth->mac[i]);
428 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
433 static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
434 struct rtnl_link_stats64 *storage)
436 struct mtk_mac *mac = netdev_priv(dev);
437 struct mtk_hw_stats *hw_stats = mac->hw_stats;
440 if (netif_running(dev) && netif_device_present(dev)) {
441 if (spin_trylock(&hw_stats->stats_lock)) {
442 mtk_stats_update_mac(mac);
443 spin_unlock(&hw_stats->stats_lock);
448 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
449 storage->rx_packets = hw_stats->rx_packets;
450 storage->tx_packets = hw_stats->tx_packets;
451 storage->rx_bytes = hw_stats->rx_bytes;
452 storage->tx_bytes = hw_stats->tx_bytes;
453 storage->collisions = hw_stats->tx_collisions;
454 storage->rx_length_errors = hw_stats->rx_short_errors +
455 hw_stats->rx_long_errors;
456 storage->rx_over_errors = hw_stats->rx_overflow;
457 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
458 storage->rx_errors = hw_stats->rx_checksum_errors;
459 storage->tx_aborted_errors = hw_stats->tx_skip;
460 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
462 storage->tx_errors = dev->stats.tx_errors;
463 storage->rx_dropped = dev->stats.rx_dropped;
464 storage->tx_dropped = dev->stats.tx_dropped;
469 static inline int mtk_max_frag_size(int mtu)
471 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
472 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
473 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
475 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
476 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
479 static inline int mtk_max_buf_size(int frag_size)
481 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
482 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
484 WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
489 static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
490 struct mtk_rx_dma *dma_rxd)
492 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
493 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
494 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
495 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
498 /* the qdma core needs scratch memory to be setup */
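/* mtk_init_fq_dma() builds a ring of MTK_DMA_SIZE free-queue descriptors,
 * each pointing at one MTK_QDMA_PAGE_SIZE chunk of scratch_head and chained
 * to the next via txd2, then programs the head/tail/count into the QDMA FQ
 * registers.
 */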
499 static int mtk_init_fq_dma(struct mtk_eth *eth)
501 dma_addr_t phy_ring_tail;
502 int cnt = MTK_DMA_SIZE;
506 eth->scratch_ring = dma_alloc_coherent(eth->dev,
507 cnt * sizeof(struct mtk_tx_dma),
508 &eth->phy_scratch_ring,
509 GFP_ATOMIC | __GFP_ZERO);
510 if (unlikely(!eth->scratch_ring))
513 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
515 if (unlikely(!eth->scratch_head))
518 dma_addr = dma_map_single(eth->dev,
519 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
521 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
524 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
525 phy_ring_tail = eth->phy_scratch_ring +
526 (sizeof(struct mtk_tx_dma) * (cnt - 1));
528 for (i = 0; i < cnt; i++) {
529 eth->scratch_ring[i].txd1 =
530 (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
532 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
533 ((i + 1) * sizeof(struct mtk_tx_dma)));
534 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
537 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
538 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
539 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
540 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
545 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
547 void *ret = ring->dma;
549 return ret + (desc - ring->phys);
552 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
553 struct mtk_tx_dma *txd)
555 int idx = txd - ring->dma;
557 return &ring->buf[idx];
560 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
562 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
563 dma_unmap_single(eth->dev,
564 dma_unmap_addr(tx_buf, dma_addr0),
565 dma_unmap_len(tx_buf, dma_len0),
567 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
568 dma_unmap_page(eth->dev,
569 dma_unmap_addr(tx_buf, dma_addr0),
570 dma_unmap_len(tx_buf, dma_len0),
575 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
576 dev_kfree_skb_any(tx_buf->skb);
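/* Map the skb head and every fragment into TX descriptors. The first
 * descriptor carries the checksum/VLAN flags in txd4, fragments larger than
 * MTK_TX_DMA_BUF_LEN are split across descriptors, and each mapping is
 * recorded in the tx_buf array so mtk_tx_unmap() can undo it later. On a
 * mapping error the descriptors initialized so far are unwound.
 */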
580 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
581 int tx_num, struct mtk_tx_ring *ring, bool gso)
583 struct mtk_mac *mac = netdev_priv(dev);
584 struct mtk_eth *eth = mac->hw;
585 struct mtk_tx_dma *itxd, *txd;
586 struct mtk_tx_buf *tx_buf;
587 dma_addr_t mapped_addr;
588 unsigned int nr_frags;
592 itxd = ring->next_free;
593 if (itxd == ring->last_free)
596 /* set the forward port */
597 txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
599 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
600 memset(tx_buf, 0, sizeof(*tx_buf));
605 /* TX Checksum offload */
606 if (skb->ip_summed == CHECKSUM_PARTIAL)
607 txd4 |= TX_DMA_CHKSUM;
609 /* VLAN header offload */
610 if (skb_vlan_tag_present(skb))
611 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
613 mapped_addr = dma_map_single(eth->dev, skb->data,
614 skb_headlen(skb), DMA_TO_DEVICE);
615 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
618 WRITE_ONCE(itxd->txd1, mapped_addr);
619 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
620 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
621 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
625 nr_frags = skb_shinfo(skb)->nr_frags;
626 for (i = 0; i < nr_frags; i++) {
627 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
628 unsigned int offset = 0;
629 int frag_size = skb_frag_size(frag);
632 bool last_frag = false;
633 unsigned int frag_map_size;
635 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
636 if (txd == ring->last_free)
640 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
641 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
644 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
647 if (i == nr_frags - 1 &&
648 (frag_size - frag_map_size) == 0)
651 WRITE_ONCE(txd->txd1, mapped_addr);
652 WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
653 TX_DMA_PLEN0(frag_map_size) |
654 last_frag * TX_DMA_LS0));
655 WRITE_ONCE(txd->txd4, 0);
657 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
658 tx_buf = mtk_desc_to_tx_buf(ring, txd);
659 memset(tx_buf, 0, sizeof(*tx_buf));
661 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
662 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
663 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
664 frag_size -= frag_map_size;
665 offset += frag_map_size;
669 /* store skb to cleanup */
672 WRITE_ONCE(itxd->txd4, txd4);
673 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
674 (!nr_frags * TX_DMA_LS0)));
676 netdev_sent_queue(dev, skb->len);
677 skb_tx_timestamp(skb);
679 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
680 atomic_sub(n_desc, &ring->free_count);
682 /* make sure that all changes to the dma ring are flushed before we
687 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
688 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
694 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
697 mtk_tx_unmap(eth, tx_buf);
699 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
700 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
701 } while (itxd != txd);
706 static inline int mtk_cal_txd_req(struct sk_buff *skb)
709 struct skb_frag_struct *frag;
712 if (skb_is_gso(skb)) {
713 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
714 frag = &skb_shinfo(skb)->frags[i];
715 nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
718 nfrags += skb_shinfo(skb)->nr_frags;
724 static int mtk_queue_stopped(struct mtk_eth *eth)
728 for (i = 0; i < MTK_MAC_COUNT; i++) {
731 if (netif_queue_stopped(eth->netdev[i]))
738 static void mtk_wake_queue(struct mtk_eth *eth)
742 for (i = 0; i < MTK_MAC_COUNT; i++) {
745 netif_wake_queue(eth->netdev[i]);
749 static void mtk_stop_queue(struct mtk_eth *eth)
753 for (i = 0; i < MTK_MAC_COUNT; i++) {
756 netif_stop_queue(eth->netdev[i]);
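/* ndo_start_xmit: both MACs transmit on the same QDMA ring, so the hot path
 * is serialized with page_lock. For GSO packets the MSS is handed to the
 * hardware through the TCP checksum field before mapping.
 */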
760 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
762 struct mtk_mac *mac = netdev_priv(dev);
763 struct mtk_eth *eth = mac->hw;
764 struct mtk_tx_ring *ring = &eth->tx_ring;
765 struct net_device_stats *stats = &dev->stats;
769 /* normally we can rely on the stack not calling this more than once,
770 * however we have 2 queues running on the same ring so we need to lock
773 spin_lock(&eth->page_lock);
775 tx_num = mtk_cal_txd_req(skb);
776 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
778 netif_err(eth, tx_queued, dev,
779 "Tx Ring full when queue awake!\n");
780 spin_unlock(&eth->page_lock);
781 return NETDEV_TX_BUSY;
784 /* TSO: fill MSS info in tcp checksum field */
785 if (skb_is_gso(skb)) {
786 if (skb_cow_head(skb, 0)) {
787 netif_warn(eth, tx_err, dev,
788 "GSO expand head fail.\n");
792 if (skb_shinfo(skb)->gso_type &
793 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
795 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
799 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
802 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
805 spin_unlock(&eth->page_lock);
810 spin_unlock(&eth->page_lock);
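/* NAPI RX: for each completed descriptor allocate a replacement page
 * fragment first, build an skb around the old buffer, propagate the
 * hardware checksum and VLAN results, then hand the new buffer back to the
 * ring and advance MTK_QRX_CRX_IDX0.
 */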
816 static int mtk_poll_rx(struct napi_struct *napi, int budget,
819 struct mtk_rx_ring *ring = &eth->rx_ring;
820 int idx = ring->calc_idx;
823 struct mtk_rx_dma *rxd, trxd;
826 while (done < budget) {
827 struct net_device *netdev;
832 idx = NEXT_RX_DESP_IDX(idx);
833 rxd = &ring->dma[idx];
834 data = ring->data[idx];
836 mtk_rx_get_desc(&trxd, rxd);
837 if (!(trxd.rxd2 & RX_DMA_DONE))
840 /* find out which mac the packet comes from. values start at 1 */
841 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
845 netdev = eth->netdev[mac];
847 /* alloc new buffer */
848 new_data = napi_alloc_frag(ring->frag_size);
849 if (unlikely(!new_data)) {
850 netdev->stats.rx_dropped++;
853 dma_addr = dma_map_single(eth->dev,
854 new_data + NET_SKB_PAD,
857 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
858 skb_free_frag(new_data);
859 netdev->stats.rx_dropped++;
864 skb = build_skb(data, ring->frag_size);
865 if (unlikely(!skb)) {
866 put_page(virt_to_head_page(new_data));
867 netdev->stats.rx_dropped++;
870 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
872 dma_unmap_single(eth->dev, trxd.rxd1,
873 ring->buf_size, DMA_FROM_DEVICE);
874 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
876 skb_put(skb, pktlen);
877 if (trxd.rxd4 & RX_DMA_L4_VALID)
878 skb->ip_summed = CHECKSUM_UNNECESSARY;
880 skb_checksum_none_assert(skb);
881 skb->protocol = eth_type_trans(skb, netdev);
883 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
884 RX_DMA_VID(trxd.rxd3))
885 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
886 RX_DMA_VID(trxd.rxd3));
887 napi_gro_receive(napi, skb);
889 ring->data[idx] = new_data;
890 rxd->rxd1 = (unsigned int)dma_addr;
893 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
895 ring->calc_idx = idx;
896 /* make sure that all changes to the dma ring are flushed before
900 mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
905 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
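/* Reclaim TX descriptors between the CPU and DMA pointers, free the
 * completed skbs, report the per-device byte/packet totals with
 * netdev_completed_queue() and wake the queues once enough descriptors are
 * free again.
 */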
910 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
912 struct mtk_tx_ring *ring = &eth->tx_ring;
913 struct mtk_tx_dma *desc;
915 struct mtk_tx_buf *tx_buf;
916 unsigned int done[MTK_MAX_DEVS];
917 unsigned int bytes[MTK_MAX_DEVS];
919 static int condition;
922 memset(done, 0, sizeof(done));
923 memset(bytes, 0, sizeof(bytes));
925 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
926 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
928 desc = mtk_qdma_phys_to_virt(ring, cpu);
930 while ((cpu != dma) && budget) {
931 u32 next_cpu = desc->txd2;
934 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
935 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
938 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
942 tx_buf = mtk_desc_to_tx_buf(ring, desc);
949 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
950 bytes[mac] += skb->len;
954 mtk_tx_unmap(eth, tx_buf);
956 ring->last_free = desc;
957 atomic_inc(&ring->free_count);
962 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
964 for (i = 0; i < MTK_MAC_COUNT; i++) {
965 if (!eth->netdev[i] || !done[i])
967 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
971 if (mtk_queue_stopped(eth) &&
972 (atomic_read(&ring->free_count) > ring->thresh))
978 static void mtk_handle_status_irq(struct mtk_eth *eth)
980 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
982 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
983 mtk_stats_update(eth);
984 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
989 static int mtk_napi_tx(struct napi_struct *napi, int budget)
991 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
995 mtk_handle_status_irq(eth);
996 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
997 tx_done = mtk_poll_tx(eth, budget);
999 if (unlikely(netif_msg_intr(eth))) {
1000 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1001 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
1003 "done tx %d, intr 0x%08x/0x%x\n",
1004 tx_done, status, mask);
1007 if (tx_done == budget)
1010 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1011 if (status & MTK_TX_DONE_INT)
1014 napi_complete(napi);
1015 mtk_irq_enable(eth, MTK_TX_DONE_INT);
1020 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1022 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1026 mtk_handle_status_irq(eth);
1027 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
1028 rx_done = mtk_poll_rx(napi, budget, eth);
1030 if (unlikely(netif_msg_intr(eth))) {
1031 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1032 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
1034 "done rx %d, intr 0x%08x/0x%x\n",
1035 rx_done, status, mask);
1038 if (rx_done == budget)
1041 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
1042 if (status & MTK_RX_DONE_INT)
1045 napi_complete(napi);
1046 mtk_irq_enable(eth, MTK_RX_DONE_INT);
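/* Allocate the TX ring: MTK_DMA_SIZE coherent descriptors linked into a
 * circle through txd2 with ownership initially given to the CPU, plus the
 * matching tx_buf bookkeeping array. The ring base and CPU/DMA pointers are
 * then programmed into the QTX registers.
 */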
1051 static int mtk_tx_alloc(struct mtk_eth *eth)
1053 struct mtk_tx_ring *ring = &eth->tx_ring;
1054 int i, sz = sizeof(*ring->dma);
1056 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1061 ring->dma = dma_alloc_coherent(eth->dev,
1064 GFP_ATOMIC | __GFP_ZERO);
1068 memset(ring->dma, 0, MTK_DMA_SIZE * sz);
1069 for (i = 0; i < MTK_DMA_SIZE; i++) {
1070 int next = (i + 1) % MTK_DMA_SIZE;
1071 u32 next_ptr = ring->phys + next * sz;
1073 ring->dma[i].txd2 = next_ptr;
1074 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1077 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1078 ring->next_free = &ring->dma[0];
1079 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1080 ring->thresh = MAX_SKB_FRAGS;
1082 /* make sure that all changes to the dma ring are flushed before we
1087 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1088 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1090 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1093 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1102 static void mtk_tx_clean(struct mtk_eth *eth)
1104 struct mtk_tx_ring *ring = &eth->tx_ring;
1108 for (i = 0; i < MTK_DMA_SIZE; i++)
1109 mtk_tx_unmap(eth, &ring->buf[i]);
1115 dma_free_coherent(eth->dev,
1116 MTK_DMA_SIZE * sizeof(*ring->dma),
1123 static int mtk_rx_alloc(struct mtk_eth *eth)
1125 struct mtk_rx_ring *ring = &eth->rx_ring;
1128 ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
1129 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1130 ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
1135 for (i = 0; i < MTK_DMA_SIZE; i++) {
1136 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1141 ring->dma = dma_alloc_coherent(eth->dev,
1142 MTK_DMA_SIZE * sizeof(*ring->dma),
1144 GFP_ATOMIC | __GFP_ZERO);
1148 for (i = 0; i < MTK_DMA_SIZE; i++) {
1149 dma_addr_t dma_addr = dma_map_single(eth->dev,
1150 ring->data[i] + NET_SKB_PAD,
1153 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1155 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1157 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1159 ring->calc_idx = MTK_DMA_SIZE - 1;
1160 /* make sure that all changes to the dma ring are flushed before we
1165 mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
1166 mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
1167 mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
1168 mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
1169 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
1174 static void mtk_rx_clean(struct mtk_eth *eth)
1176 struct mtk_rx_ring *ring = &eth->rx_ring;
1179 if (ring->data && ring->dma) {
1180 for (i = 0; i < MTK_DMA_SIZE; i++) {
1183 if (!ring->dma[i].rxd1)
1185 dma_unmap_single(eth->dev,
1189 skb_free_frag(ring->data[i]);
1196 dma_free_coherent(eth->dev,
1197 MTK_DMA_SIZE * sizeof(*ring->dma),
1204 /* wait for DMA to finish whatever it is doing before we start using it again */
1205 static int mtk_dma_busy_wait(struct mtk_eth *eth)
1207 unsigned long t_start = jiffies;
1210 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
1211 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
1213 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
1217 dev_err(eth->dev, "DMA init timeout\n");
1221 static int mtk_dma_init(struct mtk_eth *eth)
1225 if (mtk_dma_busy_wait(eth))
1228 /* QDMA needs scratch memory for internal reordering of the
1231 err = mtk_init_fq_dma(eth);
1235 err = mtk_tx_alloc(eth);
1239 err = mtk_rx_alloc(eth);
1243 /* Enable random early drop and set drop threshold automatically */
1244 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
1246 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1251 static void mtk_dma_free(struct mtk_eth *eth)
1255 for (i = 0; i < MTK_MAC_COUNT; i++)
1257 netdev_reset_queue(eth->netdev[i]);
1258 if (eth->scratch_ring) {
1259 dma_free_coherent(eth->dev,
1260 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
1262 eth->phy_scratch_ring);
1263 eth->scratch_ring = NULL;
1264 eth->phy_scratch_ring = 0;
1268 kfree(eth->scratch_head);
1271 static void mtk_tx_timeout(struct net_device *dev)
1273 struct mtk_mac *mac = netdev_priv(dev);
1274 struct mtk_eth *eth = mac->hw;
1276 eth->netdev[mac->id]->stats.tx_errors++;
1277 netif_err(eth, tx_err, dev,
1278 "transmit timed out\n");
1279 schedule_work(&eth->pending_work);
1282 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
1284 struct mtk_eth *eth = _eth;
1286 if (likely(napi_schedule_prep(&eth->rx_napi))) {
1287 __napi_schedule(&eth->rx_napi);
1288 mtk_irq_disable(eth, MTK_RX_DONE_INT);
1294 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
1296 struct mtk_eth *eth = _eth;
1298 if (likely(napi_schedule_prep(&eth->tx_napi))) {
1299 __napi_schedule(&eth->tx_napi);
1300 mtk_irq_disable(eth, MTK_TX_DONE_INT);
1306 #ifdef CONFIG_NET_POLL_CONTROLLER
1307 static void mtk_poll_controller(struct net_device *dev)
1309 struct mtk_mac *mac = netdev_priv(dev);
1310 struct mtk_eth *eth = mac->hw;
1311 u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
1313 mtk_irq_disable(eth, int_mask);
1314 mtk_handle_irq_rx(eth->irq[2], dev);
1315 mtk_irq_enable(eth, int_mask);
1319 static int mtk_start_dma(struct mtk_eth *eth)
1323 err = mtk_dma_init(eth);
1330 MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
1331 MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
1332 MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
1338 static int mtk_open(struct net_device *dev)
1340 struct mtk_mac *mac = netdev_priv(dev);
1341 struct mtk_eth *eth = mac->hw;
1343 /* we run 2 netdevs on the same dma ring so we only bring it up once */
1344 if (!atomic_read(&eth->dma_refcnt)) {
1345 int err = mtk_start_dma(eth);
1350 napi_enable(&eth->tx_napi);
1351 napi_enable(&eth->rx_napi);
1352 mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1354 atomic_inc(&eth->dma_refcnt);
1356 phy_start(mac->phy_dev);
1357 netif_start_queue(dev);
1362 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1367 /* stop the dma engine */
1368 spin_lock_bh(&eth->page_lock);
1369 val = mtk_r32(eth, glo_cfg);
1370 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1372 spin_unlock_bh(&eth->page_lock);
1374 /* wait for dma stop */
1375 for (i = 0; i < 10; i++) {
1376 val = mtk_r32(eth, glo_cfg);
1377 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1385 static int mtk_stop(struct net_device *dev)
1387 struct mtk_mac *mac = netdev_priv(dev);
1388 struct mtk_eth *eth = mac->hw;
1390 netif_tx_disable(dev);
1391 phy_stop(mac->phy_dev);
1393 /* only shutdown DMA if this is the last user */
1394 if (!atomic_dec_and_test(&eth->dma_refcnt))
1397 mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
1398 napi_disable(&eth->tx_napi);
1399 napi_disable(&eth->rx_napi);
1401 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
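/* One-time hardware init: reset the frame engine, set the GE2 pad driving
 * strength, request the TX/RX interrupts, register the MDIO bus, program the
 * interrupt grouping and enable RX checksum offload in the GDMA forward
 * configuration.
 */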
1408 static int __init mtk_hw_init(struct mtk_eth *eth)
1412 /* reset the frame engine */
1413 reset_control_assert(eth->rstc);
1414 usleep_range(10, 20);
1415 reset_control_deassert(eth->rstc);
1416 usleep_range(10, 20);
1418 /* Set GE2 driving and slew rate */
1419 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
1422 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
1425 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1427 /* GE1, Force 1000M/FD, FC ON */
1428 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
1430 /* GE2, Force 1000M/FD, FC ON */
1431 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
1433 /* Enable RX VLAN offloading */
1434 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1436 err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
1437 dev_name(eth->dev), eth);
1440 err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
1441 dev_name(eth->dev), eth);
1445 err = mtk_mdio_init(eth);
1449 /* disable delay and normal interrupt */
1450 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1451 mtk_irq_disable(eth, ~0);
1452 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
1453 mtk_w32(eth, 0, MTK_RST_GL);
1455 /* FE int grouping */
1456 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
1457 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
1458 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
1459 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
1460 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
1462 for (i = 0; i < 2; i++) {
1463 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
1465 /* setup the forward port to send frame to QDMA */
1469 /* Enable RX checksum */
1470 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
1472 /* setup the mac dma */
1473 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
1479 static int __init mtk_init(struct net_device *dev)
1481 struct mtk_mac *mac = netdev_priv(dev);
1482 struct mtk_eth *eth = mac->hw;
1483 const char *mac_addr;
1485 mac_addr = of_get_mac_address(mac->of_node);
1487 ether_addr_copy(dev->dev_addr, mac_addr);
1489 /* If the mac address is invalid, use random mac address */
1490 if (!is_valid_ether_addr(dev->dev_addr)) {
1491 random_ether_addr(dev->dev_addr);
1492 dev_err(eth->dev, "generated random MAC address %pM\n",
1494 dev->addr_assign_type = NET_ADDR_RANDOM;
1497 return mtk_phy_connect(mac);
1500 static void mtk_uninit(struct net_device *dev)
1502 struct mtk_mac *mac = netdev_priv(dev);
1503 struct mtk_eth *eth = mac->hw;
1505 phy_disconnect(mac->phy_dev);
1506 mtk_mdio_cleanup(eth);
1507 mtk_irq_disable(eth, ~0);
1508 free_irq(eth->irq[1], dev);
1509 free_irq(eth->irq[2], dev);
1512 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1514 struct mtk_mac *mac = netdev_priv(dev);
1520 return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
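/* Reset worker scheduled from the TX timeout path: stop every running
 * netdev so the shared DMA is quiesced, then bring each one back up and
 * close any device that fails to restart.
 */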
1528 static void mtk_pending_work(struct work_struct *work)
1530 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
1532 unsigned long restart = 0;
1536 /* stop all devices to make sure that dma is properly shut down */
1537 for (i = 0; i < MTK_MAC_COUNT; i++) {
1538 if (!eth->netdev[i])
1540 mtk_stop(eth->netdev[i]);
1541 __set_bit(i, &restart);
1544 /* restart DMA and enable IRQs */
1545 for (i = 0; i < MTK_MAC_COUNT; i++) {
1546 if (!test_bit(i, &restart))
1548 err = mtk_open(eth->netdev[i]);
1550 netif_alert(eth, ifup, eth->netdev[i],
1551 "Driver up/down cycle failed, closing device.\n");
1552 dev_close(eth->netdev[i]);
1558 static int mtk_cleanup(struct mtk_eth *eth)
1562 for (i = 0; i < MTK_MAC_COUNT; i++) {
1563 if (!eth->netdev[i])
1566 unregister_netdev(eth->netdev[i]);
1567 free_netdev(eth->netdev[i]);
1569 cancel_work_sync(&eth->pending_work);
1574 static int mtk_get_settings(struct net_device *dev,
1575 struct ethtool_cmd *cmd)
1577 struct mtk_mac *mac = netdev_priv(dev);
1580 err = phy_read_status(mac->phy_dev);
1584 return phy_ethtool_gset(mac->phy_dev, cmd);
1587 static int mtk_set_settings(struct net_device *dev,
1588 struct ethtool_cmd *cmd)
1590 struct mtk_mac *mac = netdev_priv(dev);
1592 if (cmd->phy_address != mac->phy_dev->mdio.addr) {
1593 mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
1599 return phy_ethtool_sset(mac->phy_dev, cmd);
1602 static void mtk_get_drvinfo(struct net_device *dev,
1603 struct ethtool_drvinfo *info)
1605 struct mtk_mac *mac = netdev_priv(dev);
1607 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
1608 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
1609 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
1612 static u32 mtk_get_msglevel(struct net_device *dev)
1614 struct mtk_mac *mac = netdev_priv(dev);
1616 return mac->hw->msg_enable;
1619 static void mtk_set_msglevel(struct net_device *dev, u32 value)
1621 struct mtk_mac *mac = netdev_priv(dev);
1623 mac->hw->msg_enable = value;
1626 static int mtk_nway_reset(struct net_device *dev)
1628 struct mtk_mac *mac = netdev_priv(dev);
1630 return genphy_restart_aneg(mac->phy_dev);
1633 static u32 mtk_get_link(struct net_device *dev)
1635 struct mtk_mac *mac = netdev_priv(dev);
1638 err = genphy_update_link(mac->phy_dev);
1640 return ethtool_op_get_link(dev);
1642 return mac->phy_dev->link;
1645 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1649 switch (stringset) {
1651 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
1652 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
1653 data += ETH_GSTRING_LEN;
1659 static int mtk_get_sset_count(struct net_device *dev, int sset)
1663 return ARRAY_SIZE(mtk_ethtool_stats);
1669 static void mtk_get_ethtool_stats(struct net_device *dev,
1670 struct ethtool_stats *stats, u64 *data)
1672 struct mtk_mac *mac = netdev_priv(dev);
1673 struct mtk_hw_stats *hwstats = mac->hw_stats;
1674 u64 *data_src, *data_dst;
1678 if (netif_running(dev) && netif_device_present(dev)) {
1679 if (spin_trylock(&hwstats->stats_lock)) {
1680 mtk_stats_update_mac(mac);
1681 spin_unlock(&hwstats->stats_lock);
1686 data_src = (u64 *)hwstats;
1688 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
1690 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
1691 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
1692 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
1695 static struct ethtool_ops mtk_ethtool_ops = {
1696 .get_settings = mtk_get_settings,
1697 .set_settings = mtk_set_settings,
1698 .get_drvinfo = mtk_get_drvinfo,
1699 .get_msglevel = mtk_get_msglevel,
1700 .set_msglevel = mtk_set_msglevel,
1701 .nway_reset = mtk_nway_reset,
1702 .get_link = mtk_get_link,
1703 .get_strings = mtk_get_strings,
1704 .get_sset_count = mtk_get_sset_count,
1705 .get_ethtool_stats = mtk_get_ethtool_stats,
1708 static const struct net_device_ops mtk_netdev_ops = {
1709 .ndo_init = mtk_init,
1710 .ndo_uninit = mtk_uninit,
1711 .ndo_open = mtk_open,
1712 .ndo_stop = mtk_stop,
1713 .ndo_start_xmit = mtk_start_xmit,
1714 .ndo_set_mac_address = mtk_set_mac_address,
1715 .ndo_validate_addr = eth_validate_addr,
1716 .ndo_do_ioctl = mtk_do_ioctl,
1717 .ndo_change_mtu = eth_change_mtu,
1718 .ndo_tx_timeout = mtk_tx_timeout,
1719 .ndo_get_stats64 = mtk_get_stats64,
1720 #ifdef CONFIG_NET_POLL_CONTROLLER
1721 .ndo_poll_controller = mtk_poll_controller,
1725 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1727 struct mtk_mac *mac;
1728 const __be32 *_id = of_get_property(np, "reg", NULL);
1732 dev_err(eth->dev, "missing mac id\n");
1736 id = be32_to_cpup(_id);
1737 if (id >= MTK_MAC_COUNT) {
1738 dev_err(eth->dev, "%d is not a valid mac id\n", id);
1742 if (eth->netdev[id]) {
1743 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
1747 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
1748 if (!eth->netdev[id]) {
1749 dev_err(eth->dev, "alloc_etherdev failed\n");
1752 mac = netdev_priv(eth->netdev[id]);
1758 mac->hw_stats = devm_kzalloc(eth->dev,
1759 sizeof(*mac->hw_stats),
1761 if (!mac->hw_stats) {
1762 dev_err(eth->dev, "failed to allocate counter memory\n");
1766 spin_lock_init(&mac->hw_stats->stats_lock);
1767 u64_stats_init(&mac->hw_stats->syncp);
1768 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
1770 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
1771 eth->netdev[id]->watchdog_timeo = 5 * HZ;
1772 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
1773 eth->netdev[id]->base_addr = (unsigned long)eth->base;
1774 eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
1775 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1776 eth->netdev[id]->features |= MTK_HW_FEATURES;
1777 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
1779 err = register_netdev(eth->netdev[id]);
1781 dev_err(eth->dev, "error bringing up device\n");
1784 eth->netdev[id]->irq = eth->irq[0];
1785 netif_info(eth, probe, eth->netdev[id],
1786 "mediatek frame engine at 0x%08lx, irq %d\n",
1787 eth->netdev[id]->base_addr, eth->irq[0]);
1792 free_netdev(eth->netdev[id]);
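/* Probe: map the register window, look up the ethsys/pctl syscon regmaps and
 * the "eth" reset line, fetch IRQs and clocks, run mtk_hw_init(), create one
 * netdev per "mediatek,eth-mac" child node and attach the shared TX/RX NAPI
 * contexts to a dummy netdev.
 */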
1796 static int mtk_probe(struct platform_device *pdev)
1798 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1799 struct device_node *mac_np;
1800 const struct of_device_id *match;
1801 struct mtk_soc_data *soc;
1802 struct mtk_eth *eth;
1806 match = of_match_device(of_mtk_match, &pdev->dev);
1807 soc = (struct mtk_soc_data *)match->data;
1809 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
1813 eth->base = devm_ioremap_resource(&pdev->dev, res);
1814 if (IS_ERR(eth->base))
1815 return PTR_ERR(eth->base);
1817 spin_lock_init(&eth->page_lock);
1818 spin_lock_init(&eth->irq_lock);
1820 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1822 if (IS_ERR(eth->ethsys)) {
1823 dev_err(&pdev->dev, "no ethsys regmap found\n");
1824 return PTR_ERR(eth->ethsys);
1827 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
1829 if (IS_ERR(eth->pctl)) {
1830 dev_err(&pdev->dev, "no pctl regmap found\n");
1831 return PTR_ERR(eth->pctl);
1834 eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
1835 if (IS_ERR(eth->rstc)) {
1836 dev_err(&pdev->dev, "no eth reset found\n");
1837 return PTR_ERR(eth->rstc);
1840 for (i = 0; i < 3; i++) {
1841 eth->irq[i] = platform_get_irq(pdev, i);
1842 if (eth->irq[i] < 0) {
1843 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
1848 eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
1849 eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
1850 eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
1851 eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
1852 if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
1853 IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
1856 clk_prepare_enable(eth->clk_ethif);
1857 clk_prepare_enable(eth->clk_esw);
1858 clk_prepare_enable(eth->clk_gp1);
1859 clk_prepare_enable(eth->clk_gp2);
1861 eth->dev = &pdev->dev;
1862 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
1863 INIT_WORK(&eth->pending_work, mtk_pending_work);
1865 err = mtk_hw_init(eth);
1869 for_each_child_of_node(pdev->dev.of_node, mac_np) {
1870 if (!of_device_is_compatible(mac_np,
1871 "mediatek,eth-mac"))
1874 if (!of_device_is_available(mac_np))
1877 err = mtk_add_mac(eth, mac_np);
1882 /* we run 2 devices on the same DMA ring so we need a dummy device
1885 init_dummy_netdev(&eth->dummy_dev);
1886 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
1888 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
1891 platform_set_drvdata(pdev, eth);
1900 static int mtk_remove(struct platform_device *pdev)
1902 struct mtk_eth *eth = platform_get_drvdata(pdev);
1904 clk_disable_unprepare(eth->clk_ethif);
1905 clk_disable_unprepare(eth->clk_esw);
1906 clk_disable_unprepare(eth->clk_gp1);
1907 clk_disable_unprepare(eth->clk_gp2);
1909 netif_napi_del(&eth->tx_napi);
1910 netif_napi_del(&eth->rx_napi);
1916 const struct of_device_id of_mtk_match[] = {
1917 { .compatible = "mediatek,mt7623-eth" },
1921 static struct platform_driver mtk_driver = {
1923 .remove = mtk_remove,
1925 .name = "mtk_soc_eth",
1926 .of_match_table = of_mtk_match,
1930 module_platform_driver(mtk_driver);
1932 MODULE_LICENSE("GPL");
1933 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
1934 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");