1 /* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
15 #include <linux/module.h>
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/init.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/platform_device.h>
24 #include <linux/of_device.h>
25 #include <linux/mfd/syscon.h>
26 #include <linux/clk.h>
27 #include <linux/of_net.h>
28 #include <linux/of_mdio.h>
29 #include <linux/if_vlan.h>
30 #include <linux/reset.h>
31 #include <linux/tcp.h>
33 #include <linux/bug.h>
34 #include <linux/regmap.h>
36 #include "mtk_eth_soc.h"
40 #define MAX_RX_LENGTH 1536
41 #define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
42 #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
43 #define DMA_DUMMY_DESC 0xffffffff
44 #define MTK_DEFAULT_MSG_ENABLE \
54 #define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
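/* ring index helpers; the wrap-around masking assumes the tx/rx ring
 * sizes are powers of two
 */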
55 #define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
56 #define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
58 #define SYSC_REG_RSTCTRL 0x34
60 static int mtk_msg_level = -1;
61 module_param_named(msg_level, mtk_msg_level, int, 0);
62 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
64 static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
65 [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
66 [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
67 [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
68 [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
69 [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
70 [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
71 [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
72 [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
73 [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
74 [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
75 [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
76 [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
77 [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
78 [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
79 [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
80 [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
83 static const u16 *mtk_reg_table = mtk_reg_table_default;
85 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
87 __raw_writel(val, eth->base + reg);
90 u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
92 return __raw_readl(eth->base + reg);
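/* indirect register accessors: the logical register id is translated
 * through the per-SoC mtk_reg_table so the same code paths work on SoCs
 * with different register layouts
 */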
95 static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
97 mtk_w32(eth, val, mtk_reg_table[reg]);
100 static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
102 return mtk_r32(eth, mtk_reg_table[reg]);
105 /* these bits are also exposed via the reset-controller API. however the switch
106 * and FE need to be brought out of reset at the exact same moment and the
107 * reset-controller API does not provide this feature yet. Do the reset manually
108 * until the reset-controller API is able to do this
110 void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
114 regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
116 regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
117 usleep_range(10, 20);
119 regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
120 usleep_range(10, 20);
122 EXPORT_SYMBOL(mtk_reset);
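/* interrupt helpers: depending on which DMA engines the SoC uses, the
 * PDMA and/or QDMA interrupt registers are updated
 */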
124 static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
126 if (eth->soc->dma_type & MTK_PDMA)
127 mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
128 if (eth->soc->dma_type & MTK_QDMA)
129 mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
132 static inline u32 mtk_irq_pending(struct mtk_eth *eth)
136 if (eth->soc->dma_type & MTK_PDMA)
137 status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
138 if (eth->soc->dma_type & MTK_QDMA)
139 status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
144 static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
146 u32 status_reg = MTK_REG_MTK_INT_STATUS;
148 if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
149 status_reg = MTK_REG_MTK_INT_STATUS2;
151 mtk_reg_w32(eth, mask, status_reg);
154 static u32 mtk_irq_pending_status(struct mtk_eth *eth)
156 u32 status_reg = MTK_REG_MTK_INT_STATUS;
158 if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
159 status_reg = MTK_REG_MTK_INT_STATUS2;
161 return mtk_reg_r32(eth, status_reg);
164 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
168 if (eth->soc->dma_type & MTK_PDMA) {
169 val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
170 mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
172 mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
174 if (eth->soc->dma_type & MTK_QDMA) {
175 val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
176 mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
178 mtk_r32(eth, MTK_QMTK_INT_ENABLE);
182 static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
186 if (eth->soc->dma_type & MTK_PDMA) {
187 val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
188 mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
190 mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
192 if (eth->soc->dma_type & MTK_QDMA) {
193 val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
194 mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
196 mtk_r32(eth, MTK_QMTK_INT_ENABLE);
200 static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
204 if (eth->soc->dma_type & MTK_PDMA)
205 enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
206 if (eth->soc->dma_type & MTK_QDMA)
207 enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
212 static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
213 unsigned char *macaddr)
217 spin_lock_irqsave(&mac->hw->page_lock, flags);
218 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
219 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
220 (macaddr[4] << 8) | macaddr[5],
222 spin_unlock_irqrestore(&mac->hw->page_lock, flags);
225 static int mtk_set_mac_address(struct net_device *dev, void *p)
227 int ret = eth_mac_addr(dev, p);
228 struct mtk_mac *mac = netdev_priv(dev);
229 struct mtk_eth *eth = mac->hw;
234 if (eth->soc->set_mac)
235 eth->soc->set_mac(mac, dev->dev_addr);
237 mtk_hw_set_macaddr(mac, p);
242 static inline int mtk_max_frag_size(int mtu)
244 /* make sure buf_size will be at least MAX_RX_LENGTH */
245 if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
246 mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
248 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
252 static inline int mtk_max_buf_size(int frag_size)
254 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
255 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
257 WARN_ON(buf_size < MAX_RX_LENGTH);
262 static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
263 struct mtk_rx_dma *dma_rxd)
265 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
266 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
267 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
268 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
271 static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
272 struct mtk_tx_dma *dma_txd)
274 WRITE_ONCE(dma_txd->txd1, txd->txd1);
275 WRITE_ONCE(dma_txd->txd3, txd->txd3);
276 WRITE_ONCE(dma_txd->txd4, txd->txd4);
277 /* clean dma done flag last */
278 WRITE_ONCE(dma_txd->txd2, txd->txd2);
281 static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
285 if (ring->rx_data && ring->rx_dma) {
286 for (i = 0; i < ring->rx_ring_size; i++) {
287 if (!ring->rx_data[i])
289 if (!ring->rx_dma[i].rxd1)
291 dma_unmap_single(eth->dev,
292 ring->rx_dma[i].rxd1,
295 skb_free_frag(ring->rx_data[i]);
297 kfree(ring->rx_data);
298 ring->rx_data = NULL;
302 dma_free_coherent(eth->dev,
303 ring->rx_ring_size * sizeof(*ring->rx_dma),
310 static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
314 ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
315 ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
316 ring->rx_ring_size = eth->soc->dma_ring_size;
317 ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
322 for (i = 0; i < ring->rx_ring_size; i++) {
323 ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
324 if (!ring->rx_data[i])
329 dma_alloc_coherent(eth->dev,
330 ring->rx_ring_size * sizeof(*ring->rx_dma),
331 &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
335 if (!eth->soc->rx_2b_offset)
338 for (i = 0; i < ring->rx_ring_size; i++) {
339 dma_addr_t dma_addr = dma_map_single(eth->dev,
340 ring->rx_data[i] + NET_SKB_PAD + pad,
343 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
345 ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
347 if (eth->soc->rx_sg_dma)
348 ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
350 ring->rx_dma[i].rxd2 = RX_DMA_LSO;
352 ring->rx_calc_idx = ring->rx_ring_size - 1;
353 /* make sure that all changes to the dma ring are flushed before we
364 static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
366 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
367 dma_unmap_single(dev,
368 dma_unmap_addr(tx_buf, dma_addr0),
369 dma_unmap_len(tx_buf, dma_len0),
371 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
373 dma_unmap_addr(tx_buf, dma_addr0),
374 dma_unmap_len(tx_buf, dma_len0),
377 if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
379 dma_unmap_addr(tx_buf, dma_addr1),
380 dma_unmap_len(tx_buf, dma_len1),
384 if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
385 dev_kfree_skb_any(tx_buf->skb);
389 static void mtk_pdma_tx_clean(struct mtk_eth *eth)
391 struct mtk_tx_ring *ring = &eth->tx_ring;
395 for (i = 0; i < ring->tx_ring_size; i++)
396 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
402 dma_free_coherent(eth->dev,
403 ring->tx_ring_size * sizeof(*ring->tx_dma),
410 static void mtk_qdma_tx_clean(struct mtk_eth *eth)
412 struct mtk_tx_ring *ring = &eth->tx_ring;
416 for (i = 0; i < ring->tx_ring_size; i++)
417 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
423 dma_free_coherent(eth->dev,
424 ring->tx_ring_size * sizeof(*ring->tx_dma),
431 void mtk_stats_update_mac(struct mtk_mac *mac)
433 struct mtk_hw_stats *hw_stats = mac->hw_stats;
434 unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
437 base += hw_stats->reg_offset;
439 u64_stats_update_begin(&hw_stats->syncp);
441 if (mac->hw->soc->new_stats) {
442 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
443 stats = mtk_r32(mac->hw, base + 0x04);
445 hw_stats->rx_bytes += (stats << 32);
446 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
447 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
448 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
449 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
450 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
451 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
452 hw_stats->rx_flow_control_packets +=
453 mtk_r32(mac->hw, base + 0x24);
454 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
455 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
456 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
457 stats = mtk_r32(mac->hw, base + 0x34);
459 hw_stats->tx_bytes += (stats << 32);
460 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
462 hw_stats->tx_bytes += mtk_r32(mac->hw, base);
463 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
464 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
465 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
466 hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
467 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
468 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
469 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
470 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
471 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
472 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
473 hw_stats->rx_flow_control_packets +=
474 mtk_r32(mac->hw, base + 0x3c);
477 u64_stats_update_end(&hw_stats->syncp);
480 static void mtk_get_stats64(struct net_device *dev,
481 struct rtnl_link_stats64 *storage)
483 struct mtk_mac *mac = netdev_priv(dev);
484 struct mtk_hw_stats *hw_stats = mac->hw_stats;
485 unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
489 netdev_stats_to_stats64(storage, &dev->stats);
493 if (netif_running(dev) && netif_device_present(dev)) {
494 if (spin_trylock(&hw_stats->stats_lock)) {
495 mtk_stats_update_mac(mac);
496 spin_unlock(&hw_stats->stats_lock);
501 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
502 storage->rx_packets = hw_stats->rx_packets;
503 storage->tx_packets = hw_stats->tx_packets;
504 storage->rx_bytes = hw_stats->rx_bytes;
505 storage->tx_bytes = hw_stats->tx_bytes;
506 storage->collisions = hw_stats->tx_collisions;
507 storage->rx_length_errors = hw_stats->rx_short_errors +
508 hw_stats->rx_long_errors;
509 storage->rx_over_errors = hw_stats->rx_overflow;
510 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
511 storage->rx_errors = hw_stats->rx_checksum_errors;
512 storage->tx_aborted_errors = hw_stats->tx_skip;
513 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
515 storage->tx_errors = dev->stats.tx_errors;
516 storage->rx_dropped = dev->stats.rx_dropped;
517 storage->tx_dropped = dev->stats.tx_dropped;
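/* the hardware vlan tx offload only provides 16 VID slots (vid & 0xf);
 * if a slot is already taken the offload is disabled again and the stack
 * falls back to inserting tags in software
 */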
520 static int mtk_vlan_rx_add_vid(struct net_device *dev,
521 __be16 proto, u16 vid)
523 struct mtk_mac *mac = netdev_priv(dev);
524 struct mtk_eth *eth = mac->hw;
525 u32 idx = (vid & 0xf);
528 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
529 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
532 if (test_bit(idx, &eth->vlan_map)) {
533 netdev_warn(dev, "disable tx vlan offload\n");
534 dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
535 netdev_update_features(dev);
537 vlan_cfg = mtk_r32(eth,
538 mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
542 vlan_cfg |= (vid << 16);
544 vlan_cfg &= 0xffff0000;
548 vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
550 set_bit(idx, &eth->vlan_map);
556 static int mtk_vlan_rx_kill_vid(struct net_device *dev,
557 __be16 proto, u16 vid)
559 struct mtk_mac *mac = netdev_priv(dev);
560 struct mtk_eth *eth = mac->hw;
561 u32 idx = (vid & 0xf);
563 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
564 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
567 clear_bit(idx, &eth->vlan_map);
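/* number of currently unused descriptors in the pdma tx ring; the
 * subtraction wraps correctly because the ring size is a power of two
 */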
572 static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
575 return (u32)(ring->tx_ring_size -
576 ((ring->tx_next_idx - ring->tx_free_idx) &
577 (ring->tx_ring_size - 1)));
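/* pad undersized frames in software; SoCs with the padding_64b flag and
 * without the padding_bug flag skip this (the hardware is assumed to pad
 * for them)
 */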
580 static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
585 if (unlikely(skb->len >= VLAN_ETH_ZLEN))
588 if (eth->soc->padding_64b && !eth->soc->padding_bug)
591 if (skb_vlan_tag_present(skb))
593 else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
595 else if (!eth->soc->padding_64b)
603 ret = skb_pad(skb, len - skb->len);
607 skb_set_tail_pointer(skb, len);
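/* map an skb and its fragments onto pdma tx descriptors; each descriptor
 * can carry two buffers (txd1/PLEN0 and txd3/PLEN1), so fragments are
 * packed two per descriptor
 */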
612 static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
613 int tx_num, struct mtk_tx_ring *ring, bool gso)
615 struct mtk_mac *mac = netdev_priv(dev);
616 struct mtk_eth *eth = mac->hw;
617 struct skb_frag_struct *frag;
618 struct mtk_tx_dma txd, *ptxd;
619 struct mtk_tx_buf *tx_buf;
620 int i, j, k, frag_size, frag_map_size, offset;
621 dma_addr_t mapped_addr;
622 unsigned int nr_frags;
625 if (mtk_skb_padto(skb, eth)) {
626 netif_warn(eth, tx_err, dev, "tx padding failed!\n");
630 tx_buf = &ring->tx_buf[ring->tx_next_idx];
631 memset(tx_buf, 0, sizeof(*tx_buf));
632 memset(&txd, 0, sizeof(txd));
633 nr_frags = skb_shinfo(skb)->nr_frags;
635 /* init tx descriptor */
636 def_txd4 = eth->soc->txd4;
639 if (eth->soc->mac_count > 1)
640 txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
643 txd.txd4 |= TX_DMA_TSO;
645 /* TX Checksum offload */
646 if (skb->ip_summed == CHECKSUM_PARTIAL)
647 txd.txd4 |= TX_DMA_CHKSUM;
649 /* VLAN header offload */
650 if (skb_vlan_tag_present(skb)) {
651 u16 tag = skb_vlan_tag_get(skb);
653 txd.txd4 |= TX_DMA_INS_VLAN |
654 ((tag >> VLAN_PRIO_SHIFT) << 4) |
658 mapped_addr = dma_map_single(&dev->dev, skb->data,
659 skb_headlen(skb), DMA_TO_DEVICE);
660 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
663 txd.txd1 = mapped_addr;
664 txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
666 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
667 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
668 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
671 j = ring->tx_next_idx;
673 for (i = 0; i < nr_frags; i++) {
675 frag = &skb_shinfo(skb)->frags[i];
676 frag_size = skb_frag_size(frag);
678 while (frag_size > 0) {
679 frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
680 mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
683 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
687 j = NEXT_TX_DESP_IDX(j);
688 txd.txd1 = mapped_addr;
689 txd.txd2 = TX_DMA_PLEN0(frag_map_size);
692 tx_buf = &ring->tx_buf[j];
693 memset(tx_buf, 0, sizeof(*tx_buf));
695 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
696 dma_unmap_addr_set(tx_buf, dma_addr0,
698 dma_unmap_len_set(tx_buf, dma_len0,
701 txd.txd3 = mapped_addr;
702 txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
704 tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
705 tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
706 dma_unmap_addr_set(tx_buf, dma_addr1,
708 dma_unmap_len_set(tx_buf, dma_len1,
711 if (!((i == (nr_frags - 1)) &&
712 (frag_map_size == frag_size))) {
713 mtk_set_txd_pdma(&txd,
715 memset(&txd, 0, sizeof(txd));
718 frag_size -= frag_map_size;
719 offset += frag_map_size;
724 /* set last segment */
726 txd.txd2 |= TX_DMA_LS1;
728 txd.txd2 |= TX_DMA_LS0;
729 mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
731 /* store skb to cleanup */
734 netdev_sent_queue(dev, skb->len);
735 skb_tx_timestamp(skb);
737 ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
738 /* make sure that all changes to the dma ring are flushed before we
742 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
744 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
745 mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
750 j = ring->tx_next_idx;
751 for (i = 0; i < tx_num; i++) {
752 ptxd = &ring->tx_dma[j];
753 tx_buf = &ring->tx_buf[j];
756 mtk_txd_unmap(&dev->dev, tx_buf);
758 ptxd->txd2 = TX_DMA_DESP2_DEF;
759 j = NEXT_TX_DESP_IDX(j);
761 /* make sure that all changes to the dma ring are flushed before we
768 /* the qdma core needs scratch memory to be set up */
769 static int mtk_init_fq_dma(struct mtk_eth *eth)
771 dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
772 int cnt = eth->soc->dma_ring_size;
775 eth->scratch_ring = dma_alloc_coherent(eth->dev,
776 cnt * sizeof(struct mtk_tx_dma),
778 GFP_ATOMIC | __GFP_ZERO);
779 if (unlikely(!eth->scratch_ring))
782 eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
784 dma_addr = dma_map_single(eth->dev,
785 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
787 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
790 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
791 phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
793 for (i = 0; i < cnt; i++) {
794 eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
796 eth->scratch_ring[i].txd2 = (phy_ring_head +
797 ((i + 1) * sizeof(struct mtk_tx_dma)));
798 eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
801 mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
802 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
803 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
804 mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
809 static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
811 void *ret = ring->tx_dma;
813 return ret + (desc - ring->tx_phys);
816 static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
817 struct mtk_tx_dma *txd)
819 return mtk_qdma_phys_to_virt(ring, txd->txd2);
822 static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
823 struct mtk_tx_dma *txd)
825 int idx = txd - ring->tx_dma;
827 return &ring->tx_buf[idx];
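/* map an skb onto qdma tx descriptors; qdma descriptors are chained via
 * txd2 and carry one buffer each
 */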
830 static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
831 int tx_num, struct mtk_tx_ring *ring, bool gso)
833 struct mtk_mac *mac = netdev_priv(dev);
834 struct mtk_eth *eth = mac->hw;
835 struct mtk_tx_dma *itxd, *txd;
836 struct mtk_tx_buf *tx_buf;
837 dma_addr_t mapped_addr;
838 unsigned int nr_frags;
840 u32 txd4 = eth->soc->txd4;
842 itxd = ring->tx_next_free;
843 if (itxd == ring->tx_last_free)
846 if (eth->soc->mac_count > 1)
847 txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
849 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
850 memset(tx_buf, 0, sizeof(*tx_buf));
855 /* TX Checksum offload */
856 if (skb->ip_summed == CHECKSUM_PARTIAL)
857 txd4 |= TX_DMA_CHKSUM;
859 /* VLAN header offload */
860 if (skb_vlan_tag_present(skb))
861 txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
863 mapped_addr = dma_map_single(&dev->dev, skb->data,
864 skb_headlen(skb), DMA_TO_DEVICE);
865 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
868 WRITE_ONCE(itxd->txd1, mapped_addr);
869 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
870 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
871 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
875 nr_frags = skb_shinfo(skb)->nr_frags;
876 for (i = 0; i < nr_frags; i++) {
877 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
878 unsigned int offset = 0;
879 int frag_size = skb_frag_size(frag);
882 bool last_frag = false;
883 unsigned int frag_map_size;
885 txd = mtk_tx_next_qdma(ring, txd);
886 if (txd == ring->tx_last_free)
890 frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
891 mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
894 if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
897 if (i == nr_frags - 1 &&
898 (frag_size - frag_map_size) == 0)
901 WRITE_ONCE(txd->txd1, mapped_addr);
902 WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
903 TX_DMA_PLEN0(frag_map_size) |
904 last_frag * TX_DMA_LS0) |
906 WRITE_ONCE(txd->txd4, 0);
908 tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
909 tx_buf = mtk_desc_to_tx_buf(ring, txd);
910 memset(tx_buf, 0, sizeof(*tx_buf));
912 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
913 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
914 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
915 frag_size -= frag_map_size;
916 offset += frag_map_size;
920 /* store skb to cleanup */
923 WRITE_ONCE(itxd->txd4, txd4);
924 WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
925 (!nr_frags * TX_DMA_LS0)));
927 netdev_sent_queue(dev, skb->len);
928 skb_tx_timestamp(skb);
930 ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
931 atomic_sub(n_desc, &ring->tx_free_count);
933 /* make sure that all changes to the dma ring are flushed before we
938 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
939 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
945 tx_buf = mtk_desc_to_tx_buf(ring, txd);
948 mtk_txd_unmap(&dev->dev, tx_buf);
950 itxd->txd3 = TX_DMA_DESP2_DEF;
951 itxd = mtk_tx_next_qdma(ring, itxd);
952 } while (itxd != txd);
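/* estimate the worst-case number of tx descriptors needed for an skb;
 * fragments larger than TX_DMA_BUF_LEN are split, and the total is halved
 * because a descriptor can take two buffers
 */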
957 static inline int mtk_cal_txd_req(struct sk_buff *skb)
960 struct skb_frag_struct *frag;
963 if (skb_is_gso(skb)) {
964 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
965 frag = &skb_shinfo(skb)->frags[i];
966 nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
969 nfrags += skb_shinfo(skb)->nr_frags;
972 return DIV_ROUND_UP(nfrags, 2);
975 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
977 struct mtk_mac *mac = netdev_priv(dev);
978 struct mtk_eth *eth = mac->hw;
979 struct mtk_tx_ring *ring = &eth->tx_ring;
980 struct net_device_stats *stats = &dev->stats;
985 tx_num = mtk_cal_txd_req(skb);
986 if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
987 netif_stop_queue(dev);
988 netif_err(eth, tx_queued, dev,
989 "Tx Ring full when queue awake!\n");
990 return NETDEV_TX_BUSY;
993 /* TSO: fill MSS info in tcp checksum field */
994 if (skb_is_gso(skb)) {
995 if (skb_cow_head(skb, 0)) {
996 netif_warn(eth, tx_err, dev,
997 "GSO expand head fail.\n");
1001 if (skb_shinfo(skb)->gso_type &
1002 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1004 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1008 if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
1011 stats->tx_packets++;
1012 stats->tx_bytes += len;
1014 if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
1015 netif_stop_queue(dev);
1017 if (unlikely(atomic_read(&ring->tx_free_count) >
1019 netif_wake_queue(dev);
1022 return NETDEV_TX_OK;
1025 stats->tx_dropped++;
1027 return NETDEV_TX_OK;
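/* rx NAPI handler: a replacement buffer is allocated and mapped before
 * the received one is handed to the stack, so the ring slot can always be
 * refilled
 */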
1030 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1031 struct mtk_eth *eth, u32 rx_intr)
1033 struct mtk_soc_data *soc = eth->soc;
1034 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1035 int idx = ring->rx_calc_idx;
1037 struct sk_buff *skb;
1038 u8 *data, *new_data;
1039 struct mtk_rx_dma *rxd, trxd;
1042 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1043 checksum_bit = soc->checksum_bit;
1047 if (eth->soc->rx_2b_offset)
1052 while (done < budget) {
1053 struct net_device *netdev;
1054 unsigned int pktlen;
1055 dma_addr_t dma_addr;
1058 idx = NEXT_RX_DESP_IDX(idx);
1059 rxd = &ring->rx_dma[idx];
1060 data = ring->rx_data[idx];
1062 mtk_get_rxd(&trxd, rxd);
1063 if (!(trxd.rxd2 & RX_DMA_DONE))
1066 /* find out which mac the packet came from. values start at 1 */
1067 if (eth->soc->mac_count > 1) {
1068 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1071 if (mac < 0 || mac >= eth->soc->mac_count)
1075 netdev = eth->netdev[mac];
1077 /* alloc new buffer */
1078 new_data = napi_alloc_frag(ring->frag_size);
1079 if (unlikely(!new_data || !netdev)) {
1080 netdev->stats.rx_dropped++;
1083 dma_addr = dma_map_single(&netdev->dev,
1084 new_data + NET_SKB_PAD + pad,
1087 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
1088 skb_free_frag(new_data);
1093 skb = build_skb(data, ring->frag_size);
1094 if (unlikely(!skb)) {
1095 put_page(virt_to_head_page(new_data));
1098 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1100 dma_unmap_single(&netdev->dev, trxd.rxd1,
1101 ring->rx_buf_size, DMA_FROM_DEVICE);
1102 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1104 skb_put(skb, pktlen);
1105 if (trxd.rxd4 & checksum_bit)
1106 skb->ip_summed = CHECKSUM_UNNECESSARY;
1108 skb_checksum_none_assert(skb);
1109 skb->protocol = eth_type_trans(skb, netdev);
1111 netdev->stats.rx_packets++;
1112 netdev->stats.rx_bytes += pktlen;
1114 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1115 RX_DMA_VID(trxd.rxd3))
1116 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1117 RX_DMA_VID(trxd.rxd3));
1118 napi_gro_receive(napi, skb);
1120 ring->rx_data[idx] = new_data;
1121 rxd->rxd1 = (unsigned int)dma_addr;
1124 if (eth->soc->rx_sg_dma)
1125 rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
1127 rxd->rxd2 = RX_DMA_LSO;
1129 ring->rx_calc_idx = idx;
1130 /* make sure that all changes to the dma ring are flushed before
1134 if (eth->soc->dma_type == MTK_QDMA)
1135 mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
1137 mtk_reg_w32(eth, ring->rx_calc_idx,
1138 MTK_REG_RX_CALC_IDX0);
1143 mtk_irq_ack(eth, rx_intr);
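/* reclaim completed pdma tx descriptors by walking from the software
 * free index up to the hardware DTX index
 */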
1148 static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
1150 struct sk_buff *skb;
1151 struct mtk_tx_buf *tx_buf;
1154 struct mtk_tx_ring *ring = &eth->tx_ring;
1155 unsigned int bytes = 0;
1157 idx = ring->tx_free_idx;
1158 hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
1160 while ((idx != hwidx) && budget) {
1161 tx_buf = &ring->tx_buf[idx];
1167 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
1172 mtk_txd_unmap(eth->dev, tx_buf);
1173 idx = NEXT_TX_DESP_IDX(idx);
1175 ring->tx_free_idx = idx;
1176 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
1178 /* read the hw index again to make sure no new tx packets arrived */
1179 if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
1183 netdev_completed_queue(*eth->netdev, done, bytes);
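/* reclaim completed qdma tx descriptors by walking from the CRX (cpu)
 * pointer towards the DRX (dma) pointer, accounting bytes and packets
 * per mac
 */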
1188 static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
1190 struct mtk_tx_ring *ring = &eth->tx_ring;
1191 struct mtk_tx_dma *desc;
1192 struct sk_buff *skb;
1193 struct mtk_tx_buf *tx_buf;
1194 int total = 0, done[MTK_MAX_DEVS];
1195 unsigned int bytes[MTK_MAX_DEVS];
1199 memset(done, 0, sizeof(done));
1200 memset(bytes, 0, sizeof(bytes));
1202 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1203 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1205 desc = mtk_qdma_phys_to_virt(ring, cpu);
1207 while ((cpu != dma) && budget) {
1208 u32 next_cpu = desc->txd2;
1211 desc = mtk_tx_next_qdma(ring, desc);
1212 if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
1215 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
1219 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1224 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
1225 bytes[mac] += skb->len;
1229 mtk_txd_unmap(eth->dev, tx_buf);
1231 ring->tx_last_free->txd2 = next_cpu;
1232 ring->tx_last_free = desc;
1233 atomic_inc(&ring->tx_free_count);
1238 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1240 /* read the hw index again to make sure no new tx packets arrived */
1241 if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
1244 for (i = 0; i < eth->soc->mac_count; i++) {
1247 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1254 static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
1257 struct mtk_tx_ring *ring = &eth->tx_ring;
1258 struct net_device *netdev = eth->netdev[0];
1261 done = eth->tx_ring.tx_poll(eth, budget, tx_again);
1263 mtk_irq_ack(eth, tx_intr);
1269 if (unlikely(!netif_queue_stopped(netdev)))
1272 if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
1273 netif_wake_queue(netdev);
1278 static void mtk_stats_update(struct mtk_eth *eth)
1282 for (i = 0; i < eth->soc->mac_count; i++) {
1283 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1285 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1286 mtk_stats_update_mac(eth->mac[i]);
1287 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
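/* combined NAPI poll: handle tx completions and rx in a single handler
 * and only re-enable the tx/rx interrupts once no more work is pending
 */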
1292 static int mtk_poll(struct napi_struct *napi, int budget)
1294 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1295 u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
1296 int tx_done, rx_done;
1297 bool tx_again = false;
1299 status = mtk_irq_pending(eth);
1300 mtk_status = mtk_irq_pending_status(eth);
1301 tx_intr = eth->soc->tx_int;
1302 rx_intr = eth->soc->rx_int;
1303 status_intr = eth->soc->status_int;
1308 if (status & tx_intr)
1309 tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
1311 if (status & rx_intr)
1312 rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
1314 if (unlikely(mtk_status & status_intr)) {
1315 mtk_stats_update(eth);
1316 mtk_irq_ack_status(eth, status_intr);
1319 if (unlikely(netif_msg_intr(eth))) {
1320 mask = mtk_irq_enabled(eth);
1321 netdev_info(eth->netdev[0],
1322 "done tx %d, rx %d, intr 0x%08x/0x%x\n",
1323 tx_done, rx_done, status, mask);
1326 if (tx_again || rx_done == budget)
1329 status = mtk_irq_pending(eth);
1330 if (status & (tx_intr | rx_intr))
1333 napi_complete(napi);
1334 mtk_irq_enable(eth, tx_intr | rx_intr);
1339 static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
1342 struct mtk_tx_ring *ring = &eth->tx_ring;
1344 ring->tx_ring_size = eth->soc->dma_ring_size;
1345 ring->tx_free_idx = 0;
1346 ring->tx_next_idx = 0;
1347 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
1350 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
1356 dma_alloc_coherent(eth->dev,
1357 ring->tx_ring_size * sizeof(*ring->tx_dma),
1358 &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
1362 for (i = 0; i < ring->tx_ring_size; i++) {
1363 ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
1364 ring->tx_dma[i].txd4 = eth->soc->txd4;
1367 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
1368 ring->tx_map = mtk_pdma_tx_map;
1369 ring->tx_poll = mtk_pdma_tx_poll;
1370 ring->tx_clean = mtk_pdma_tx_clean;
1372 /* make sure that all changes to the dma ring are flushed before we
1377 mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
1378 mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
1379 mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
1380 mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
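/* allocate the qdma tx descriptor ring; descriptors are chained through
 * txd2 so the hardware can follow them as a linked list
 */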
1388 static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
1390 struct mtk_tx_ring *ring = &eth->tx_ring;
1391 int i, sz = sizeof(*ring->tx_dma);
1393 ring->tx_ring_size = eth->soc->dma_ring_size;
1394 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
1399 ring->tx_dma = dma_zalloc_coherent(eth->dev,
1400 ring->tx_ring_size * sz,
1402 GFP_ATOMIC | __GFP_ZERO);
1406 for (i = 0; i < ring->tx_ring_size; i++) {
1407 int next = (i + 1) % ring->tx_ring_size;
1408 u32 next_ptr = ring->tx_phys + next * sz;
1410 ring->tx_dma[i].txd2 = next_ptr;
1411 ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
1414 atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
1415 ring->tx_next_free = &ring->tx_dma[0];
1416 ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
1417 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
1420 ring->tx_map = mtk_qdma_tx_map;
1421 ring->tx_poll = mtk_qdma_tx_poll;
1422 ring->tx_clean = mtk_qdma_tx_clean;
1424 /* make sure that all changes to the dma ring are flushed before we
1429 mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
1430 mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
1432 ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
1435 ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
1444 static int mtk_qdma_init(struct mtk_eth *eth, int ring)
1448 err = mtk_init_fq_dma(eth);
1452 err = mtk_qdma_tx_alloc_tx(eth);
1456 err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
1460 mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
1461 mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
1462 mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
1463 mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
1464 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
1466 /* Enable random early drop and set drop threshold automatically */
1467 mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
1468 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1473 static int mtk_pdma_qdma_init(struct mtk_eth *eth)
1475 int err = mtk_qdma_init(eth, 1);
1480 err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
1484 mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
1485 mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
1486 mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
1487 mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
1492 static int mtk_pdma_init(struct mtk_eth *eth)
1494 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1497 err = mtk_pdma_tx_alloc(eth);
1501 err = mtk_dma_rx_alloc(eth, ring);
1505 mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
1506 mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
1507 mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
1508 mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
1513 static void mtk_dma_free(struct mtk_eth *eth)
1517 for (i = 0; i < eth->soc->mac_count; i++)
1519 netdev_reset_queue(eth->netdev[i]);
1520 eth->tx_ring.tx_clean(eth);
1521 mtk_clean_rx(eth, &eth->rx_ring[0]);
1522 mtk_clean_rx(eth, &eth->rx_ring[1]);
1523 kfree(eth->scratch_head);
1526 static void mtk_tx_timeout(struct net_device *dev)
1528 struct mtk_mac *mac = netdev_priv(dev);
1529 struct mtk_eth *eth = mac->hw;
1530 struct mtk_tx_ring *ring = &eth->tx_ring;
1532 eth->netdev[mac->id]->stats.tx_errors++;
1533 netif_err(eth, tx_err, dev,
1534 "transmit timed out\n");
1535 if (eth->soc->dma_type & MTK_PDMA) {
1536 netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
1537 mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
1538 netif_info(eth, drv, dev,
1539 "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
1540 0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
1541 mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
1542 mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
1543 mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
1547 if (eth->soc->dma_type & MTK_QDMA) {
1548 netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
1549 mtk_r32(eth, MTK_QDMA_GLO_CFG));
1550 netif_info(eth, drv, dev,
1551 "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
1552 0, mtk_r32(eth, MTK_QTX_CTX_PTR),
1553 mtk_r32(eth, MTK_QTX_DTX_PTR),
1554 mtk_r32(eth, MTK_QTX_CRX_PTR),
1555 mtk_r32(eth, MTK_QTX_DRX_PTR),
1556 atomic_read(&ring->tx_free_count));
1558 netif_info(eth, drv, dev,
1559 "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
1560 0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
1561 mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
1562 mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
1563 mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
1565 schedule_work(&mac->pending_work);
1568 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
1570 struct mtk_eth *eth = _eth;
1571 u32 status, int_mask;
1573 status = mtk_irq_pending(eth);
1574 if (unlikely(!status))
1577 int_mask = (eth->soc->rx_int | eth->soc->tx_int);
1578 if (likely(status & int_mask)) {
1579 if (likely(napi_schedule_prep(&eth->rx_napi)))
1580 __napi_schedule(&eth->rx_napi);
1582 mtk_irq_ack(eth, status);
1584 mtk_irq_disable(eth, int_mask);
1589 #ifdef CONFIG_NET_POLL_CONTROLLER
1590 static void mtk_poll_controller(struct net_device *dev)
1592 struct mtk_mac *mac = netdev_priv(dev);
1593 struct mtk_eth *eth = mac->hw;
1594 u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
1596 mtk_irq_disable(eth, int_mask);
1597 mtk_handle_irq(dev->irq, dev);
1598 mtk_irq_enable(eth, int_mask);
1602 int mtk_set_clock_cycle(struct mtk_eth *eth)
1604 unsigned long sysclk = eth->sysclk;
1606 sysclk /= MTK_US_CYC_CNT_DIVISOR;
1607 sysclk <<= MTK_US_CYC_CNT_SHIFT;
1609 mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
1610 ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
1616 void mtk_fwd_config(struct mtk_eth *eth)
1620 fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
1622 /* disable jumbo frame */
1623 if (eth->soc->jumbo_frame)
1624 fwd_cfg &= ~MTK_GDM1_JMB_EN;
1626 /* forward unicast/multicast/broadcast frames to the cpu */
1629 mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
1632 void mtk_csum_config(struct mtk_eth *eth)
1634 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1635 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
1636 (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1639 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
1640 ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1642 if (eth->soc->hw_features & NETIF_F_IP_CSUM)
1643 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
1644 (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1647 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
1648 ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1652 static int mtk_start_dma(struct mtk_eth *eth)
1654 unsigned long flags;
1658 if (eth->soc->dma_type == MTK_PDMA)
1659 err = mtk_pdma_init(eth);
1660 else if (eth->soc->dma_type == MTK_QDMA)
1661 err = mtk_qdma_init(eth, 0);
1663 err = mtk_pdma_qdma_init(eth);
1669 spin_lock_irqsave(&eth->page_lock, flags);
1671 val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
1672 if (eth->soc->rx_2b_offset)
1673 val |= MTK_RX_2B_OFFSET;
1674 val |= eth->soc->pdma_glo_cfg;
1676 if (eth->soc->dma_type & MTK_PDMA)
1677 mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
1679 if (eth->soc->dma_type & MTK_QDMA)
1680 mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
1682 spin_unlock_irqrestore(&eth->page_lock, flags);
1687 static int mtk_open(struct net_device *dev)
1689 struct mtk_mac *mac = netdev_priv(dev);
1690 struct mtk_eth *eth = mac->hw;
1692 dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
1694 if (!atomic_read(&eth->dma_refcnt)) {
1695 int err = mtk_start_dma(eth);
1700 napi_enable(&eth->rx_napi);
1701 mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
1703 atomic_inc(&eth->dma_refcnt);
1706 eth->phy->start(mac);
1708 if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
1709 netif_carrier_on(dev);
1711 netif_start_queue(dev);
1712 eth->soc->fwd_config(eth);
1717 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1719 unsigned long flags;
1723 /* stop the dma engine */
1724 spin_lock_irqsave(&eth->page_lock, flags);
1725 val = mtk_r32(eth, glo_cfg);
1726 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1728 spin_unlock_irqrestore(&eth->page_lock, flags);
1730 /* wait for dma stop */
1731 for (i = 0; i < 10; i++) {
1732 val = mtk_r32(eth, glo_cfg);
1733 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1741 static int mtk_stop(struct net_device *dev)
1743 struct mtk_mac *mac = netdev_priv(dev);
1744 struct mtk_eth *eth = mac->hw;
1746 netif_tx_disable(dev);
1748 eth->phy->stop(mac);
1750 if (!atomic_dec_and_test(&eth->dma_refcnt))
1753 mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
1754 napi_disable(&eth->rx_napi);
1756 if (eth->soc->dma_type & MTK_PDMA)
1757 mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
1759 if (eth->soc->dma_type & MTK_QDMA)
1760 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1767 static int __init mtk_init_hw(struct mtk_eth *eth)
1771 eth->soc->reset_fe(eth);
1773 if (eth->soc->switch_init)
1774 if (eth->soc->switch_init(eth)) {
1775 dev_err(eth->dev, "failed to initialize switch core\n");
1779 err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
1780 dev_name(eth->dev), eth);
1784 err = mtk_mdio_init(eth);
1788 /* disable delayed interrupts and mask the normal interrupts */
1789 mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
1790 if (eth->soc->dma_type & MTK_QDMA)
1791 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
1792 mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
1794 /* the frame engine pushes the VLAN tag according to the VIDX field in the Tx descriptor */
1795 if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
1796 for (i = 0; i < 16; i += 2)
1797 mtk_w32(eth, ((i + 1) << 16) + i,
1798 mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
1801 if (eth->soc->fwd_config(eth))
1802 dev_err(eth->dev, "unable to get clock\n");
1804 if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
1805 mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
1806 mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
1812 static int __init mtk_init(struct net_device *dev)
1814 struct mtk_mac *mac = netdev_priv(dev);
1815 struct mtk_eth *eth = mac->hw;
1816 struct device_node *port;
1817 const char *mac_addr;
1820 mac_addr = of_get_mac_address(mac->of_node);
1822 ether_addr_copy(dev->dev_addr, mac_addr);
1824 /* If the mac address is invalid, use a random mac address */
1825 if (!is_valid_ether_addr(dev->dev_addr)) {
1826 eth_hw_addr_random(dev);
1827 dev_err(eth->dev, "generated random MAC address %pM\n",
1830 mac->hw->soc->set_mac(mac, dev->dev_addr);
1832 if (eth->soc->port_init)
1833 for_each_child_of_node(mac->of_node, port)
1834 if (of_device_is_compatible(port,
1835 "mediatek,eth-port") &&
1836 of_device_is_available(port))
1837 eth->soc->port_init(eth, mac, port);
1840 err = eth->phy->connect(mac);
1848 static void mtk_uninit(struct net_device *dev)
1850 struct mtk_mac *mac = netdev_priv(dev);
1851 struct mtk_eth *eth = mac->hw;
1854 eth->phy->disconnect(mac);
1855 mtk_mdio_cleanup(eth);
1857 mtk_irq_disable(eth, ~0);
1858 free_irq(dev->irq, dev);
1861 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1863 struct mtk_mac *mac = netdev_priv(dev);
1872 return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
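/* on SoCs with jumbo frame support, an MTU change may switch the rx
 * buffers between the default size and full pages and reprogram the jumbo
 * length in the GDMA1 forward config
 */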
1880 static int mtk_change_mtu(struct net_device *dev, int new_mtu)
1882 struct mtk_mac *mac = netdev_priv(dev);
1883 struct mtk_eth *eth = mac->hw;
1884 int frag_size, old_mtu;
1887 if (!eth->soc->jumbo_frame)
1888 return eth_change_mtu(dev, new_mtu);
1890 frag_size = mtk_max_frag_size(new_mtu);
1891 if (new_mtu < 68 || frag_size > PAGE_SIZE)
1897 /* return early if the buffer sizes will not change */
1898 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1900 if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
1903 if (new_mtu <= ETH_DATA_LEN)
1904 eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
1906 eth->rx_ring[0].frag_size = PAGE_SIZE;
1907 eth->rx_ring[0].rx_buf_size =
1908 mtk_max_buf_size(eth->rx_ring[0].frag_size);
1910 if (!netif_running(dev))
1914 fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
1915 if (new_mtu <= ETH_DATA_LEN) {
1916 fwd_cfg &= ~MTK_GDM1_JMB_EN;
1918 fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
1919 fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
1920 MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
1922 mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
1924 return mtk_open(dev);
1927 static void mtk_pending_work(struct work_struct *work)
1929 struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
1930 struct mtk_eth *eth = mac->hw;
1931 struct net_device *dev = eth->netdev[mac->id];
1937 err = mtk_open(dev);
1939 netif_alert(eth, ifup, dev,
1940 "Driver up/down cycle failed, closing device.\n");
1946 static int mtk_cleanup(struct mtk_eth *eth)
1950 for (i = 0; i < eth->soc->mac_count; i++) {
1951 struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
1953 if (!eth->netdev[i])
1956 unregister_netdev(eth->netdev[i]);
1957 free_netdev(eth->netdev[i]);
1958 cancel_work_sync(&mac->pending_work);
1964 static const struct net_device_ops mtk_netdev_ops = {
1965 .ndo_init = mtk_init,
1966 .ndo_uninit = mtk_uninit,
1967 .ndo_open = mtk_open,
1968 .ndo_stop = mtk_stop,
1969 .ndo_start_xmit = mtk_start_xmit,
1970 .ndo_set_mac_address = mtk_set_mac_address,
1971 .ndo_validate_addr = eth_validate_addr,
1972 .ndo_do_ioctl = mtk_do_ioctl,
1973 .ndo_change_mtu = mtk_change_mtu,
1974 .ndo_tx_timeout = mtk_tx_timeout,
1975 .ndo_get_stats64 = mtk_get_stats64,
1976 .ndo_vlan_rx_add_vid = mtk_vlan_rx_add_vid,
1977 .ndo_vlan_rx_kill_vid = mtk_vlan_rx_kill_vid,
1978 #ifdef CONFIG_NET_POLL_CONTROLLER
1979 .ndo_poll_controller = mtk_poll_controller,
1983 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1985 struct mtk_mac *mac;
1986 const __be32 *_id = of_get_property(np, "reg", NULL);
1990 dev_err(eth->dev, "missing mac id\n");
1993 id = be32_to_cpup(_id);
1994 if (id >= eth->soc->mac_count || eth->netdev[id]) {
1995 dev_err(eth->dev, "%d is not a valid mac id\n", id);
1999 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2000 if (!eth->netdev[id]) {
2001 dev_err(eth->dev, "alloc_etherdev failed\n");
2004 mac = netdev_priv(eth->netdev[id]);
2009 INIT_WORK(&mac->pending_work, mtk_pending_work);
2011 if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
2012 mac->hw_stats = devm_kzalloc(eth->dev,
2013 sizeof(*mac->hw_stats),
2015 if (!mac->hw_stats) {
2019 spin_lock_init(&mac->hw_stats->stats_lock);
2020 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2023 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2024 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2025 eth->netdev[id]->base_addr = (unsigned long)eth->base;
2027 if (eth->soc->init_data)
2028 eth->soc->init_data(eth->soc, eth->netdev[id]);
2030 eth->netdev[id]->vlan_features = eth->soc->hw_features &
2031 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2032 eth->netdev[id]->features |= eth->soc->hw_features;
2034 if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
2035 eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2037 mtk_set_ethtool_ops(eth->netdev[id]);
2039 err = register_netdev(eth->netdev[id]);
2041 dev_err(eth->dev, "error bringing up device\n");
2045 eth->netdev[id]->irq = eth->irq;
2046 netif_info(eth, probe, eth->netdev[id],
2047 "mediatek frame engine at 0x%08lx, irq %d\n",
2048 eth->netdev[id]->base_addr, eth->netdev[id]->irq);
2053 free_netdev(eth->netdev[id]);
2057 static int mtk_probe(struct platform_device *pdev)
2059 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2060 const struct of_device_id *match;
2061 struct device_node *mac_np;
2062 struct mtk_soc_data *soc;
2063 struct mtk_eth *eth;
2067 device_reset(&pdev->dev);
2069 match = of_match_device(of_mtk_match, &pdev->dev);
2070 soc = (struct mtk_soc_data *)match->data;
2073 mtk_reg_table = soc->reg_table;
2075 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2079 eth->base = devm_ioremap_resource(&pdev->dev, res);
2080 if (IS_ERR(eth->base))
2081 return PTR_ERR(eth->base);
2083 spin_lock_init(&eth->page_lock);
2085 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2087 if (IS_ERR(eth->ethsys))
2088 return PTR_ERR(eth->ethsys);
2090 eth->irq = platform_get_irq(pdev, 0);
2092 dev_err(&pdev->dev, "no IRQ resource found\n");
2096 sysclk = devm_clk_get(&pdev->dev, NULL);
2097 if (IS_ERR(sysclk)) {
2099 "the clock is not defined in the devicetree\n");
2102 eth->sysclk = clk_get_rate(sysclk);
2104 eth->switch_np = of_parse_phandle(pdev->dev.of_node,
2105 "mediatek,switch", 0);
2106 if (soc->has_switch && !eth->switch_np) {
2107 dev_err(&pdev->dev, "failed to read switch phandle\n");
2111 eth->dev = &pdev->dev;
2113 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2115 err = mtk_init_hw(eth);
2119 if (eth->soc->mac_count > 1) {
2120 for_each_child_of_node(pdev->dev.of_node, mac_np) {
2121 if (!of_device_is_compatible(mac_np,
2122 "mediatek,eth-mac"))
2125 if (!of_device_is_available(mac_np))
2128 err = mtk_add_mac(eth, mac_np);
2133 init_dummy_netdev(&eth->dummy_dev);
2134 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
2137 err = mtk_add_mac(eth, pdev->dev.of_node);
2140 netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
2144 platform_set_drvdata(pdev, eth);
2153 static int mtk_remove(struct platform_device *pdev)
2155 struct mtk_eth *eth = platform_get_drvdata(pdev);
2157 netif_napi_del(&eth->rx_napi);
2159 platform_set_drvdata(pdev, NULL);
2164 static struct platform_driver mtk_driver = {
2166 .remove = mtk_remove,
2168 .name = "mtk_soc_eth",
2169 .of_match_table = of_mtk_match,
2173 module_platform_driver(mtk_driver);
2175 MODULE_LICENSE("GPL");
2176 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
2177 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");