1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
6 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
9 #include <linux/of_device.h>
10 #include <linux/of_mdio.h>
11 #include <linux/of_net.h>
12 #include <linux/mfd/syscon.h>
13 #include <linux/regmap.h>
14 #include <linux/clk.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/if_vlan.h>
17 #include <linux/reset.h>
18 #include <linux/tcp.h>
19 #include <linux/interrupt.h>
20 #include <linux/pinctrl/devinfo.h>
21 #include <linux/phylink.h>
24 #include "mtk_eth_soc.h"
26 static int mtk_msg_level = -1;
27 module_param_named(msg_level, mtk_msg_level, int, 0);
28 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
30 #define MTK_ETHTOOL_STAT(x) { #x, \
31 offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
33 /* strings used by ethtool */
34 static const struct mtk_ethtool_stats {
35 char str[ETH_GSTRING_LEN];
37 } mtk_ethtool_stats[] = {
38 MTK_ETHTOOL_STAT(tx_bytes),
39 MTK_ETHTOOL_STAT(tx_packets),
40 MTK_ETHTOOL_STAT(tx_skip),
41 MTK_ETHTOOL_STAT(tx_collisions),
42 MTK_ETHTOOL_STAT(rx_bytes),
43 MTK_ETHTOOL_STAT(rx_packets),
44 MTK_ETHTOOL_STAT(rx_overflow),
45 MTK_ETHTOOL_STAT(rx_fcs_errors),
46 MTK_ETHTOOL_STAT(rx_short_errors),
47 MTK_ETHTOOL_STAT(rx_long_errors),
48 MTK_ETHTOOL_STAT(rx_checksum_errors),
49 MTK_ETHTOOL_STAT(rx_flow_control_packets),
52 static const char * const mtk_clks_source_name[] = {
53 "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
54 "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
55 "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
56 "sgmii_ck", "eth2pll",
59 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
61 __raw_writel(val, eth->base + reg);
64 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
66 return __raw_readl(eth->base + reg);
69 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
73 val = mtk_r32(eth, reg);
76 mtk_w32(eth, val, reg);
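/* Wait for a pending indirect MDIO access in MTK_PHY_IAC to complete;
 * gives up and logs an error once PHY_IAC_TIMEOUT has elapsed.
 */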
80 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
82 unsigned long t_start = jiffies;
85 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
87 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
92 dev_err(eth->dev, "mdio: MDIO timeout\n");
96 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
97 u32 phy_register, u32 write_data)
99 if (mtk_mdio_busy_wait(eth))
102 write_data &= 0xffff;
104 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
105 (phy_register << PHY_IAC_REG_SHIFT) |
106 (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
109 if (mtk_mdio_busy_wait(eth))
115 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
119 if (mtk_mdio_busy_wait(eth))
122 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
123 (phy_reg << PHY_IAC_REG_SHIFT) |
124 (phy_addr << PHY_IAC_ADDR_SHIFT),
127 if (mtk_mdio_busy_wait(eth))
130 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
135 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
136 int phy_reg, u16 val)
138 struct mtk_eth *eth = bus->priv;
140 return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
143 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
145 struct mtk_eth *eth = bus->priv;
147 return _mtk_mdio_read(eth, phy_addr, phy_reg);
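/* On MT7621, GMAC0 TRGMII setup only selects the DDR PLL bits in
 * ETHSYS_CLKCFG0; TRGMII together with DDR2 memory is rejected as
 * unsupported.
 */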
150 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
151 phy_interface_t interface)
155 /* Check DDR memory type.
156 * Currently TRGMII mode with DDR2 memory is not supported. */
158 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
159 if (interface == PHY_INTERFACE_MODE_TRGMII &&
160 val & SYSCFG_DRAM_TYPE_DDR2) {
162 "TRGMII mode with DDR2 memory is not supported!\n");
166 val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
167 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
169 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
170 ETHSYS_TRGMII_MT7621_MASK, val);
175 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
176 phy_interface_t interface, int speed)
181 if (interface == PHY_INTERFACE_MODE_TRGMII) {
182 mtk_w32(eth, TRGMII_MODE, INTF_MODE);
184 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
186 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
190 val = (speed == SPEED_1000) ?
191 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
192 mtk_w32(eth, val, INTF_MODE);
194 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
195 ETHSYS_TRGMII_CLK_SEL362_5,
196 ETHSYS_TRGMII_CLK_SEL362_5);
198 val = (speed == SPEED_1000) ? 250000000 : 500000000;
199 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
201 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
203 val = (speed == SPEED_1000) ?
204 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
205 mtk_w32(eth, val, TRGMII_RCK_CTRL);
207 val = (speed == SPEED_1000) ?
208 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
209 mtk_w32(eth, val, TRGMII_TCK_CTRL);
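/* phylink .mac_config callback: select the SoC pin functions and the
 * SYSCFG0 GE mode for the requested interface, set up the SGMII unit
 * where applicable and refresh the per-MAC MCR register.
 */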
212 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
213 const struct phylink_link_state *state)
215 struct mtk_mac *mac = container_of(config, struct mtk_mac,
217 struct mtk_eth *eth = mac->hw;
218 u32 mcr_cur, mcr_new, sid, i;
219 int val, ge_mode, err;
221 /* MT76x8 has no hardware settings to apply for the MAC */
222 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
223 mac->interface != state->interface) {
224 /* Set up SoC pin functions */
225 switch (state->interface) {
226 case PHY_INTERFACE_MODE_TRGMII:
229 if (!MTK_HAS_CAPS(mac->hw->soc->caps,
233 case PHY_INTERFACE_MODE_RGMII_TXID:
234 case PHY_INTERFACE_MODE_RGMII_RXID:
235 case PHY_INTERFACE_MODE_RGMII_ID:
236 case PHY_INTERFACE_MODE_RGMII:
237 case PHY_INTERFACE_MODE_MII:
238 case PHY_INTERFACE_MODE_REVMII:
239 case PHY_INTERFACE_MODE_RMII:
240 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
241 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
246 case PHY_INTERFACE_MODE_1000BASEX:
247 case PHY_INTERFACE_MODE_2500BASEX:
248 case PHY_INTERFACE_MODE_SGMII:
249 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
250 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
255 case PHY_INTERFACE_MODE_GMII:
256 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
257 err = mtk_gmac_gephy_path_setup(eth, mac->id);
266 /* Setup clock for 1st gmac */
267 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
268 !phy_interface_mode_is_8023z(state->interface) &&
269 MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
270 if (MTK_HAS_CAPS(mac->hw->soc->caps,
271 MTK_TRGMII_MT7621_CLK)) {
272 if (mt7621_gmac0_rgmii_adjust(mac->hw,
276 mtk_gmac0_rgmii_adjust(mac->hw,
280 /* mt7623_pad_clk_setup */
281 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
283 TD_DM_DRVP(8) | TD_DM_DRVN(8),
286 /* Assert/release MT7623 RXC reset */
287 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
289 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
294 switch (state->interface) {
295 case PHY_INTERFACE_MODE_MII:
296 case PHY_INTERFACE_MODE_GMII:
299 case PHY_INTERFACE_MODE_REVMII:
302 case PHY_INTERFACE_MODE_RMII:
311 /* put the gmac into the right mode */
312 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
313 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
314 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
315 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
317 mac->interface = state->interface;
321 if (state->interface == PHY_INTERFACE_MODE_SGMII ||
322 phy_interface_mode_is_8023z(state->interface)) {
323 /* The GMAC to SGMII path will be enabled once the SGMIISYS setup is done */
326 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
328 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
330 ~(u32)SYSCFG0_SGMII_MASK);
332 /* Decide how GMAC and SGMIISYS are mapped */
333 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
336 /* Setup SGMIISYS with the determined property */
337 if (state->interface != PHY_INTERFACE_MODE_SGMII)
338 err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
340 else if (phylink_autoneg_inband(mode))
341 err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
346 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
347 SYSCFG0_SGMII_MASK, val);
348 } else if (phylink_autoneg_inband(mode)) {
350 "In-band mode not supported in non SGMII mode!\n");
355 mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
357 mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
358 MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
360 /* Only update control register when needed! */
361 if (mcr_new != mcr_cur)
362 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
367 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
368 mac->id, phy_modes(state->interface));
372 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
373 mac->id, phy_modes(state->interface), err);
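/* phylink .mac_pcs_get_state callback: derive link, speed, duplex and
 * pause state from the MAC status register (MSR).
 */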
376 static void mtk_mac_pcs_get_state(struct phylink_config *config,
377 struct phylink_link_state *state)
379 struct mtk_mac *mac = container_of(config, struct mtk_mac,
381 u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
383 state->link = (pmsr & MAC_MSR_LINK);
384 state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
386 switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
388 state->speed = SPEED_10;
390 case MAC_MSR_SPEED_100:
391 state->speed = SPEED_100;
393 case MAC_MSR_SPEED_1000:
394 state->speed = SPEED_1000;
397 state->speed = SPEED_UNKNOWN;
401 state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
402 if (pmsr & MAC_MSR_RX_FC)
403 state->pause |= MLO_PAUSE_RX;
404 if (pmsr & MAC_MSR_TX_FC)
405 state->pause |= MLO_PAUSE_TX;
408 static void mtk_mac_an_restart(struct phylink_config *config)
410 struct mtk_mac *mac = container_of(config, struct mtk_mac,
413 mtk_sgmii_restart_an(mac->hw, mac->id);
416 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
417 phy_interface_t interface)
419 struct mtk_mac *mac = container_of(config, struct mtk_mac,
421 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
423 mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
424 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
427 static void mtk_mac_link_up(struct phylink_config *config,
428 struct phy_device *phy,
429 unsigned int mode, phy_interface_t interface,
430 int speed, int duplex, bool tx_pause, bool rx_pause)
432 struct mtk_mac *mac = container_of(config, struct mtk_mac,
434 u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
436 mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
437 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
438 MAC_MCR_FORCE_RX_FC);
440 /* Configure speed */
444 mcr |= MAC_MCR_SPEED_1000;
447 mcr |= MAC_MCR_SPEED_100;
451 /* Configure duplex */
452 if (duplex == DUPLEX_FULL)
453 mcr |= MAC_MCR_FORCE_DPX;
455 /* Configure pause modes - phylink will avoid these for half duplex */
457 mcr |= MAC_MCR_FORCE_TX_FC;
459 mcr |= MAC_MCR_FORCE_RX_FC;
461 mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
462 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
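/* phylink .validate callback: mask the supported/advertised link modes
 * down to what the SoC capabilities allow for the requested interface.
 */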
465 static void mtk_validate(struct phylink_config *config,
466 unsigned long *supported,
467 struct phylink_link_state *state)
469 struct mtk_mac *mac = container_of(config, struct mtk_mac,
471 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
473 if (state->interface != PHY_INTERFACE_MODE_NA &&
474 state->interface != PHY_INTERFACE_MODE_MII &&
475 state->interface != PHY_INTERFACE_MODE_GMII &&
476 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
477 phy_interface_mode_is_rgmii(state->interface)) &&
478 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
479 !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
480 !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
481 (state->interface == PHY_INTERFACE_MODE_SGMII ||
482 phy_interface_mode_is_8023z(state->interface)))) {
483 linkmode_zero(supported);
487 phylink_set_port_modes(mask);
488 phylink_set(mask, Autoneg);
490 switch (state->interface) {
491 case PHY_INTERFACE_MODE_TRGMII:
492 phylink_set(mask, 1000baseT_Full);
494 case PHY_INTERFACE_MODE_1000BASEX:
495 case PHY_INTERFACE_MODE_2500BASEX:
496 phylink_set(mask, 1000baseX_Full);
497 phylink_set(mask, 2500baseX_Full);
499 case PHY_INTERFACE_MODE_GMII:
500 case PHY_INTERFACE_MODE_RGMII:
501 case PHY_INTERFACE_MODE_RGMII_ID:
502 case PHY_INTERFACE_MODE_RGMII_RXID:
503 case PHY_INTERFACE_MODE_RGMII_TXID:
504 phylink_set(mask, 1000baseT_Half);
506 case PHY_INTERFACE_MODE_SGMII:
507 phylink_set(mask, 1000baseT_Full);
508 phylink_set(mask, 1000baseX_Full);
510 case PHY_INTERFACE_MODE_MII:
511 case PHY_INTERFACE_MODE_RMII:
512 case PHY_INTERFACE_MODE_REVMII:
513 case PHY_INTERFACE_MODE_NA:
515 phylink_set(mask, 10baseT_Half);
516 phylink_set(mask, 10baseT_Full);
517 phylink_set(mask, 100baseT_Half);
518 phylink_set(mask, 100baseT_Full);
522 if (state->interface == PHY_INTERFACE_MODE_NA) {
523 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
524 phylink_set(mask, 1000baseT_Full);
525 phylink_set(mask, 1000baseX_Full);
526 phylink_set(mask, 2500baseX_Full);
528 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
529 phylink_set(mask, 1000baseT_Full);
530 phylink_set(mask, 1000baseT_Half);
531 phylink_set(mask, 1000baseX_Full);
533 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
534 phylink_set(mask, 1000baseT_Full);
535 phylink_set(mask, 1000baseT_Half);
539 phylink_set(mask, Pause);
540 phylink_set(mask, Asym_Pause);
542 linkmode_and(supported, supported, mask);
543 linkmode_and(state->advertising, state->advertising, mask);
545 /* We can only operate at 2500BaseX or 1000BaseX. If requested
546 * to advertise both, only report advertising at 2500BaseX. */
548 phylink_helper_basex_speed(state);
551 static const struct phylink_mac_ops mtk_phylink_ops = {
552 .validate = mtk_validate,
553 .mac_pcs_get_state = mtk_mac_pcs_get_state,
554 .mac_an_restart = mtk_mac_an_restart,
555 .mac_config = mtk_mac_config,
556 .mac_link_down = mtk_mac_link_down,
557 .mac_link_up = mtk_mac_link_up,
560 static int mtk_mdio_init(struct mtk_eth *eth)
562 struct device_node *mii_np;
565 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
567 dev_err(eth->dev, "no %s child node found", "mdio-bus");
571 if (!of_device_is_available(mii_np)) {
576 eth->mii_bus = devm_mdiobus_alloc(eth->dev);
582 eth->mii_bus->name = "mdio";
583 eth->mii_bus->read = mtk_mdio_read;
584 eth->mii_bus->write = mtk_mdio_write;
585 eth->mii_bus->priv = eth;
586 eth->mii_bus->parent = eth->dev;
588 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
589 ret = of_mdiobus_register(eth->mii_bus, mii_np);
596 static void mtk_mdio_cleanup(struct mtk_eth *eth)
601 mdiobus_unregister(eth->mii_bus);
604 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
609 spin_lock_irqsave(&eth->tx_irq_lock, flags);
610 val = mtk_r32(eth, eth->tx_int_mask_reg);
611 mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
612 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
615 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
620 spin_lock_irqsave(&eth->tx_irq_lock, flags);
621 val = mtk_r32(eth, eth->tx_int_mask_reg);
622 mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
623 spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
626 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
631 spin_lock_irqsave(&eth->rx_irq_lock, flags);
632 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
633 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
634 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
637 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
642 spin_lock_irqsave(&eth->rx_irq_lock, flags);
643 val = mtk_r32(eth, MTK_PDMA_INT_MASK);
644 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
645 spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
648 static int mtk_set_mac_address(struct net_device *dev, void *p)
650 int ret = eth_mac_addr(dev, p);
651 struct mtk_mac *mac = netdev_priv(dev);
652 struct mtk_eth *eth = mac->hw;
653 const char *macaddr = dev->dev_addr;
658 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
661 spin_lock_bh(&mac->hw->page_lock);
662 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
663 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
664 MT7628_SDM_MAC_ADRH);
665 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
666 (macaddr[4] << 8) | macaddr[5],
667 MT7628_SDM_MAC_ADRL);
669 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
670 MTK_GDMA_MAC_ADRH(mac->id));
671 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
672 (macaddr[4] << 8) | macaddr[5],
673 MTK_GDMA_MAC_ADRL(mac->id));
675 spin_unlock_bh(&mac->hw->page_lock);
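/* Accumulate the per-GDM hardware MIB counters into the 64-bit software
 * counters in mac->hw_stats; callers are expected to hold the stats lock.
 */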
680 void mtk_stats_update_mac(struct mtk_mac *mac)
682 struct mtk_hw_stats *hw_stats = mac->hw_stats;
683 unsigned int base = MTK_GDM1_TX_GBCNT;
686 base += hw_stats->reg_offset;
688 u64_stats_update_begin(&hw_stats->syncp);
690 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
691 stats = mtk_r32(mac->hw, base + 0x04);
693 hw_stats->rx_bytes += (stats << 32);
694 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
695 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
696 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
697 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
698 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
699 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
700 hw_stats->rx_flow_control_packets +=
701 mtk_r32(mac->hw, base + 0x24);
702 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
703 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
704 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
705 stats = mtk_r32(mac->hw, base + 0x34);
707 hw_stats->tx_bytes += (stats << 32);
708 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
709 u64_stats_update_end(&hw_stats->syncp);
712 static void mtk_stats_update(struct mtk_eth *eth)
716 for (i = 0; i < MTK_MAC_COUNT; i++) {
717 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
719 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
720 mtk_stats_update_mac(eth->mac[i]);
721 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
726 static void mtk_get_stats64(struct net_device *dev,
727 struct rtnl_link_stats64 *storage)
729 struct mtk_mac *mac = netdev_priv(dev);
730 struct mtk_hw_stats *hw_stats = mac->hw_stats;
733 if (netif_running(dev) && netif_device_present(dev)) {
734 if (spin_trylock_bh(&hw_stats->stats_lock)) {
735 mtk_stats_update_mac(mac);
736 spin_unlock_bh(&hw_stats->stats_lock);
741 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
742 storage->rx_packets = hw_stats->rx_packets;
743 storage->tx_packets = hw_stats->tx_packets;
744 storage->rx_bytes = hw_stats->rx_bytes;
745 storage->tx_bytes = hw_stats->tx_bytes;
746 storage->collisions = hw_stats->tx_collisions;
747 storage->rx_length_errors = hw_stats->rx_short_errors +
748 hw_stats->rx_long_errors;
749 storage->rx_over_errors = hw_stats->rx_overflow;
750 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
751 storage->rx_errors = hw_stats->rx_checksum_errors;
752 storage->tx_aborted_errors = hw_stats->tx_skip;
753 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
755 storage->tx_errors = dev->stats.tx_errors;
756 storage->rx_dropped = dev->stats.rx_dropped;
757 storage->tx_dropped = dev->stats.tx_dropped;
760 static inline int mtk_max_frag_size(int mtu)
762 /* make sure buf_size will be at least MTK_MAX_RX_LENGTH_2K */
763 if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
764 mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
766 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
767 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
770 static inline int mtk_max_buf_size(int frag_size)
772 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
773 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
775 WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
780 static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
781 struct mtk_rx_dma *dma_rxd)
783 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
784 if (!(rxd->rxd2 & RX_DMA_DONE))
787 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
788 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
789 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
794 /* the QDMA core needs scratch memory to be set up */
795 static int mtk_init_fq_dma(struct mtk_eth *eth)
797 dma_addr_t phy_ring_tail;
798 int cnt = MTK_DMA_SIZE;
802 eth->scratch_ring = dma_alloc_coherent(eth->dev,
803 cnt * sizeof(struct mtk_tx_dma),
804 &eth->phy_scratch_ring,
806 if (unlikely(!eth->scratch_ring))
809 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
811 if (unlikely(!eth->scratch_head))
814 dma_addr = dma_map_single(eth->dev,
815 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
817 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
820 phy_ring_tail = eth->phy_scratch_ring +
821 (sizeof(struct mtk_tx_dma) * (cnt - 1));
823 for (i = 0; i < cnt; i++) {
824 eth->scratch_ring[i].txd1 =
825 (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
827 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
828 ((i + 1) * sizeof(struct mtk_tx_dma)));
829 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
832 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
833 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
834 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
835 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
840 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
842 void *ret = ring->dma;
844 return ret + (desc - ring->phys);
847 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
848 struct mtk_tx_dma *txd)
850 int idx = txd - ring->dma;
852 return &ring->buf[idx];
855 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
856 struct mtk_tx_dma *dma)
858 return ring->dma_pdma - ring->dma + dma;
861 static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
863 return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
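/* Unmap a TX buffer and free its skb; QDMA and PDMA keep the DMA
 * mapping information in different unmap slots.
 */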
866 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
869 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
870 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
871 dma_unmap_single(eth->dev,
872 dma_unmap_addr(tx_buf, dma_addr0),
873 dma_unmap_len(tx_buf, dma_len0),
875 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
876 dma_unmap_page(eth->dev,
877 dma_unmap_addr(tx_buf, dma_addr0),
878 dma_unmap_len(tx_buf, dma_len0),
882 if (dma_unmap_len(tx_buf, dma_len0)) {
883 dma_unmap_page(eth->dev,
884 dma_unmap_addr(tx_buf, dma_addr0),
885 dma_unmap_len(tx_buf, dma_len0),
889 if (dma_unmap_len(tx_buf, dma_len1)) {
890 dma_unmap_page(eth->dev,
891 dma_unmap_addr(tx_buf, dma_addr1),
892 dma_unmap_len(tx_buf, dma_len1),
899 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
901 napi_consume_skb(tx_buf->skb, napi);
903 dev_kfree_skb_any(tx_buf->skb);
908 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
909 struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
910 size_t size, int idx)
912 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
913 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
914 dma_unmap_len_set(tx_buf, dma_len0, size);
917 txd->txd3 = mapped_addr;
918 txd->txd2 |= TX_DMA_PLEN1(size);
919 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
920 dma_unmap_len_set(tx_buf, dma_len1, size);
922 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
923 txd->txd1 = mapped_addr;
924 txd->txd2 = TX_DMA_PLEN0(size);
925 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
926 dma_unmap_len_set(tx_buf, dma_len0, size);
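/* Map the skb head and all fragments onto TX descriptors and kick the
 * DMA engine; on any mapping error the descriptors are unwound again.
 */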
931 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
932 int tx_num, struct mtk_tx_ring *ring, bool gso)
934 struct mtk_mac *mac = netdev_priv(dev);
935 struct mtk_eth *eth = mac->hw;
936 struct mtk_tx_dma *itxd, *txd;
937 struct mtk_tx_dma *itxd_pdma, *txd_pdma;
938 struct mtk_tx_buf *itx_buf, *tx_buf;
939 dma_addr_t mapped_addr;
940 unsigned int nr_frags;
945 itxd = ring->next_free;
946 itxd_pdma = qdma_to_pdma(ring, itxd);
947 if (itxd == ring->last_free)
950 /* set the forward port */
951 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
954 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
955 memset(itx_buf, 0, sizeof(*itx_buf));
960 /* TX Checksum offload */
961 if (skb->ip_summed == CHECKSUM_PARTIAL)
962 txd4 |= TX_DMA_CHKSUM;
964 /* VLAN header offload */
965 if (skb_vlan_tag_present(skb))
966 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
968 mapped_addr = dma_map_single(eth->dev, skb->data,
969 skb_headlen(skb), DMA_TO_DEVICE);
970 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
973 WRITE_ONCE(itxd->txd1, mapped_addr);
974 itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
975 itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
977 setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
982 txd_pdma = qdma_to_pdma(ring, txd);
983 nr_frags = skb_shinfo(skb)->nr_frags;
985 for (i = 0; i < nr_frags; i++) {
986 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
987 unsigned int offset = 0;
988 int frag_size = skb_frag_size(frag);
991 bool last_frag = false;
992 unsigned int frag_map_size;
993 bool new_desc = true;
995 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
997 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
998 txd_pdma = qdma_to_pdma(ring, txd);
999 if (txd == ring->last_free)
1008 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
1009 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
1012 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
1015 if (i == nr_frags - 1 &&
1016 (frag_size - frag_map_size) == 0)
1019 WRITE_ONCE(txd->txd1, mapped_addr);
1020 WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
1021 TX_DMA_PLEN0(frag_map_size) |
1022 last_frag * TX_DMA_LS0));
1023 WRITE_ONCE(txd->txd4, fport);
1025 tx_buf = mtk_desc_to_tx_buf(ring, txd);
1027 memset(tx_buf, 0, sizeof(*tx_buf));
1028 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1029 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1030 tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1031 MTK_TX_FLAGS_FPORT1;
1033 setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
1034 frag_map_size, k++);
1036 frag_size -= frag_map_size;
1037 offset += frag_map_size;
1041 /* store skb to cleanup */
1044 WRITE_ONCE(itxd->txd4, txd4);
1045 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
1046 (!nr_frags * TX_DMA_LS0)));
1047 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1049 txd_pdma->txd2 |= TX_DMA_LS0;
1051 txd_pdma->txd2 |= TX_DMA_LS1;
1054 netdev_sent_queue(dev, skb->len);
1055 skb_tx_timestamp(skb);
1057 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1058 atomic_sub(n_desc, &ring->free_count);
1060 /* make sure that all changes to the dma ring are flushed before we continue */
1065 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1066 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1067 !netdev_xmit_more())
1068 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1070 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
1072 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1079 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
1082 mtk_tx_unmap(eth, tx_buf, false);
1084 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1085 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1086 itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1088 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1089 itxd_pdma = qdma_to_pdma(ring, itxd);
1090 } while (itxd != txd);
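/* Worst-case number of TX descriptors needed for an skb; GSO fragments
 * may span several descriptors due to the MTK_TX_DMA_BUF_LEN limit.
 */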
1095 static inline int mtk_cal_txd_req(struct sk_buff *skb)
1101 if (skb_is_gso(skb)) {
1102 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1103 frag = &skb_shinfo(skb)->frags[i];
1104 nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1105 MTK_TX_DMA_BUF_LEN);
1108 nfrags += skb_shinfo(skb)->nr_frags;
1114 static int mtk_queue_stopped(struct mtk_eth *eth)
1118 for (i = 0; i < MTK_MAC_COUNT; i++) {
1119 if (!eth->netdev[i])
1121 if (netif_queue_stopped(eth->netdev[i]))
1128 static void mtk_wake_queue(struct mtk_eth *eth)
1132 for (i = 0; i < MTK_MAC_COUNT; i++) {
1133 if (!eth->netdev[i])
1135 netif_wake_queue(eth->netdev[i]);
1139 static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1141 struct mtk_mac *mac = netdev_priv(dev);
1142 struct mtk_eth *eth = mac->hw;
1143 struct mtk_tx_ring *ring = &eth->tx_ring;
1144 struct net_device_stats *stats = &dev->stats;
1148 /* normally we can rely on the stack not calling this more than once,
1149 * however we have 2 queues running on the same ring so we need to lock the ring access */
1152 spin_lock(&eth->page_lock);
1154 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1157 tx_num = mtk_cal_txd_req(skb);
1158 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1159 netif_stop_queue(dev);
1160 netif_err(eth, tx_queued, dev,
1161 "Tx Ring full when queue awake!\n");
1162 spin_unlock(&eth->page_lock);
1163 return NETDEV_TX_BUSY;
1166 /* TSO: fill MSS info in tcp checksum field */
1167 if (skb_is_gso(skb)) {
1168 if (skb_cow_head(skb, 0)) {
1169 netif_warn(eth, tx_err, dev,
1170 "GSO expand head fail.\n");
1174 if (skb_shinfo(skb)->gso_type &
1175 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1177 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1181 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1184 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1185 netif_stop_queue(dev);
1187 spin_unlock(&eth->page_lock);
1189 return NETDEV_TX_OK;
1192 spin_unlock(&eth->page_lock);
1193 stats->tx_dropped++;
1194 dev_kfree_skb_any(skb);
1195 return NETDEV_TX_OK;
1198 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1201 struct mtk_rx_ring *ring;
1205 return &eth->rx_ring[0];
1207 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1208 ring = &eth->rx_ring[i];
1209 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1210 if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
1211 ring->calc_idx_update = true;
1219 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1221 struct mtk_rx_ring *ring;
1225 ring = &eth->rx_ring[0];
1226 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1228 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1229 ring = &eth->rx_ring[i];
1230 if (ring->calc_idx_update) {
1231 ring->calc_idx_update = false;
1232 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
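/* NAPI RX poll: reap completed RX descriptors up to the budget, refill
 * the ring with new fragments and feed a sample to the RX DIM worker.
 */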
1238 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1239 struct mtk_eth *eth)
1241 struct dim_sample dim_sample = {};
1242 struct mtk_rx_ring *ring;
1244 struct sk_buff *skb;
1245 u8 *data, *new_data;
1246 struct mtk_rx_dma *rxd, trxd;
1247 int done = 0, bytes = 0;
1249 while (done < budget) {
1250 struct net_device *netdev;
1251 unsigned int pktlen;
1252 dma_addr_t dma_addr;
1255 ring = mtk_get_rx_ring(eth);
1256 if (unlikely(!ring))
1259 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1260 rxd = &ring->dma[idx];
1261 data = ring->data[idx];
1263 if (!mtk_rx_get_desc(&trxd, rxd))
1266 /* find out which mac the packet comes from. values start at 1 */
1267 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
1268 (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
1271 mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1272 RX_DMA_FPORT_MASK) - 1;
1274 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1278 netdev = eth->netdev[mac];
1280 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1283 /* alloc new buffer */
1284 new_data = napi_alloc_frag(ring->frag_size);
1285 if (unlikely(!new_data)) {
1286 netdev->stats.rx_dropped++;
1289 dma_addr = dma_map_single(eth->dev,
1290 new_data + NET_SKB_PAD +
1294 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1295 skb_free_frag(new_data);
1296 netdev->stats.rx_dropped++;
1300 dma_unmap_single(eth->dev, trxd.rxd1,
1301 ring->buf_size, DMA_FROM_DEVICE);
1304 skb = build_skb(data, ring->frag_size);
1305 if (unlikely(!skb)) {
1306 skb_free_frag(data);
1307 netdev->stats.rx_dropped++;
1310 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1312 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1314 skb_put(skb, pktlen);
1315 if (trxd.rxd4 & eth->rx_dma_l4_valid)
1316 skb->ip_summed = CHECKSUM_UNNECESSARY;
1318 skb_checksum_none_assert(skb);
1319 skb->protocol = eth_type_trans(skb, netdev);
1322 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1323 (trxd.rxd2 & RX_DMA_VTAG))
1324 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1325 RX_DMA_VID(trxd.rxd3));
1326 skb_record_rx_queue(skb, 0);
1327 napi_gro_receive(napi, skb);
1330 ring->data[idx] = new_data;
1331 rxd->rxd1 = (unsigned int)dma_addr;
1334 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1335 rxd->rxd2 = RX_DMA_LSO;
1337 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1339 ring->calc_idx = idx;
1346 /* make sure that all changes to the dma ring are flushed before we continue */
1350 mtk_update_rx_cpu_idx(eth);
1353 eth->rx_packets += done;
1354 eth->rx_bytes += bytes;
1355 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
1357 net_dim(&eth->rx_dim, dim_sample);
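/* Reclaim completed QDMA TX descriptors by walking from the last
 * software release pointer towards the hardware DMA pointer.
 */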
1362 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1363 unsigned int *done, unsigned int *bytes)
1365 struct mtk_tx_ring *ring = &eth->tx_ring;
1366 struct mtk_tx_dma *desc;
1367 struct sk_buff *skb;
1368 struct mtk_tx_buf *tx_buf;
1371 cpu = ring->last_free_ptr;
1372 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1374 desc = mtk_qdma_phys_to_virt(ring, cpu);
1376 while ((cpu != dma) && budget) {
1377 u32 next_cpu = desc->txd2;
1380 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1381 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1384 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1385 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1392 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1393 bytes[mac] += skb->len;
1397 mtk_tx_unmap(eth, tx_buf, true);
1399 ring->last_free = desc;
1400 atomic_inc(&ring->free_count);
1405 ring->last_free_ptr = cpu;
1406 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1411 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1412 unsigned int *done, unsigned int *bytes)
1414 struct mtk_tx_ring *ring = &eth->tx_ring;
1415 struct mtk_tx_dma *desc;
1416 struct sk_buff *skb;
1417 struct mtk_tx_buf *tx_buf;
1420 cpu = ring->cpu_idx;
1421 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1423 while ((cpu != dma) && budget) {
1424 tx_buf = &ring->buf[cpu];
1429 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1430 bytes[0] += skb->len;
1435 mtk_tx_unmap(eth, tx_buf, true);
1437 desc = &ring->dma[cpu];
1438 ring->last_free = desc;
1439 atomic_inc(&ring->free_count);
1441 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1444 ring->cpu_idx = cpu;
1449 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1451 struct mtk_tx_ring *ring = &eth->tx_ring;
1452 struct dim_sample dim_sample = {};
1453 unsigned int done[MTK_MAX_DEVS];
1454 unsigned int bytes[MTK_MAX_DEVS];
1457 memset(done, 0, sizeof(done));
1458 memset(bytes, 0, sizeof(bytes));
1460 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1461 budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
1463 budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
1465 for (i = 0; i < MTK_MAC_COUNT; i++) {
1466 if (!eth->netdev[i] || !done[i])
1468 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1470 eth->tx_packets += done[i];
1471 eth->tx_bytes += bytes[i];
1474 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
1476 net_dim(&eth->tx_dim, dim_sample);
1478 if (mtk_queue_stopped(eth) &&
1479 (atomic_read(&ring->free_count) > ring->thresh))
1480 mtk_wake_queue(eth);
1485 static void mtk_handle_status_irq(struct mtk_eth *eth)
1487 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1489 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1490 mtk_stats_update(eth);
1491 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1496 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1498 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1502 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1503 mtk_handle_status_irq(eth);
1504 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1505 tx_done = mtk_poll_tx(eth, budget);
1507 if (unlikely(netif_msg_intr(eth))) {
1508 status = mtk_r32(eth, eth->tx_int_status_reg);
1509 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1511 "done tx %d, intr 0x%08x/0x%x\n",
1512 tx_done, status, mask);
1515 if (tx_done == budget)
1518 status = mtk_r32(eth, eth->tx_int_status_reg);
1519 if (status & MTK_TX_DONE_INT)
1522 napi_complete(napi);
1523 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1528 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1530 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1533 int remain_budget = budget;
1535 mtk_handle_status_irq(eth);
1538 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1539 rx_done = mtk_poll_rx(napi, remain_budget, eth);
1541 if (unlikely(netif_msg_intr(eth))) {
1542 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1543 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1545 "done rx %d, intr 0x%08x/0x%x\n",
1546 rx_done, status, mask);
1548 if (rx_done == remain_budget)
1551 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1552 if (status & MTK_RX_DONE_INT) {
1553 remain_budget -= rx_done;
1556 napi_complete(napi);
1557 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1559 return rx_done + budget - remain_budget;
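/* Allocate and link the TX descriptor ring(s) and program the ring
 * base, count and index registers for QDMA or PDMA operation.
 */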
1562 static int mtk_tx_alloc(struct mtk_eth *eth)
1564 struct mtk_tx_ring *ring = &eth->tx_ring;
1565 int i, sz = sizeof(*ring->dma);
1567 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1572 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1573 &ring->phys, GFP_ATOMIC);
1577 for (i = 0; i < MTK_DMA_SIZE; i++) {
1578 int next = (i + 1) % MTK_DMA_SIZE;
1579 u32 next_ptr = ring->phys + next * sz;
1581 ring->dma[i].txd2 = next_ptr;
1582 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1585 /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1586 * only as the framework. The real HW descriptors are the PDMA
1587 * descriptors in ring->dma_pdma. */
1589 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1590 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1593 if (!ring->dma_pdma)
1596 for (i = 0; i < MTK_DMA_SIZE; i++) {
1597 ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1598 ring->dma_pdma[i].txd4 = 0;
1602 ring->dma_size = MTK_DMA_SIZE;
1603 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1604 ring->next_free = &ring->dma[0];
1605 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1606 ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
1607 ring->thresh = MAX_SKB_FRAGS;
1609 /* make sure that all changes to the dma ring are flushed before we continue */
1614 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1615 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1616 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1618 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1620 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
1621 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1624 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1625 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1626 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1627 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1636 static void mtk_tx_clean(struct mtk_eth *eth)
1638 struct mtk_tx_ring *ring = &eth->tx_ring;
1642 for (i = 0; i < MTK_DMA_SIZE; i++)
1643 mtk_tx_unmap(eth, &ring->buf[i], false);
1649 dma_free_coherent(eth->dev,
1650 MTK_DMA_SIZE * sizeof(*ring->dma),
1656 if (ring->dma_pdma) {
1657 dma_free_coherent(eth->dev,
1658 MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
1661 ring->dma_pdma = NULL;
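/* Allocate one RX ring (normal, HW LRO or QDMA flavour), pre-fill it
 * with DMA-mapped page fragments and program its ring registers.
 */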
1665 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1667 struct mtk_rx_ring *ring;
1668 int rx_data_len, rx_dma_size;
1672 if (rx_flag == MTK_RX_FLAGS_QDMA) {
1675 ring = &eth->rx_ring_qdma;
1678 ring = &eth->rx_ring[ring_no];
1681 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1682 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1683 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1685 rx_data_len = ETH_DATA_LEN;
1686 rx_dma_size = MTK_DMA_SIZE;
1689 ring->frag_size = mtk_max_frag_size(rx_data_len);
1690 ring->buf_size = mtk_max_buf_size(ring->frag_size);
1691 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1696 for (i = 0; i < rx_dma_size; i++) {
1697 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1702 ring->dma = dma_alloc_coherent(eth->dev,
1703 rx_dma_size * sizeof(*ring->dma),
1704 &ring->phys, GFP_ATOMIC);
1708 for (i = 0; i < rx_dma_size; i++) {
1709 dma_addr_t dma_addr = dma_map_single(eth->dev,
1710 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1713 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1715 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1717 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1718 ring->dma[i].rxd2 = RX_DMA_LSO;
1720 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1722 ring->dma_size = rx_dma_size;
1723 ring->calc_idx_update = false;
1724 ring->calc_idx = rx_dma_size - 1;
1725 ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1726 /* make sure that all changes to the dma ring are flushed before we continue */
1731 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1732 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1733 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1734 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1739 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1743 if (ring->data && ring->dma) {
1744 for (i = 0; i < ring->dma_size; i++) {
1747 if (!ring->dma[i].rxd1)
1749 dma_unmap_single(eth->dev,
1753 skb_free_frag(ring->data[i]);
1760 dma_free_coherent(eth->dev,
1761 ring->dma_size * sizeof(*ring->dma),
1768 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1771 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1772 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1774 /* set LRO rings to auto-learn modes */
1775 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1777 /* validate LRO ring */
1778 ring_ctrl_dw2 |= MTK_RING_VLD;
1780 /* set AGE timer (unit: 20us) */
1781 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1782 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1784 /* set max AGG timer (unit: 20us) */
1785 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1787 /* set max LRO AGG count */
1788 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1789 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1791 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1792 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1793 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1794 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1797 /* IPv4 checksum update enable */
1798 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1800 /* switch priority comparison to packet count mode */
1801 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1803 /* bandwidth threshold setting */
1804 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1806 /* auto-learn score delta setting */
1807 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1809 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1810 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1811 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1813 /* set HW LRO mode & the max aggregation count for rx packets */
1814 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1816 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
1817 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1820 lro_ctrl_dw0 |= MTK_LRO_EN;
1822 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1823 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1828 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1833 /* relinquish lro rings, flush aggregated packets */
1834 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1836 /* wait for relinquishments done */
1837 for (i = 0; i < 10; i++) {
1838 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1839 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1846 /* invalidate lro rings */
1847 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1848 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1850 /* disable HW LRO */
1851 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1854 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1858 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1860 /* invalidate the IP setting */
1861 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1863 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1865 /* validate the IP setting */
1866 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1869 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1873 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1875 /* invalidate the IP setting */
1876 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1878 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1881 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1886 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1887 if (mac->hwlro_ip[i])
1894 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1895 struct ethtool_rxnfc *cmd)
1897 struct ethtool_rx_flow_spec *fsp =
1898 (struct ethtool_rx_flow_spec *)&cmd->fs;
1899 struct mtk_mac *mac = netdev_priv(dev);
1900 struct mtk_eth *eth = mac->hw;
1903 if ((fsp->flow_type != TCP_V4_FLOW) ||
1904 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1905 (fsp->location > 1))
1908 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1909 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1911 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1913 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1918 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1919 struct ethtool_rxnfc *cmd)
1921 struct ethtool_rx_flow_spec *fsp =
1922 (struct ethtool_rx_flow_spec *)&cmd->fs;
1923 struct mtk_mac *mac = netdev_priv(dev);
1924 struct mtk_eth *eth = mac->hw;
1927 if (fsp->location > 1)
1930 mac->hwlro_ip[fsp->location] = 0;
1931 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1933 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1935 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1940 static void mtk_hwlro_netdev_disable(struct net_device *dev)
1942 struct mtk_mac *mac = netdev_priv(dev);
1943 struct mtk_eth *eth = mac->hw;
1946 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1947 mac->hwlro_ip[i] = 0;
1948 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1950 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1953 mac->hwlro_ip_cnt = 0;
1956 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1957 struct ethtool_rxnfc *cmd)
1959 struct mtk_mac *mac = netdev_priv(dev);
1960 struct ethtool_rx_flow_spec *fsp =
1961 (struct ethtool_rx_flow_spec *)&cmd->fs;
1963 /* only the TCP destination IPv4 address is meaningful, other fields are ignored */
1964 fsp->flow_type = TCP_V4_FLOW;
1965 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1966 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1968 fsp->h_u.tcp_ip4_spec.ip4src = 0;
1969 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1970 fsp->h_u.tcp_ip4_spec.psrc = 0;
1971 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1972 fsp->h_u.tcp_ip4_spec.pdst = 0;
1973 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1974 fsp->h_u.tcp_ip4_spec.tos = 0;
1975 fsp->m_u.tcp_ip4_spec.tos = 0xff;
1980 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1981 struct ethtool_rxnfc *cmd,
1984 struct mtk_mac *mac = netdev_priv(dev);
1988 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1989 if (mac->hwlro_ip[i]) {
1995 cmd->rule_cnt = cnt;
2000 static netdev_features_t mtk_fix_features(struct net_device *dev,
2001 netdev_features_t features)
2003 if (!(features & NETIF_F_LRO)) {
2004 struct mtk_mac *mac = netdev_priv(dev);
2005 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
2008 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
2010 features |= NETIF_F_LRO;
2017 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2021 if (!((dev->features ^ features) & NETIF_F_LRO))
2024 if (!(features & NETIF_F_LRO))
2025 mtk_hwlro_netdev_disable(dev);
2030 /* wait for DMA to finish whatever it is doing before we start using it again */
2031 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2033 unsigned long t_start = jiffies;
2036 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2037 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2038 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2041 if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2042 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2046 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2050 dev_err(eth->dev, "DMA init timeout\n");
2054 static int mtk_dma_init(struct mtk_eth *eth)
2059 if (mtk_dma_busy_wait(eth))
2062 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2063 /* QDMA needs scratch memory for internal reordering of the fragmented packets */
2066 err = mtk_init_fq_dma(eth);
2071 err = mtk_tx_alloc(eth);
2075 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2076 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2081 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2086 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2087 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2091 err = mtk_hwlro_rx_init(eth);
2096 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2097 /* Enable random early drop and set drop threshold automatically */
2100 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2101 FC_THRES_MIN, MTK_QDMA_FC_THRES);
2102 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2108 static void mtk_dma_free(struct mtk_eth *eth)
2112 for (i = 0; i < MTK_MAC_COUNT; i++)
2114 netdev_reset_queue(eth->netdev[i]);
2115 if (eth->scratch_ring) {
2116 dma_free_coherent(eth->dev,
2117 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2119 eth->phy_scratch_ring);
2120 eth->scratch_ring = NULL;
2121 eth->phy_scratch_ring = 0;
2124 mtk_rx_clean(eth, &eth->rx_ring[0]);
2125 mtk_rx_clean(eth, &eth->rx_ring_qdma);
2128 mtk_hwlro_rx_uninit(eth);
2129 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2130 mtk_rx_clean(eth, &eth->rx_ring[i]);
2133 kfree(eth->scratch_head);
2136 static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2138 struct mtk_mac *mac = netdev_priv(dev);
2139 struct mtk_eth *eth = mac->hw;
2141 eth->netdev[mac->id]->stats.tx_errors++;
2142 netif_err(eth, tx_err, dev,
2143 "transmit timed out\n");
2144 schedule_work(&eth->pending_work);
2147 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2149 struct mtk_eth *eth = _eth;
2152 if (likely(napi_schedule_prep(&eth->rx_napi))) {
2153 __napi_schedule(&eth->rx_napi);
2154 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2160 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2162 struct mtk_eth *eth = _eth;
2165 if (likely(napi_schedule_prep(&eth->tx_napi))) {
2166 __napi_schedule(&eth->tx_napi);
2167 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2173 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2175 struct mtk_eth *eth = _eth;
2177 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
2178 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
2179 mtk_handle_irq_rx(irq, _eth);
2181 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2182 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2183 mtk_handle_irq_tx(irq, _eth);
2189 #ifdef CONFIG_NET_POLL_CONTROLLER
2190 static void mtk_poll_controller(struct net_device *dev)
2192 struct mtk_mac *mac = netdev_priv(dev);
2193 struct mtk_eth *eth = mac->hw;
2195 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2196 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2197 mtk_handle_irq_rx(eth->irq[2], dev);
2198 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2199 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2203 static int mtk_start_dma(struct mtk_eth *eth)
2205 u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2208 err = mtk_dma_init(eth);
2214 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2216 MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
2217 MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
2218 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2223 MTK_RX_DMA_EN | rx_2b_offset |
2224 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2227 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2228 MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2235 static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2239 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2242 for (i = 0; i < MTK_MAC_COUNT; i++) {
2243 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2245 /* by default, set up the forward port to send frames to the PDMA */
2248 /* Enable RX checksum */
2249 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2253 if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
2254 val |= MTK_GDMA_SPECIAL_TAG;
2256 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2258 /* Reset and enable PSE */
2259 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2260 mtk_w32(eth, 0, MTK_RST_GL);
2263 static int mtk_open(struct net_device *dev)
2265 struct mtk_mac *mac = netdev_priv(dev);
2266 struct mtk_eth *eth = mac->hw;
2269 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2271 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2276 /* we run 2 netdevs on the same dma ring so we only bring it up once */
2277 if (!refcount_read(&eth->dma_refcnt)) {
2278 u32 gdm_config = MTK_GDMA_TO_PDMA;
2281 err = mtk_start_dma(eth);
2285 if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
2286 gdm_config = MTK_GDMA_TO_PPE;
2288 mtk_gdm_config(eth, gdm_config);
2290 napi_enable(&eth->tx_napi);
2291 napi_enable(&eth->rx_napi);
2292 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2293 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2294 refcount_set(&eth->dma_refcnt, 1);
2297 refcount_inc(&eth->dma_refcnt);
2299 phylink_start(mac->phylink);
2300 netif_start_queue(dev);
2304 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2309 /* stop the dma engine */
2310 spin_lock_bh(&eth->page_lock);
2311 val = mtk_r32(eth, glo_cfg);
2312 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2314 spin_unlock_bh(&eth->page_lock);
2316 /* wait for dma stop */
2317 for (i = 0; i < 10; i++) {
2318 val = mtk_r32(eth, glo_cfg);
2319 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2327 static int mtk_stop(struct net_device *dev)
2329 struct mtk_mac *mac = netdev_priv(dev);
2330 struct mtk_eth *eth = mac->hw;
2332 phylink_stop(mac->phylink);
2334 netif_tx_disable(dev);
2336 phylink_disconnect_phy(mac->phylink);
2338 /* only shut down DMA if this is the last user */
2339 if (!refcount_dec_and_test(&eth->dma_refcnt))
2342 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2344 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2345 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2346 napi_disable(&eth->tx_napi);
2347 napi_disable(&eth->rx_napi);
2349 cancel_work_sync(&eth->rx_dim.work);
2350 cancel_work_sync(&eth->tx_dim.work);
2352 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2353 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2354 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2358 if (eth->soc->offload_version)
2359 mtk_ppe_stop(&eth->ppe);
2364 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2366 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2370 usleep_range(1000, 1100);
2371 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2377 static void mtk_clk_disable(struct mtk_eth *eth)
2381 for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2382 clk_disable_unprepare(eth->clks[clk]);
2385 static int mtk_clk_enable(struct mtk_eth *eth)
2389 for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2390 ret = clk_prepare_enable(eth->clks[clk]);
2392 goto err_disable_clks;
2399 clk_disable_unprepare(eth->clks[clk]);
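/* Net DIM worker for the RX path: convert the suggested moderation
 * profile into the PDMA/QDMA delay-interrupt register settings.
 */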
2404 static void mtk_dim_rx(struct work_struct *work)
2406 struct dim *dim = container_of(work, struct dim, work);
2407 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
2408 struct dim_cq_moder cur_profile;
2411 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
2413 spin_lock_bh(&eth->dim_lock);
2415 val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
2416 val &= MTK_PDMA_DELAY_TX_MASK;
2417 val |= MTK_PDMA_DELAY_RX_EN;
2419 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
2420 val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
2422 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
2423 val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
2425 mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
2426 mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
2428 spin_unlock_bh(&eth->dim_lock);
2430 dim->state = DIM_START_MEASURE;
2433 static void mtk_dim_tx(struct work_struct *work)
2435 struct dim *dim = container_of(work, struct dim, work);
2436 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
2437 struct dim_cq_moder cur_profile;
2440 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
2442 spin_lock_bh(&eth->dim_lock);
2444 val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
2445 val &= MTK_PDMA_DELAY_RX_MASK;
2446 val |= MTK_PDMA_DELAY_TX_EN;
2448 cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
2449 val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
2451 cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
2452 val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
2454 mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
2455 mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
2457 spin_unlock_bh(&eth->dim_lock);
2459 dim->state = DIM_START_MEASURE;
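/* One-time hardware bring-up: clocks, resets, pad driving setup,
 * default MAC link-down state and FE interrupt grouping.
 */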
2462 static int mtk_hw_init(struct mtk_eth *eth)
2466 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2469 pm_runtime_enable(eth->dev);
2470 pm_runtime_get_sync(eth->dev);
2472 ret = mtk_clk_enable(eth);
2474 goto err_disable_pm;
2476 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2477 ret = device_reset(eth->dev);
2479 dev_err(eth->dev, "MAC reset failed!\n");
2480 goto err_disable_pm;
2483 /* disable delay and normal interrupt */
2484 mtk_tx_irq_disable(eth, ~0);
2485 mtk_rx_irq_disable(eth, ~0);
2490 /* Non-MT7628 handling... */
2491 ethsys_reset(eth, RSTCTRL_FE);
2492 ethsys_reset(eth, RSTCTRL_PPE);
2495 /* Set GE2 driving and slew rate */
2496 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2499 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2502 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2505 /* Set linkdown as the default for each GMAC. Its own MCR would be set
2506 * up with the more appropriate value when the mtk_mac_config call is made */
2509 for (i = 0; i < MTK_MAC_COUNT; i++)
2510 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2512 /* Indicates CDM to parse the MTK special tag from CPU
2513 * which also is working out for untag packets.
2515 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2516 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2518 /* Enable RX VLan Offloading */
2519 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2521 /* set interrupt delays based on current Net DIM sample */
2522 mtk_dim_rx(ð->rx_dim.work);
2523 mtk_dim_tx(ð->tx_dim.work);
2525 /* disable delay and normal interrupt */
2526 mtk_tx_irq_disable(eth, ~0);
2527 mtk_rx_irq_disable(eth, ~0);
2529 /* FE int grouping */
2530 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
2531 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
2532 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
2533 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
2534 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2539 pm_runtime_put_sync(eth->dev);
2540 pm_runtime_disable(eth->dev);
static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}
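/* ndo_init: take the MAC address from the device tree when one is
 * provided; otherwise fall back to a randomly generated address.
 */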
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int ret;

	ret = of_get_mac_address(mac->of_node, dev->dev_addr);
	if (ret) {
		/* If the MAC address is invalid, use a random MAC address */
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return 0;
}
static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phylink_disconnect_phy(mac->phylink);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}
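/* The GMAC caps the maximum RX frame length via MAC_MCR. Pick the smallest
 * hardware limit (1518/1536/1552/2048 bytes) that still covers the new MTU
 * plus the Ethernet/VLAN overhead accounted for in MTK_RX_ETH_HLEN.
 */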
static int mtk_change_mtu(struct net_device *dev, int new_mtu)
{
	int length = new_mtu + MTK_RX_ETH_HLEN;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 mcr_cur, mcr_new;

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
		mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;

		if (length <= 1518)
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
		else if (length <= 1536)
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
		else if (length <= 1552)
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
		else
			mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);

		if (mcr_new != mcr_cur)
			mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
	}

	dev->mtu = new_mtu;

	return 0;
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return phylink_mii_ioctl(mac->phylink, ifr, cmd);
}
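/* Reset worker, typically scheduled when the hardware needs a full restart
 * (e.g. after a TX timeout): take the RESETTING bit, stop every registered
 * netdev so DMA is quiesced, re-initialise the hardware and bring the
 * previously running devices back up.
 */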
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
	unsigned long restart = 0;
	int err, i;

	rtnl_lock();

	dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);

	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();

	dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		__set_bit(i, &restart);
	}
	dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);

	/* restart underlying hardware such as power, clock, pin mux
	 * and the connected phy
	 */
	mtk_hw_deinit(eth);

	if (eth->dev->pins)
		pinctrl_select_state(eth->dev->pins->p,
				     eth->dev->pins->default_state);
	mtk_hw_init(eth);

	/* restart DMA and enable IRQs */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!test_bit(i, &restart))
			continue;
		err = mtk_open(eth->netdev[i]);
		if (err) {
			netif_alert(eth, ifup, eth->netdev[i],
				    "Driver up/down cycle failed, closing device.\n");
			dev_close(eth->netdev[i]);
		}
	}

	dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);

	clear_bit_unlock(MTK_RESETTING, &eth->state);

	rtnl_unlock();
}
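/* Teardown helpers shared by the cleanup and remove paths: unregister and
 * free all per-MAC net_devices and flush the pending reset work.
 */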
static int mtk_free_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		free_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_unreg_dev(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		unregister_netdev(eth->netdev[i]);
	}

	return 0;
}

static int mtk_cleanup(struct mtk_eth *eth)
{
	mtk_unreg_dev(eth);
	mtk_free_dev(eth);
	cancel_work_sync(&eth->pending_work);

	return 0;
}
static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phylink_ethtool_nway_reset(mac->phylink);
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
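/* ethtool RX flow classification hooks. These only do something when
 * hardware LRO is available (NETIF_F_LRO in hw_features); the rules appear
 * to steer flows by destination IP address via the mtk_hwlro_* helpers.
 */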
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->hw_features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->hw_features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->hw_features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= mtk_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
	.ndo_setup_tc		= mtk_eth_setup_tc,
};
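/* Parse one "mediatek,eth-mac" child node: allocate the net_device and the
 * per-MAC counters, create the phylink instance and wire up the netdev and
 * ethtool ops declared above.
 */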
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	const __be32 *_id = of_get_property(np, "reg", NULL);
	phy_interface_t phy_mode;
	struct phylink *phylink;
	struct mtk_mac *mac;
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	/* phylink create */
	err = of_get_phy_mode(np, &phy_mode);
	if (err) {
		dev_err(eth->dev, "incorrect phy-mode\n");
		goto free_netdev;
	}

	/* mac config is not set */
	mac->interface = PHY_INTERFACE_MODE_NA;
	mac->mode = MLO_AN_PHY;
	mac->speed = SPEED_UNKNOWN;

	mac->phylink_config.dev = &eth->netdev[id]->dev;
	mac->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&mac->phylink_config,
				 of_fwnode_handle(mac->of_node),
				 phy_mode, &mtk_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto free_netdev;
	}

	mac->phylink = phylink;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = eth->soc->hw_features;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = eth->soc->hw_features &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= eth->soc->hw_features;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
	else
		eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}
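/* Probe: map the register window, look up the syscon regmaps, IRQs and
 * clocks described in the device tree, initialise the hardware and register
 * one net_device per enabled MAC node. TX and RX NAPI are hung off a dummy
 * netdev because both MACs share one set of DMA rings.
 */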
static int mtk_probe(struct platform_device *pdev)
{
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err, i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
		eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
	} else {
		eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
		eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
		eth->ip_align = NET_IP_ALIGN;
	} else {
		eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
	}

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);
	spin_lock_init(&eth->dim_lock);

	eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);

	eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);

	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							      "mediatek,ethsys");
		if (IS_ERR(eth->ethsys)) {
			dev_err(&pdev->dev, "no ethsys regmap found\n");
			return PTR_ERR(eth->ethsys);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
		eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							     "mediatek,infracfg");
		if (IS_ERR(eth->infra)) {
			dev_err(&pdev->dev, "no infracfg regmap found\n");
			return PTR_ERR(eth->infra);
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
					  GFP_KERNEL);
		if (!eth->sgmii)
			return -ENOMEM;

		err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
				     eth->soc->ana_rgc3);
		if (err)
			return err;
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}
	for (i = 0; i < 3; i++) {
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
			eth->irq[i] = eth->irq[0];
		else
			eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}

	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err) {
			of_node_put(mac_np);
			goto err_deinit_hw;
		}
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
		err = devm_request_irq(eth->dev, eth->irq[0],
				       mtk_handle_irq, 0,
				       dev_name(eth->dev), eth);
	} else {
		err = devm_request_irq(eth->dev, eth->irq[1],
				       mtk_handle_irq_tx, 0,
				       dev_name(eth->dev), eth);
		if (err)
			goto err_free_dev;

		err = devm_request_irq(eth->dev, eth->irq[2],
				       mtk_handle_irq_rx, 0,
				       dev_name(eth->dev), eth);
	}
	if (err)
		goto err_free_dev;

	/* No MT7628/88 support yet */
	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
		err = mtk_mdio_init(eth);
		if (err)
			goto err_free_dev;
	}

	if (eth->soc->offload_version) {
		err = mtk_ppe_init(&eth->ppe, eth->dev,
				   eth->base + MTK_ETH_PPE_BASE, 2);
		if (err)
			goto err_free_dev;

		err = mtk_eth_offload_init(eth);
		if (err)
			goto err_free_dev;
	}

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		} else
			netif_info(eth, probe, eth->netdev[i],
				   "mediatek frame engine at 0x%08lx, irq %d\n",
				   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring, so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	struct mtk_mac *mac;
	int i;

	/* stop all devices to make sure that dma is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
		mac = netdev_priv(eth->netdev[i]);
		phylink_disconnect_phy(mac->phylink);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}
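/* Per-SoC capability tables, selected through the of_device_id list below. */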
static const struct mtk_soc_data mt2701_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7621_CLKS_BITMAP,
	.required_pctl = false,
	.offload_version = 2,
};

static const struct mtk_soc_data mt7622_data = {
	.caps = MT7622_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
	.offload_version = 2,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
	.offload_version = 2,
};

static const struct mtk_soc_data mt7629_data = {
	.caps = MT7629_CAPS | MTK_HWLRO,
	.hw_features = MTK_HW_FEATURES,
	.required_clks = MT7629_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data rt5350_data = {
	.caps = MT7628_CAPS,
	.hw_features = MTK_HW_FEATURES_MT7628,
	.required_clks = MT7628_CLKS_BITMAP,
	.required_pctl = false,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
	{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");