1 // SPDX-License-Identifier: GPL-2.0
2 /* Atheros AR71xx built-in ethernet mac driver
4 * Copyright (C) 2019 Oleksij Rempel <o.rempel@pengutronix.de>
6 * List of authors who contributed to this driver before mainlining:
7 * Alexander Couzens <lynxis@fe80.eu>
8 * Christian Lamparter <chunkeey@gmail.com>
9 * Chuanhong Guo <gch981213@gmail.com>
10 * Daniel F. Dickinson <cshored@thecshore.com>
11 * David Bauer <mail@david-bauer.net>
12 * Felix Fietkau <nbd@nbd.name>
13 * Gabor Juhos <juhosg@freemail.hu>
14 * Hauke Mehrtens <hauke@hauke-m.de>
15 * Johann Neuhauser <johann@it-neuhauser.de>
16 * John Crispin <john@phrozen.org>
17 * Jo-Philipp Wich <jo@mein.io>
18 * Koen Vandeputte <koen.vandeputte@ncentric.com>
19 * Lucian Cristian <lucian.cristian@gmail.com>
20 * Matt Merhar <mattmerhar@protonmail.com>
21 * Milan Krstic <milan.krstic@gmail.com>
22 * Petr Štetiar <ynezz@true.cz>
23 * Rosen Penev <rosenp@gmail.com>
24 * Stephen Walker <stephendwalker+github@gmail.com>
25 * Vittorio Gambaletta <openwrt@vittgam.net>
26 * Weijie Gao <hackpascal@gmail.com>
27 * Imre Kaloz <kaloz@openwrt.org>
30 #include <linux/if_vlan.h>
31 #include <linux/mfd/syscon.h>
32 #include <linux/of_mdio.h>
33 #include <linux/of_net.h>
34 #include <linux/of_platform.h>
35 #include <linux/phylink.h>
36 #include <linux/regmap.h>
37 #include <linux/reset.h>
38 #include <linux/clk.h>
41 /* For our NAPI weight, bigger does *NOT* mean better - it means more
42 * D-cache misses and lots more wasted cycles than we'll ever
43 * possibly gain from saving instructions.
45 #define AG71XX_NAPI_WEIGHT 32
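/* Delay (in jiffies, roughly 100 ms) before the oom_timer re-arms NAPI
 * polling when an RX ring refill failed because of an allocation error.
 */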
46 #define AG71XX_OOM_REFILL (1 + HZ / 10)
48 #define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE)
49 #define AG71XX_INT_TX (AG71XX_INT_TX_PS)
50 #define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF)
52 #define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX)
53 #define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL)
55 #define AG71XX_TX_MTU_LEN 1540
57 #define AG71XX_TX_RING_SPLIT 512
58 #define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \
59 AG71XX_TX_RING_SPLIT)
60 #define AG71XX_TX_RING_SIZE_DEFAULT 128
61 #define AG71XX_RX_RING_SIZE_DEFAULT 256
63 #define AG71XX_MDIO_RETRY 1000
64 #define AG71XX_MDIO_DELAY 5
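/* Upper bound for the MDIO bus clock; ag71xx_mdio_get_divider() walks the
 * per-SoC divider tables below and picks the first divider that brings the
 * reference clock at or below this rate.
 */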
65 #define AG71XX_MDIO_MAX_CLK 5000000
67 /* Register offsets */
68 #define AG71XX_REG_MAC_CFG1 0x0000
69 #define MAC_CFG1_TXE BIT(0) /* Tx Enable */
70 #define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */
71 #define MAC_CFG1_RXE BIT(2) /* Rx Enable */
72 #define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */
73 #define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */
74 #define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */
75 #define MAC_CFG1_SR BIT(31) /* Soft Reset */
76 #define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \
77 MAC_CFG1_SRX | MAC_CFG1_STX)
79 #define AG71XX_REG_MAC_CFG2 0x0004
80 #define MAC_CFG2_FDX BIT(0)
81 #define MAC_CFG2_PAD_CRC_EN BIT(2)
82 #define MAC_CFG2_LEN_CHECK BIT(4)
83 #define MAC_CFG2_IF_1000 BIT(9)
84 #define MAC_CFG2_IF_10_100 BIT(8)
86 #define AG71XX_REG_MAC_MFL 0x0010
88 #define AG71XX_REG_MII_CFG 0x0020
89 #define MII_CFG_CLK_DIV_4 0
90 #define MII_CFG_CLK_DIV_6 2
91 #define MII_CFG_CLK_DIV_8 3
92 #define MII_CFG_CLK_DIV_10 4
93 #define MII_CFG_CLK_DIV_14 5
94 #define MII_CFG_CLK_DIV_20 6
95 #define MII_CFG_CLK_DIV_28 7
96 #define MII_CFG_CLK_DIV_34 8
97 #define MII_CFG_CLK_DIV_42 9
98 #define MII_CFG_CLK_DIV_50 10
99 #define MII_CFG_CLK_DIV_58 11
100 #define MII_CFG_CLK_DIV_66 12
101 #define MII_CFG_CLK_DIV_74 13
102 #define MII_CFG_CLK_DIV_82 14
103 #define MII_CFG_CLK_DIV_98 15
104 #define MII_CFG_RESET BIT(31)
106 #define AG71XX_REG_MII_CMD 0x0024
107 #define MII_CMD_READ BIT(0)
109 #define AG71XX_REG_MII_ADDR 0x0028
110 #define MII_ADDR_SHIFT 8
112 #define AG71XX_REG_MII_CTRL 0x002c
113 #define AG71XX_REG_MII_STATUS 0x0030
114 #define AG71XX_REG_MII_IND 0x0034
115 #define MII_IND_BUSY BIT(0)
116 #define MII_IND_INVALID BIT(2)
118 #define AG71XX_REG_MAC_IFCTL 0x0038
119 #define MAC_IFCTL_SPEED BIT(16)
121 #define AG71XX_REG_MAC_ADDR1 0x0040
122 #define AG71XX_REG_MAC_ADDR2 0x0044
123 #define AG71XX_REG_FIFO_CFG0 0x0048
124 #define FIFO_CFG0_WTM BIT(0) /* Watermark Module */
125 #define FIFO_CFG0_RXS BIT(1) /* Rx System Module */
126 #define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */
127 #define FIFO_CFG0_TXS BIT(3) /* Tx System Module */
128 #define FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */
129 #define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \
130 | FIFO_CFG0_TXS | FIFO_CFG0_TXF)
131 #define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT)
133 #define FIFO_CFG0_ENABLE_SHIFT 8
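/* FIFO_CFG0_INIT above shifts the module bits into the per-module enable
 * field of FIFO_CFG0, enabling the watermark, RX/TX system and RX/TX
 * fabric modules in a single write.
 */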
135 #define AG71XX_REG_FIFO_CFG1 0x004c
136 #define AG71XX_REG_FIFO_CFG2 0x0050
137 #define AG71XX_REG_FIFO_CFG3 0x0054
138 #define AG71XX_REG_FIFO_CFG4 0x0058
139 #define FIFO_CFG4_DE BIT(0) /* Drop Event */
140 #define FIFO_CFG4_DV BIT(1) /* RX_DV Event */
141 #define FIFO_CFG4_FC BIT(2) /* False Carrier */
142 #define FIFO_CFG4_CE BIT(3) /* Code Error */
143 #define FIFO_CFG4_CR BIT(4) /* CRC error */
144 #define FIFO_CFG4_LM BIT(5) /* Length Mismatch */
145 #define FIFO_CFG4_LO BIT(6) /* Length out of range */
146 #define FIFO_CFG4_OK BIT(7) /* Packet is OK */
147 #define FIFO_CFG4_MC BIT(8) /* Multicast Packet */
148 #define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */
149 #define FIFO_CFG4_DR BIT(10) /* Dribble */
150 #define FIFO_CFG4_LE BIT(11) /* Long Event */
151 #define FIFO_CFG4_CF BIT(12) /* Control Frame */
152 #define FIFO_CFG4_PF BIT(13) /* Pause Frame */
153 #define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */
154 #define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */
155 #define FIFO_CFG4_FT BIT(16) /* Frame Truncated */
156 #define FIFO_CFG4_UC BIT(17) /* Unicast Packet */
157 #define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \
158 FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \
159 FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \
160 FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \
161 FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \
162 FIFO_CFG4_VT)
164 #define AG71XX_REG_FIFO_CFG5 0x005c
165 #define FIFO_CFG5_DE BIT(0) /* Drop Event */
166 #define FIFO_CFG5_DV BIT(1) /* RX_DV Event */
167 #define FIFO_CFG5_FC BIT(2) /* False Carrier */
168 #define FIFO_CFG5_CE BIT(3) /* Code Error */
169 #define FIFO_CFG5_LM BIT(4) /* Length Mismatch */
170 #define FIFO_CFG5_LO BIT(5) /* Length Out of Range */
171 #define FIFO_CFG5_OK BIT(6) /* Packet is OK */
172 #define FIFO_CFG5_MC BIT(7) /* Multicast Packet */
173 #define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */
174 #define FIFO_CFG5_DR BIT(9) /* Dribble */
175 #define FIFO_CFG5_CF BIT(10) /* Control Frame */
176 #define FIFO_CFG5_PF BIT(11) /* Pause Frame */
177 #define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */
178 #define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */
179 #define FIFO_CFG5_LE BIT(14) /* Long Event */
180 #define FIFO_CFG5_FT BIT(15) /* Frame Truncated */
181 #define FIFO_CFG5_16 BIT(16) /* unknown */
182 #define FIFO_CFG5_17 BIT(17) /* unknown */
183 #define FIFO_CFG5_SF BIT(18) /* Short Frame */
184 #define FIFO_CFG5_BM BIT(19) /* Byte Mode */
185 #define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \
186 FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \
187 FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \
188 FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \
189 FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \
190 FIFO_CFG5_17 | FIFO_CFG5_SF)
192 #define AG71XX_REG_TX_CTRL 0x0180
193 #define TX_CTRL_TXE BIT(0) /* Tx Enable */
195 #define AG71XX_REG_TX_DESC 0x0184
196 #define AG71XX_REG_TX_STATUS 0x0188
197 #define TX_STATUS_PS BIT(0) /* Packet Sent */
198 #define TX_STATUS_UR BIT(1) /* Tx Underrun */
199 #define TX_STATUS_BE BIT(3) /* Bus Error */
201 #define AG71XX_REG_RX_CTRL 0x018c
202 #define RX_CTRL_RXE BIT(0) /* Rx Enable */
204 #define AG71XX_DMA_RETRY 10
205 #define AG71XX_DMA_DELAY 1
207 #define AG71XX_REG_RX_DESC 0x0190
208 #define AG71XX_REG_RX_STATUS 0x0194
209 #define RX_STATUS_PR BIT(0) /* Packet Received */
210 #define RX_STATUS_OF BIT(2) /* Rx Overflow */
211 #define RX_STATUS_BE BIT(3) /* Bus Error */
213 #define AG71XX_REG_INT_ENABLE 0x0198
214 #define AG71XX_REG_INT_STATUS 0x019c
215 #define AG71XX_INT_TX_PS BIT(0)
216 #define AG71XX_INT_TX_UR BIT(1)
217 #define AG71XX_INT_TX_BE BIT(3)
218 #define AG71XX_INT_RX_PR BIT(4)
219 #define AG71XX_INT_RX_OF BIT(6)
220 #define AG71XX_INT_RX_BE BIT(7)
222 #define AG71XX_REG_FIFO_DEPTH 0x01a8
223 #define AG71XX_REG_RX_SM 0x01b0
224 #define AG71XX_REG_TX_SM 0x01b4
226 #define ETH_SWITCH_HEADER_LEN 2
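/* Extra headroom accounted for by ag71xx_max_frame_len(): presumably the
 * two byte header used by the built-in switch, on top of the Ethernet
 * header, VLAN tag, MTU and FCS.
 */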
228 #define AG71XX_DEFAULT_MSG_ENABLE \
238 struct ag71xx_statistic {
239 unsigned short offset;
240 u32 mask;
241 const char name[ETH_GSTRING_LEN];
244 static const struct ag71xx_statistic ag71xx_statistics[] = {
245 { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
246 { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
247 { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
248 { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
249 { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
250 { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
251 { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
252 { 0x009C, GENMASK(23, 0), "Rx Byte", },
253 { 0x00A0, GENMASK(17, 0), "Rx Packet", },
254 { 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
255 { 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
256 { 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
257 { 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
258 { 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
259 { 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
260 { 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
261 { 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
262 { 0x00C4, GENMASK(11, 0), "Rx Code Error", },
263 { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
264 { 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
265 { 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
266 { 0x00D4, GENMASK(11, 0), "Rx Fragments", },
267 { 0x00D8, GENMASK(11, 0), "Rx Jabber", },
268 { 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
269 { 0x00E0, GENMASK(23, 0), "Tx Byte", },
270 { 0x00E4, GENMASK(17, 0), "Tx Packet", },
271 { 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
272 { 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
273 { 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
274 { 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
275 { 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
276 { 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
277 { 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
278 { 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
279 { 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
280 { 0x010C, GENMASK(12, 0), "Tx Total Collision", },
281 { 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
282 { 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
283 { 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
284 { 0x011C, GENMASK(11, 0), "Tx FCS Error", },
285 { 0x0120, GENMASK(11, 0), "Tx Control Frame", },
286 { 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
287 { 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
288 { 0x012C, GENMASK(11, 0), "Tx Fragment", },
291 #define DESC_EMPTY BIT(31)
292 #define DESC_MORE BIT(24)
293 #define DESC_PKTLEN_M 0xfff
301 #define AG71XX_DESC_SIZE roundup(sizeof(struct ag71xx_desc), \
302 L1_CACHE_BYTES)
318 /* "Hot" fields in the data path. */
322 /* "Cold" fields - not used in the data path. */
323 struct ag71xx_buf *buf;
326 dma_addr_t descs_dma;
343 u16 desc_pktlen_mask;
344 bool tx_hang_workaround;
345 enum ag71xx_type type;
349 /* Critical data related to the per-packet data path are clustered
350 * early in this structure to help improve the D-cache footprint.
352 struct ag71xx_ring rx_ring ____cacheline_aligned;
353 struct ag71xx_ring tx_ring ____cacheline_aligned;
358 struct net_device *ndev;
359 struct platform_device *pdev;
360 struct napi_struct napi;
362 const struct ag71xx_dcfg *dcfg;
364 /* From this point onwards we're not looking at per-packet fields. */
365 void __iomem *mac_base;
367 struct ag71xx_desc *stop_desc;
368 dma_addr_t stop_desc_dma;
370 phy_interface_t phy_if_mode;
371 struct phylink *phylink;
372 struct phylink_config phylink_config;
374 struct delayed_work restart_work;
375 struct timer_list oom_timer;
377 struct reset_control *mac_reset;
382 struct reset_control *mdio_reset;
383 struct mii_bus *mii_bus;
384 struct clk *clk_mdio;
388 static int ag71xx_desc_empty(struct ag71xx_desc *desc)
390 return (desc->ctrl & DESC_EMPTY) != 0;
393 static struct ag71xx_desc *ag71xx_ring_desc(struct ag71xx_ring *ring, int idx)
395 return (struct ag71xx_desc *)&ring->descs_cpu[idx * AG71XX_DESC_SIZE];
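/* Ring sizes are powers of two; the "order" stored in struct ag71xx_ring
 * is log2 of the number of descriptors, e.g. the default RX ring of 256
 * entries has order 8 and its index mask is BIT(order) - 1.
 */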
398 static int ag71xx_ring_size_order(int size)
400 return fls(size - 1);
403 static bool ag71xx_is(struct ag71xx *ag, enum ag71xx_type type)
405 return ag->dcfg->type == type;
408 static void ag71xx_wr(struct ag71xx *ag, unsigned int reg, u32 value)
410 iowrite32(value, ag->mac_base + reg);
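/* read the register back to flush the posted write */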
412 (void)ioread32(ag->mac_base + reg);
415 static u32 ag71xx_rr(struct ag71xx *ag, unsigned int reg)
417 return ioread32(ag->mac_base + reg);
420 static void ag71xx_sb(struct ag71xx *ag, unsigned int reg, u32 mask)
424 r = ag->mac_base + reg;
425 iowrite32(ioread32(r) | mask, r);
430 static void ag71xx_cb(struct ag71xx *ag, unsigned int reg, u32 mask)
434 r = ag->mac_base + reg;
435 iowrite32(ioread32(r) & ~mask, r);
440 static void ag71xx_int_enable(struct ag71xx *ag, u32 ints)
442 ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints);
445 static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
447 ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
450 static void ag71xx_get_drvinfo(struct net_device *ndev,
451 struct ethtool_drvinfo *info)
453 struct ag71xx *ag = netdev_priv(ndev);
455 strlcpy(info->driver, "ag71xx", sizeof(info->driver));
456 strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
457 sizeof(info->bus_info));
460 static int ag71xx_get_link_ksettings(struct net_device *ndev,
461 struct ethtool_link_ksettings *kset)
463 struct ag71xx *ag = netdev_priv(ndev);
465 return phylink_ethtool_ksettings_get(ag->phylink, kset);
468 static int ag71xx_set_link_ksettings(struct net_device *ndev,
469 const struct ethtool_link_ksettings *kset)
471 struct ag71xx *ag = netdev_priv(ndev);
473 return phylink_ethtool_ksettings_set(ag->phylink, kset);
476 static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
478 struct ag71xx *ag = netdev_priv(ndev);
480 return phylink_ethtool_nway_reset(ag->phylink);
483 static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
484 struct ethtool_pauseparam *pause)
486 struct ag71xx *ag = netdev_priv(ndev);
488 phylink_ethtool_get_pauseparam(ag->phylink, pause);
491 static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
492 struct ethtool_pauseparam *pause)
494 struct ag71xx *ag = netdev_priv(ndev);
496 return phylink_ethtool_set_pauseparam(ag->phylink, pause);
499 static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
502 if (sset == ETH_SS_STATS) {
505 for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
506 memcpy(data + i * ETH_GSTRING_LEN,
507 ag71xx_statistics[i].name, ETH_GSTRING_LEN);
511 static void ag71xx_ethtool_get_stats(struct net_device *ndev,
512 struct ethtool_stats *stats, u64 *data)
514 struct ag71xx *ag = netdev_priv(ndev);
517 for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
518 *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
519 & ag71xx_statistics[i].mask;
522 static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
524 if (sset == ETH_SS_STATS)
525 return ARRAY_SIZE(ag71xx_statistics);
529 static const struct ethtool_ops ag71xx_ethtool_ops = {
530 .get_drvinfo = ag71xx_get_drvinfo,
531 .get_link = ethtool_op_get_link,
532 .get_ts_info = ethtool_op_get_ts_info,
533 .get_link_ksettings = ag71xx_get_link_ksettings,
534 .set_link_ksettings = ag71xx_set_link_ksettings,
535 .nway_reset = ag71xx_ethtool_nway_reset,
536 .get_pauseparam = ag71xx_ethtool_get_pauseparam,
537 .set_pauseparam = ag71xx_ethtool_set_pauseparam,
538 .get_strings = ag71xx_ethtool_get_strings,
539 .get_ethtool_stats = ag71xx_ethtool_get_stats,
540 .get_sset_count = ag71xx_ethtool_get_sset_count,
543 static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
545 struct net_device *ndev = ag->ndev;
548 for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
551 udelay(AG71XX_MDIO_DELAY);
553 busy = ag71xx_rr(ag, AG71XX_REG_MII_IND);
557 udelay(AG71XX_MDIO_DELAY);
560 netif_err(ag, link, ndev, "MDIO operation timed out\n");
565 static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
567 struct ag71xx *ag = bus->priv;
570 err = ag71xx_mdio_wait_busy(ag);
574 ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
575 ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
576 /* enable read mode */
577 ag71xx_wr(ag, AG71XX_REG_MII_CMD, MII_CMD_READ);
579 err = ag71xx_mdio_wait_busy(ag);
583 val = ag71xx_rr(ag, AG71XX_REG_MII_STATUS);
584 /* disable read mode */
585 ag71xx_wr(ag, AG71XX_REG_MII_CMD, 0);
587 netif_dbg(ag, link, ag->ndev, "mii_read: addr=%04x, reg=%04x, value=%04x\n",
593 static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg,
596 struct ag71xx *ag = bus->priv;
598 netif_dbg(ag, link, ag->ndev, "mii_write: addr=%04x, reg=%04x, value=%04x\n",
601 ag71xx_wr(ag, AG71XX_REG_MII_ADDR,
602 ((addr & 0x1f) << MII_ADDR_SHIFT) | (reg & 0xff));
603 ag71xx_wr(ag, AG71XX_REG_MII_CTRL, val);
605 return ag71xx_mdio_wait_busy(ag);
608 static const u32 ar71xx_mdio_div_table[] = {
609 4, 4, 6, 8, 10, 14, 20, 28,
612 static const u32 ar7240_mdio_div_table[] = {
613 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
616 static const u32 ar933x_mdio_div_table[] = {
617 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
620 static int ag71xx_mdio_get_divider(struct ag71xx *ag, u32 *div)
622 unsigned long ref_clock;
626 ref_clock = clk_get_rate(ag->clk_mdio);
630 if (ag71xx_is(ag, AR9330) || ag71xx_is(ag, AR9340)) {
631 table = ar933x_mdio_div_table;
632 ndivs = ARRAY_SIZE(ar933x_mdio_div_table);
633 } else if (ag71xx_is(ag, AR7240)) {
634 table = ar7240_mdio_div_table;
635 ndivs = ARRAY_SIZE(ar7240_mdio_div_table);
637 table = ar71xx_mdio_div_table;
638 ndivs = ARRAY_SIZE(ar71xx_mdio_div_table);
641 for (i = 0; i < ndivs; i++) {
644 t = ref_clock / table[i];
645 if (t <= AG71XX_MDIO_MAX_CLK) {
654 static int ag71xx_mdio_reset(struct mii_bus *bus)
656 struct ag71xx *ag = bus->priv;
660 err = ag71xx_mdio_get_divider(ag, &t);
664 ag71xx_wr(ag, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
665 usleep_range(100, 200);
667 ag71xx_wr(ag, AG71XX_REG_MII_CFG, t);
668 usleep_range(100, 200);
673 static int ag71xx_mdio_probe(struct ag71xx *ag)
675 struct device *dev = &ag->pdev->dev;
676 struct net_device *ndev = ag->ndev;
677 struct mii_bus *mii_bus;
678 struct device_node *np, *mnp;
684 ag->clk_mdio = devm_clk_get(dev, "mdio");
685 if (IS_ERR(ag->clk_mdio)) {
686 netif_err(ag, probe, ndev, "Failed to get mdio clk.\n");
687 return PTR_ERR(ag->clk_mdio);
690 err = clk_prepare_enable(ag->clk_mdio);
692 netif_err(ag, probe, ndev, "Failed to enable mdio clk.\n");
696 mii_bus = devm_mdiobus_alloc(dev);
699 goto mdio_err_put_clk;
702 ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
703 if (IS_ERR(ag->mdio_reset)) {
704 netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
705 err = PTR_ERR(ag->mdio_reset);
706 goto mdio_err_put_clk;
709 mii_bus->name = "ag71xx_mdio";
710 mii_bus->read = ag71xx_mdio_mii_read;
711 mii_bus->write = ag71xx_mdio_mii_write;
712 mii_bus->reset = ag71xx_mdio_reset;
714 mii_bus->parent = dev;
715 snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, ag->mac_idx);
717 if (!IS_ERR(ag->mdio_reset)) {
718 reset_control_assert(ag->mdio_reset);
720 reset_control_deassert(ag->mdio_reset);
724 mnp = of_get_child_by_name(np, "mdio");
725 err = of_mdiobus_register(mii_bus, mnp);
728 goto mdio_err_put_clk;
730 ag->mii_bus = mii_bus;
735 clk_disable_unprepare(ag->clk_mdio);
739 static void ag71xx_mdio_remove(struct ag71xx *ag)
742 mdiobus_unregister(ag->mii_bus);
743 clk_disable_unprepare(ag->clk_mdio);
746 static void ag71xx_hw_stop(struct ag71xx *ag)
748 /* disable all interrupts and stop the rx/tx engine */
749 ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0);
750 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
751 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
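/* Heuristic used by the tx_hang_workaround: once the last TX is more than
 * ~100 ms old and the link is up, the RX/TX state machine registers and
 * the FIFO depth are inspected to decide whether the DMA engine is wedged
 * and a restart via restart_work is needed.
 */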
754 static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
756 unsigned long timestamp;
757 u32 rx_sm, tx_sm, rx_fd;
759 timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
760 if (likely(time_before(jiffies, timestamp + HZ / 10)))
763 if (!netif_carrier_ok(ag->ndev))
766 rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM);
767 if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6)
770 tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM);
771 rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH);
772 if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) &&
773 ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0)
779 static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
781 struct ag71xx_ring *ring = &ag->tx_ring;
782 int sent = 0, bytes_compl = 0, n = 0;
783 struct net_device *ndev = ag->ndev;
784 int ring_mask, ring_size;
785 bool dma_stuck = false;
787 ring_mask = BIT(ring->order) - 1;
788 ring_size = BIT(ring->order);
790 netif_dbg(ag, tx_queued, ndev, "processing TX ring\n");
792 while (ring->dirty + n != ring->curr) {
793 struct ag71xx_desc *desc;
797 i = (ring->dirty + n) & ring_mask;
798 desc = ag71xx_ring_desc(ring, i);
799 skb = ring->buf[i].tx.skb;
801 if (!flush && !ag71xx_desc_empty(desc)) {
802 if (ag->dcfg->tx_hang_workaround &&
803 ag71xx_check_dma_stuck(ag)) {
804 schedule_delayed_work(&ag->restart_work,
812 desc->ctrl |= DESC_EMPTY;
818 dev_kfree_skb_any(skb);
819 ring->buf[i].tx.skb = NULL;
821 bytes_compl += ring->buf[i].tx.len;
827 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
832 netif_dbg(ag, tx_done, ndev, "%d packets sent out\n", sent);
837 ag->ndev->stats.tx_bytes += bytes_compl;
838 ag->ndev->stats.tx_packets += sent;
840 netdev_completed_queue(ag->ndev, sent, bytes_compl);
841 if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
842 netif_wake_queue(ag->ndev);
845 cancel_delayed_work(&ag->restart_work);
850 static void ag71xx_dma_wait_stop(struct ag71xx *ag)
852 struct net_device *ndev = ag->ndev;
855 for (i = 0; i < AG71XX_DMA_RETRY; i++) {
858 mdelay(AG71XX_DMA_DELAY);
860 rx = ag71xx_rr(ag, AG71XX_REG_RX_CTRL) & RX_CTRL_RXE;
861 tx = ag71xx_rr(ag, AG71XX_REG_TX_CTRL) & TX_CTRL_TXE;
866 netif_err(ag, hw, ndev, "DMA stop operation timed out\n");
869 static void ag71xx_dma_reset(struct ag71xx *ag)
871 struct net_device *ndev = ag->ndev;
876 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0);
877 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0);
879 /* give the hardware some time to really stop all rx/tx activity;
880 * clearing the descriptors too early causes random memory corruption
882 ag71xx_dma_wait_stop(ag);
884 /* clear descriptor addresses */
885 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma);
886 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma);
888 /* clear pending RX/TX interrupts */
889 for (i = 0; i < 256; i++) {
890 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
891 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
894 /* clear pending errors */
895 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF);
896 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR);
898 val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
900 netif_err(ag, hw, ndev, "unable to clear DMA Rx status: %08x\n",
903 val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
905 /* mask out reserved bits */
909 netif_err(ag, hw, ndev, "unable to clear DMA Tx status: %08x\n",
913 static void ag71xx_hw_setup(struct ag71xx *ag)
915 u32 init = MAC_CFG1_INIT;
917 /* setup MAC configuration registers */
918 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
920 ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
921 MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
923 /* setup max frame length to zero */
924 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0);
926 /* setup FIFO configuration registers */
927 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
928 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]);
929 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]);
930 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
931 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);
934 static unsigned int ag71xx_max_frame_len(unsigned int mtu)
936 return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
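/* e.g. for the standard 1500 byte MTU this yields 2 + 14 + 4 + 1500 + 4 =
 * 1524 bytes, the value programmed into AG71XX_REG_MAC_MFL.
 */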
939 static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
943 t = (((u32)mac[5]) << 24) | (((u32)mac[4]) << 16)
944 | (((u32)mac[3]) << 8) | ((u32)mac[2]);
946 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t);
948 t = (((u32)mac[1]) << 24) | (((u32)mac[0]) << 16);
949 ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t);
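/* e.g. 00:11:22:33:44:55 is written as MAC_ADDR1 = 0x55443322 and
 * MAC_ADDR2 = 0x11000000.
 */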
952 static void ag71xx_fast_reset(struct ag71xx *ag)
954 struct net_device *dev = ag->ndev;
960 mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
961 rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
963 ag71xx_tx_packets(ag, true);
965 reset_control_assert(ag->mac_reset);
966 usleep_range(10, 20);
967 reset_control_deassert(ag->mac_reset);
968 usleep_range(10, 20);
970 ag71xx_dma_reset(ag);
972 ag->tx_ring.curr = 0;
973 ag->tx_ring.dirty = 0;
974 netdev_reset_queue(ag->ndev);
976 /* setup max frame length */
977 ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
978 ag71xx_max_frame_len(ag->ndev->mtu));
980 ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
981 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
982 ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
984 ag71xx_hw_set_macaddr(ag, dev->dev_addr);
987 static void ag71xx_hw_start(struct ag71xx *ag)
989 /* start RX engine */
990 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
992 /* enable interrupts */
993 ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
995 netif_wake_queue(ag->ndev);
998 static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
999 const struct phylink_link_state *state)
1001 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1003 if (phylink_autoneg_inband(mode))
1006 if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1007 ag71xx_fast_reset(ag);
1009 if (ag->tx_ring.desc_split) {
1010 ag->fifodata[2] &= 0xffff;
1011 ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
1014 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
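/* With TX descriptor splitting enabled (AR7100), the high 16 bits of
 * FIFO_CFG3 are recomputed from the split size: a 512 byte split gives
 * (2048 - 512) / 4 = 384, while the low 16 bits keep their per-SoC
 * fifodata value.
 */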
1017 static void ag71xx_mac_validate(struct phylink_config *config,
1018 unsigned long *supported,
1019 struct phylink_link_state *state)
1021 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1022 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1024 switch (state->interface) {
1025 case PHY_INTERFACE_MODE_NA:
1027 case PHY_INTERFACE_MODE_MII:
1028 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
1029 ag71xx_is(ag, AR9340) ||
1030 ag71xx_is(ag, QCA9530) ||
1031 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1034 case PHY_INTERFACE_MODE_GMII:
1035 if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
1036 (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
1037 (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
1040 case PHY_INTERFACE_MODE_SGMII:
1041 if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
1044 case PHY_INTERFACE_MODE_RMII:
1045 if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
1048 case PHY_INTERFACE_MODE_RGMII:
1049 if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
1050 (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
1057 phylink_set(mask, MII);
1059 phylink_set(mask, Pause);
1060 phylink_set(mask, Asym_Pause);
1061 phylink_set(mask, Autoneg);
1062 phylink_set(mask, 10baseT_Half);
1063 phylink_set(mask, 10baseT_Full);
1064 phylink_set(mask, 100baseT_Half);
1065 phylink_set(mask, 100baseT_Full);
1067 if (state->interface == PHY_INTERFACE_MODE_NA ||
1068 state->interface == PHY_INTERFACE_MODE_SGMII ||
1069 state->interface == PHY_INTERFACE_MODE_RGMII ||
1070 state->interface == PHY_INTERFACE_MODE_GMII) {
1071 phylink_set(mask, 1000baseT_Full);
1072 phylink_set(mask, 1000baseX_Full);
1075 bitmap_and(supported, supported, mask,
1076 __ETHTOOL_LINK_MODE_MASK_NBITS);
1077 bitmap_and(state->advertising, state->advertising, mask,
1078 __ETHTOOL_LINK_MODE_MASK_NBITS);
1082 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1085 static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
1086 struct phylink_link_state *state)
1091 static void ag71xx_mac_an_restart(struct phylink_config *config)
1096 static void ag71xx_mac_link_down(struct phylink_config *config,
1097 unsigned int mode, phy_interface_t interface)
1099 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1104 static void ag71xx_mac_link_up(struct phylink_config *config,
1105 struct phy_device *phy,
1106 unsigned int mode, phy_interface_t interface,
1107 int speed, int duplex,
1108 bool tx_pause, bool rx_pause)
1110 struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
1115 cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
1116 cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
1117 cfg2 |= duplex ? MAC_CFG2_FDX : 0;
1119 ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
1120 ifctl &= ~(MAC_IFCTL_SPEED);
1122 fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
1123 fifo5 &= ~FIFO_CFG5_BM;
1127 cfg2 |= MAC_CFG2_IF_1000;
1128 fifo5 |= FIFO_CFG5_BM;
1131 cfg2 |= MAC_CFG2_IF_10_100;
1132 ifctl |= MAC_IFCTL_SPEED;
1135 cfg2 |= MAC_CFG2_IF_10_100;
1141 ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
1142 ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
1143 ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
1145 cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
1146 cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
1148 cfg1 |= MAC_CFG1_TFC;
1151 cfg1 |= MAC_CFG1_RFC;
1152 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
1154 ag71xx_hw_start(ag);
1157 static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
1158 .validate = ag71xx_mac_validate,
1159 .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
1160 .mac_an_restart = ag71xx_mac_an_restart,
1161 .mac_config = ag71xx_mac_config,
1162 .mac_link_down = ag71xx_mac_link_down,
1163 .mac_link_up = ag71xx_mac_link_up,
1166 static int ag71xx_phylink_setup(struct ag71xx *ag)
1168 struct phylink *phylink;
1170 ag->phylink_config.dev = &ag->ndev->dev;
1171 ag->phylink_config.type = PHYLINK_NETDEV;
1173 phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
1174 ag->phy_if_mode, &ag71xx_phylink_mac_ops);
1175 if (IS_ERR(phylink))
1176 return PTR_ERR(phylink);
1178 ag->phylink = phylink;
1182 static void ag71xx_ring_tx_clean(struct ag71xx *ag)
1184 struct ag71xx_ring *ring = &ag->tx_ring;
1185 int ring_mask = BIT(ring->order) - 1;
1186 u32 bytes_compl = 0, pkts_compl = 0;
1187 struct net_device *ndev = ag->ndev;
1189 while (ring->curr != ring->dirty) {
1190 struct ag71xx_desc *desc;
1191 u32 i = ring->dirty & ring_mask;
1193 desc = ag71xx_ring_desc(ring, i);
1194 if (!ag71xx_desc_empty(desc)) {
1196 ndev->stats.tx_errors++;
1199 if (ring->buf[i].tx.skb) {
1200 bytes_compl += ring->buf[i].tx.len;
1202 dev_kfree_skb_any(ring->buf[i].tx.skb);
1204 ring->buf[i].tx.skb = NULL;
1208 /* flush descriptors */
1211 netdev_completed_queue(ndev, pkts_compl, bytes_compl);
1214 static void ag71xx_ring_tx_init(struct ag71xx *ag)
1216 struct ag71xx_ring *ring = &ag->tx_ring;
1217 int ring_size = BIT(ring->order);
1218 int ring_mask = ring_size - 1;
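/* Link the descriptors into a circular list: each ->next holds the DMA
 * address of the following descriptor, wrapping back to the first entry.
 */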
1221 for (i = 0; i < ring_size; i++) {
1222 struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1224 desc->next = (u32)(ring->descs_dma +
1225 AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
1227 desc->ctrl = DESC_EMPTY;
1228 ring->buf[i].tx.skb = NULL;
1231 /* flush descriptors */
1236 netdev_reset_queue(ag->ndev);
1239 static void ag71xx_ring_rx_clean(struct ag71xx *ag)
1241 struct ag71xx_ring *ring = &ag->rx_ring;
1242 int ring_size = BIT(ring->order);
1248 for (i = 0; i < ring_size; i++)
1249 if (ring->buf[i].rx.rx_buf) {
1250 dma_unmap_single(&ag->pdev->dev,
1251 ring->buf[i].rx.dma_addr,
1252 ag->rx_buf_size, DMA_FROM_DEVICE);
1253 skb_free_frag(ring->buf[i].rx.rx_buf);
1257 static int ag71xx_buffer_size(struct ag71xx *ag)
1259 return ag->rx_buf_size +
1260 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
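/* The RX buffer is sized so that build_skb() in ag71xx_rx_packets() has
 * room for its struct skb_shared_info behind the received data.
 */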
1263 static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
1265 void *(*alloc)(unsigned int size))
1267 struct ag71xx_ring *ring = &ag->rx_ring;
1268 struct ag71xx_desc *desc;
1271 desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
1273 data = alloc(ag71xx_buffer_size(ag));
1277 buf->rx.rx_buf = data;
1278 buf->rx.dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size,
1280 desc->data = (u32)buf->rx.dma_addr + offset;
1284 static int ag71xx_ring_rx_init(struct ag71xx *ag)
1286 struct ag71xx_ring *ring = &ag->rx_ring;
1287 struct net_device *ndev = ag->ndev;
1288 int ring_mask = BIT(ring->order) - 1;
1289 int ring_size = BIT(ring->order);
1294 for (i = 0; i < ring_size; i++) {
1295 struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1297 desc->next = (u32)(ring->descs_dma +
1298 AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
1300 netif_dbg(ag, rx_status, ndev, "RX desc at %p, next is %08x\n",
1304 for (i = 0; i < ring_size; i++) {
1305 struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1307 if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset,
1308 netdev_alloc_frag)) {
1313 desc->ctrl = DESC_EMPTY;
1316 /* flush descriptors */
1325 static int ag71xx_ring_rx_refill(struct ag71xx *ag)
1327 struct ag71xx_ring *ring = &ag->rx_ring;
1328 int ring_mask = BIT(ring->order) - 1;
1329 int offset = ag->rx_buf_offset;
1333 for (; ring->curr - ring->dirty > 0; ring->dirty++) {
1334 struct ag71xx_desc *desc;
1337 i = ring->dirty & ring_mask;
1338 desc = ag71xx_ring_desc(ring, i);
1340 if (!ring->buf[i].rx.rx_buf &&
1341 !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
1345 desc->ctrl = DESC_EMPTY;
1349 /* flush descriptors */
1352 netif_dbg(ag, rx_status, ag->ndev, "%u rx descriptors refilled\n",
1358 static int ag71xx_rings_init(struct ag71xx *ag)
1360 struct ag71xx_ring *tx = &ag->tx_ring;
1361 struct ag71xx_ring *rx = &ag->rx_ring;
1362 int ring_size, tx_size;
1364 ring_size = BIT(tx->order) + BIT(rx->order);
1365 tx_size = BIT(tx->order);
1367 tx->buf = kcalloc(ring_size, sizeof(*tx->buf), GFP_KERNEL);
1371 tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
1372 ring_size * AG71XX_DESC_SIZE,
1373 &tx->descs_dma, GFP_KERNEL);
1374 if (!tx->descs_cpu) {
1380 rx->buf = &tx->buf[tx_size];
1381 rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
1382 rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
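/* TX and RX share a single coherent allocation and a single buf[] array;
 * the RX descriptors and buffer slots start right behind the TX ring.
 */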
1384 ag71xx_ring_tx_init(ag);
1385 return ag71xx_ring_rx_init(ag);
1388 static void ag71xx_rings_free(struct ag71xx *ag)
1390 struct ag71xx_ring *tx = &ag->tx_ring;
1391 struct ag71xx_ring *rx = &ag->rx_ring;
1394 ring_size = BIT(tx->order) + BIT(rx->order);
1397 dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE,
1398 tx->descs_cpu, tx->descs_dma);
1402 tx->descs_cpu = NULL;
1403 rx->descs_cpu = NULL;
1408 static void ag71xx_rings_cleanup(struct ag71xx *ag)
1410 ag71xx_ring_rx_clean(ag);
1411 ag71xx_ring_tx_clean(ag);
1412 ag71xx_rings_free(ag);
1414 netdev_reset_queue(ag->ndev);
1417 static void ag71xx_hw_init(struct ag71xx *ag)
1421 ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
1422 usleep_range(20, 30);
1424 reset_control_assert(ag->mac_reset);
1426 reset_control_deassert(ag->mac_reset);
1429 ag71xx_hw_setup(ag);
1431 ag71xx_dma_reset(ag);
1434 static int ag71xx_hw_enable(struct ag71xx *ag)
1438 ret = ag71xx_rings_init(ag);
1442 napi_enable(&ag->napi);
1443 ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
1444 ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
1445 netif_start_queue(ag->ndev);
1450 static void ag71xx_hw_disable(struct ag71xx *ag)
1452 netif_stop_queue(ag->ndev);
1455 ag71xx_dma_reset(ag);
1457 napi_disable(&ag->napi);
1458 del_timer_sync(&ag->oom_timer);
1460 ag71xx_rings_cleanup(ag);
1463 static int ag71xx_open(struct net_device *ndev)
1465 struct ag71xx *ag = netdev_priv(ndev);
1466 unsigned int max_frame_len;
1469 ret = phylink_of_phy_connect(ag->phylink, ag->pdev->dev.of_node, 0);
1471 netif_err(ag, link, ndev, "phylink_of_phy_connect failed with err: %i\n",
1476 max_frame_len = ag71xx_max_frame_len(ndev->mtu);
1478 SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
1480 /* setup max frame length */
1481 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
1482 ag71xx_hw_set_macaddr(ag, ndev->dev_addr);
1484 ret = ag71xx_hw_enable(ag);
1488 phylink_start(ag->phylink);
1493 ag71xx_rings_cleanup(ag);
1497 static int ag71xx_stop(struct net_device *ndev)
1499 struct ag71xx *ag = netdev_priv(ndev);
1501 phylink_stop(ag->phylink);
1502 phylink_disconnect_phy(ag->phylink);
1503 ag71xx_hw_disable(ag);
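/* Fill one or more TX descriptors for a frame. On rings with desc_split
 * set (AR7100), a frame longer than the split threshold is spread over
 * several descriptors chained with DESC_MORE; segments of four bytes or
 * less are avoided because such short DMA transfers can hang the TX
 * engine.
 */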
1508 static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
1510 int i, ring_mask, ndesc, split;
1511 struct ag71xx_desc *desc;
1513 ring_mask = BIT(ring->order) - 1;
1515 split = ring->desc_split;
1521 unsigned int cur_len = len;
1523 i = (ring->curr + ndesc) & ring_mask;
1524 desc = ag71xx_ring_desc(ring, i);
1526 if (!ag71xx_desc_empty(desc))
1529 if (cur_len > split) {
1532 /* TX will hang if DMA transfers <= 4 bytes;
1533 * make sure the next segment is more than 4 bytes long.
1535 if (len <= split + 4)
1544 cur_len |= DESC_MORE;
1546 /* prevent early tx attempt of this descriptor */
1548 cur_len |= DESC_EMPTY;
1550 desc->ctrl = cur_len;
1557 static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
1558 struct net_device *ndev)
1560 int i, n, ring_min, ring_mask, ring_size;
1561 struct ag71xx *ag = netdev_priv(ndev);
1562 struct ag71xx_ring *ring;
1563 struct ag71xx_desc *desc;
1564 dma_addr_t dma_addr;
1566 ring = &ag->tx_ring;
1567 ring_mask = BIT(ring->order) - 1;
1568 ring_size = BIT(ring->order);
1570 if (skb->len <= 4) {
1571 netif_dbg(ag, tx_err, ndev, "packet len is too small\n");
1575 dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len,
1578 i = ring->curr & ring_mask;
1579 desc = ag71xx_ring_desc(ring, i);
1581 /* setup descriptor fields */
1582 n = ag71xx_fill_dma_desc(ring, (u32)dma_addr,
1583 skb->len & ag->dcfg->desc_pktlen_mask);
1585 goto err_drop_unmap;
1587 i = (ring->curr + n - 1) & ring_mask;
1588 ring->buf[i].tx.len = skb->len;
1589 ring->buf[i].tx.skb = skb;
1591 netdev_sent_queue(ndev, skb->len);
1593 skb_tx_timestamp(skb);
1595 desc->ctrl &= ~DESC_EMPTY;
1598 /* flush descriptor */
1602 if (ring->desc_split)
1603 ring_min *= AG71XX_TX_RING_DS_PER_PKT;
1605 if (ring->curr - ring->dirty >= ring_size - ring_min) {
1606 netif_dbg(ag, tx_err, ndev, "tx queue full\n");
1607 netif_stop_queue(ndev);
1610 netif_dbg(ag, tx_queued, ndev, "packet injected into TX queue\n");
1612 /* enable TX engine */
1613 ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);
1615 return NETDEV_TX_OK;
1618 dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
1621 ndev->stats.tx_dropped++;
1624 return NETDEV_TX_OK;
1627 static void ag71xx_oom_timer_handler(struct timer_list *t)
1629 struct ag71xx *ag = from_timer(ag, t, oom_timer);
1631 napi_schedule(&ag->napi);
1634 static void ag71xx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
1636 struct ag71xx *ag = netdev_priv(ndev);
1638 netif_err(ag, tx_err, ndev, "tx timeout\n");
1640 schedule_delayed_work(&ag->restart_work, 1);
1643 static void ag71xx_restart_work_func(struct work_struct *work)
1645 struct ag71xx *ag = container_of(work, struct ag71xx,
1649 ag71xx_hw_disable(ag);
1650 ag71xx_hw_enable(ag);
1652 phylink_stop(ag->phylink);
1653 phylink_start(ag->phylink);
1658 static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
1660 struct net_device *ndev = ag->ndev;
1661 int ring_mask, ring_size, done = 0;
1662 unsigned int pktlen_mask, offset;
1663 struct sk_buff *next, *skb;
1664 struct ag71xx_ring *ring;
1665 struct list_head rx_list;
1667 ring = &ag->rx_ring;
1668 pktlen_mask = ag->dcfg->desc_pktlen_mask;
1669 offset = ag->rx_buf_offset;
1670 ring_mask = BIT(ring->order) - 1;
1671 ring_size = BIT(ring->order);
1673 netif_dbg(ag, rx_status, ndev, "rx packets, limit=%d, curr=%u, dirty=%u\n",
1674 limit, ring->curr, ring->dirty);
1676 INIT_LIST_HEAD(&rx_list);
1678 while (done < limit) {
1679 unsigned int i = ring->curr & ring_mask;
1680 struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
1684 if (ag71xx_desc_empty(desc))
1687 if ((ring->dirty + ring_size) == ring->curr) {
1688 WARN_ONCE(1, "RX out of ring");
1692 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);
1694 pktlen = desc->ctrl & pktlen_mask;
1695 pktlen -= ETH_FCS_LEN;
1697 dma_unmap_single(&ag->pdev->dev, ring->buf[i].rx.dma_addr,
1698 ag->rx_buf_size, DMA_FROM_DEVICE);
1700 ndev->stats.rx_packets++;
1701 ndev->stats.rx_bytes += pktlen;
1703 skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
1705 skb_free_frag(ring->buf[i].rx.rx_buf);
1709 skb_reserve(skb, offset);
1710 skb_put(skb, pktlen);
1713 ndev->stats.rx_dropped++;
1717 skb->ip_summed = CHECKSUM_NONE;
1718 list_add_tail(&skb->list, &rx_list);
1722 ring->buf[i].rx.rx_buf = NULL;
1728 ag71xx_ring_rx_refill(ag);
1730 list_for_each_entry_safe(skb, next, &rx_list, list)
1731 skb->protocol = eth_type_trans(skb, ndev);
1732 netif_receive_skb_list(&rx_list);
1734 netif_dbg(ag, rx_status, ndev, "rx finish, curr=%u, dirty=%u, done=%d\n",
1735 ring->curr, ring->dirty, done);
1740 static int ag71xx_poll(struct napi_struct *napi, int limit)
1742 struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
1743 struct ag71xx_ring *rx_ring = &ag->rx_ring;
1744 int rx_ring_size = BIT(rx_ring->order);
1745 struct net_device *ndev = ag->ndev;
1746 int tx_done, rx_done;
1749 tx_done = ag71xx_tx_packets(ag, false);
1751 netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
1752 rx_done = ag71xx_rx_packets(ag, limit);
1754 if (!rx_ring->buf[rx_ring->dirty % rx_ring_size].rx.rx_buf)
1757 status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
1758 if (unlikely(status & RX_STATUS_OF)) {
1759 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
1760 ndev->stats.rx_fifo_errors++;
1763 ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
1766 if (rx_done < limit) {
1767 if (status & RX_STATUS_PR)
1770 status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
1771 if (status & TX_STATUS_PS)
1774 netif_dbg(ag, rx_status, ndev, "disable polling mode, rx=%d, tx=%d,limit=%d\n",
1775 rx_done, tx_done, limit);
1777 napi_complete(napi);
1779 /* enable interrupts */
1780 ag71xx_int_enable(ag, AG71XX_INT_POLL);
1785 netif_dbg(ag, rx_status, ndev, "stay in polling mode, rx=%d, tx=%d, limit=%d\n",
1786 rx_done, tx_done, limit);
1790 netif_err(ag, rx_err, ndev, "out of memory\n");
1792 mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
1793 napi_complete(napi);
1797 static irqreturn_t ag71xx_interrupt(int irq, void *dev_id)
1799 struct net_device *ndev = dev_id;
1803 ag = netdev_priv(ndev);
1804 status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
1806 if (unlikely(!status))
1809 if (unlikely(status & AG71XX_INT_ERR)) {
1810 if (status & AG71XX_INT_TX_BE) {
1811 ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE);
1812 netif_err(ag, intr, ndev, "TX BUS error\n");
1814 if (status & AG71XX_INT_RX_BE) {
1815 ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE);
1816 netif_err(ag, intr, ndev, "RX BUS error\n");
1820 if (likely(status & AG71XX_INT_POLL)) {
1821 ag71xx_int_disable(ag, AG71XX_INT_POLL);
1822 netif_dbg(ag, intr, ndev, "enable polling mode\n");
1823 napi_schedule(&ag->napi);
1829 static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
1831 struct ag71xx *ag = netdev_priv(ndev);
1833 ndev->mtu = new_mtu;
1834 ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
1835 ag71xx_max_frame_len(ndev->mtu));
1840 static const struct net_device_ops ag71xx_netdev_ops = {
1841 .ndo_open = ag71xx_open,
1842 .ndo_stop = ag71xx_stop,
1843 .ndo_start_xmit = ag71xx_hard_start_xmit,
1844 .ndo_do_ioctl = phy_do_ioctl,
1845 .ndo_tx_timeout = ag71xx_tx_timeout,
1846 .ndo_change_mtu = ag71xx_change_mtu,
1847 .ndo_set_mac_address = eth_mac_addr,
1848 .ndo_validate_addr = eth_validate_addr,
1851 static const u32 ar71xx_addr_ar7100[] = {
1852 0x19000000, 0x1a000000,
1855 static int ag71xx_probe(struct platform_device *pdev)
1857 struct device_node *np = pdev->dev.of_node;
1858 const struct ag71xx_dcfg *dcfg;
1859 struct net_device *ndev;
1860 struct resource *res;
1861 const void *mac_addr;
1862 int tx_size, err, i;
1868 ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag));
1872 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1876 dcfg = of_device_get_match_data(&pdev->dev);
1880 ag = netdev_priv(ndev);
1882 for (i = 0; i < ARRAY_SIZE(ar71xx_addr_ar7100); i++) {
1883 if (ar71xx_addr_ar7100[i] == res->start)
1887 if (ag->mac_idx < 0) {
1888 netif_err(ag, probe, ndev, "unknown mac idx\n");
1892 ag->clk_eth = devm_clk_get(&pdev->dev, "eth");
1893 if (IS_ERR(ag->clk_eth)) {
1894 netif_err(ag, probe, ndev, "Failed to get eth clk.\n");
1895 return PTR_ERR(ag->clk_eth);
1898 SET_NETDEV_DEV(ndev, &pdev->dev);
1903 ag->msg_enable = netif_msg_init(-1, AG71XX_DEFAULT_MSG_ENABLE);
1904 memcpy(ag->fifodata, dcfg->fifodata, sizeof(ag->fifodata));
1906 ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
1907 if (IS_ERR(ag->mac_reset)) {
1908 netif_err(ag, probe, ndev, "missing mac reset\n");
1909 err = PTR_ERR(ag->mac_reset);
1913 ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1914 if (!ag->mac_base) {
1919 ndev->irq = platform_get_irq(pdev, 0);
1920 err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
1921 0x0, dev_name(&pdev->dev), ndev);
1923 netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
1928 ndev->netdev_ops = &ag71xx_netdev_ops;
1929 ndev->ethtool_ops = &ag71xx_ethtool_ops;
1931 INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
1932 timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
1934 tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
1935 ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
1938 ndev->max_mtu = dcfg->max_frame_len - ag71xx_max_frame_len(0);
1940 ag->rx_buf_offset = NET_SKB_PAD;
1941 if (!ag71xx_is(ag, AR7100) && !ag71xx_is(ag, AR9130))
1942 ag->rx_buf_offset += NET_IP_ALIGN;
1944 if (ag71xx_is(ag, AR7100)) {
1945 ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
1946 tx_size *= AG71XX_TX_RING_DS_PER_PKT;
1948 ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
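/* On AR7100 a single packet may need up to AG71XX_TX_RING_DS_PER_PKT
 * descriptors because of TX splitting, so the ring is scaled accordingly
 * before its order is computed.
 */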
1950 ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
1951 sizeof(struct ag71xx_desc),
1952 &ag->stop_desc_dma, GFP_KERNEL);
1953 if (!ag->stop_desc) {
1958 ag->stop_desc->data = 0;
1959 ag->stop_desc->ctrl = 0;
1960 ag->stop_desc->next = (u32)ag->stop_desc_dma;
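/* The "stop" descriptor points to itself; ag71xx_dma_reset() parks the
 * RX and TX descriptor registers on it so the engines have a harmless
 * place to sit while the rings are torn down.
 */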
1962 mac_addr = of_get_mac_address(np);
1963 if (!IS_ERR(mac_addr))
1964 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
1965 if (IS_ERR(mac_addr) || !is_valid_ether_addr(ndev->dev_addr)) {
1966 netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
1967 eth_random_addr(ndev->dev_addr);
1970 err = of_get_phy_mode(np, &ag->phy_if_mode);
1972 netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
1976 netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
1978 err = clk_prepare_enable(ag->clk_eth);
1980 netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
1984 ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
1988 err = ag71xx_mdio_probe(ag);
1992 platform_set_drvdata(pdev, ndev);
1994 err = ag71xx_phylink_setup(ag);
1996 netif_err(ag, probe, ndev, "failed to setup phylink (%d)\n", err);
1997 goto err_mdio_remove;
2000 err = register_netdev(ndev);
2002 netif_err(ag, probe, ndev, "unable to register net device\n");
2003 platform_set_drvdata(pdev, NULL);
2004 goto err_mdio_remove;
2007 netif_info(ag, probe, ndev, "Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n",
2008 (unsigned long)ag->mac_base, ndev->irq,
2009 phy_modes(ag->phy_if_mode));
2014 ag71xx_mdio_remove(ag);
2016 clk_disable_unprepare(ag->clk_eth);
2022 static int ag71xx_remove(struct platform_device *pdev)
2024 struct net_device *ndev = platform_get_drvdata(pdev);
2030 ag = netdev_priv(ndev);
2031 unregister_netdev(ndev);
2032 ag71xx_mdio_remove(ag);
2033 clk_disable_unprepare(ag->clk_eth);
2034 platform_set_drvdata(pdev, NULL);
2039 static const u32 ar71xx_fifo_ar7100[] = {
2040 0x0fff0000, 0x00001fff, 0x00780fff,
2043 static const u32 ar71xx_fifo_ar9130[] = {
2044 0x0fff0000, 0x00001fff, 0x008001ff,
2047 static const u32 ar71xx_fifo_ar9330[] = {
2048 0x0010ffff, 0x015500aa, 0x01f00140,
2051 static const struct ag71xx_dcfg ag71xx_dcfg_ar7100 = {
2053 .fifodata = ar71xx_fifo_ar7100,
2054 .max_frame_len = 1540,
2055 .desc_pktlen_mask = SZ_4K - 1,
2056 .tx_hang_workaround = false,
2059 static const struct ag71xx_dcfg ag71xx_dcfg_ar7240 = {
2061 .fifodata = ar71xx_fifo_ar7100,
2062 .max_frame_len = 1540,
2063 .desc_pktlen_mask = SZ_4K - 1,
2064 .tx_hang_workaround = true,
2067 static const struct ag71xx_dcfg ag71xx_dcfg_ar9130 = {
2069 .fifodata = ar71xx_fifo_ar9130,
2070 .max_frame_len = 1540,
2071 .desc_pktlen_mask = SZ_4K - 1,
2072 .tx_hang_workaround = false,
2075 static const struct ag71xx_dcfg ag71xx_dcfg_ar9330 = {
2077 .fifodata = ar71xx_fifo_ar9330,
2078 .max_frame_len = 1540,
2079 .desc_pktlen_mask = SZ_4K - 1,
2080 .tx_hang_workaround = true,
2083 static const struct ag71xx_dcfg ag71xx_dcfg_ar9340 = {
2085 .fifodata = ar71xx_fifo_ar9330,
2086 .max_frame_len = SZ_16K - 1,
2087 .desc_pktlen_mask = SZ_16K - 1,
2088 .tx_hang_workaround = true,
2091 static const struct ag71xx_dcfg ag71xx_dcfg_qca9530 = {
2093 .fifodata = ar71xx_fifo_ar9330,
2094 .max_frame_len = SZ_16K - 1,
2095 .desc_pktlen_mask = SZ_16K - 1,
2096 .tx_hang_workaround = true,
2099 static const struct ag71xx_dcfg ag71xx_dcfg_qca9550 = {
2101 .fifodata = ar71xx_fifo_ar9330,
2102 .max_frame_len = 1540,
2103 .desc_pktlen_mask = SZ_16K - 1,
2104 .tx_hang_workaround = true,
2107 static const struct of_device_id ag71xx_match[] = {
2108 { .compatible = "qca,ar7100-eth", .data = &ag71xx_dcfg_ar7100 },
2109 { .compatible = "qca,ar7240-eth", .data = &ag71xx_dcfg_ar7240 },
2110 { .compatible = "qca,ar7241-eth", .data = &ag71xx_dcfg_ar7240 },
2111 { .compatible = "qca,ar7242-eth", .data = &ag71xx_dcfg_ar7240 },
2112 { .compatible = "qca,ar9130-eth", .data = &ag71xx_dcfg_ar9130 },
2113 { .compatible = "qca,ar9330-eth", .data = &ag71xx_dcfg_ar9330 },
2114 { .compatible = "qca,ar9340-eth", .data = &ag71xx_dcfg_ar9340 },
2115 { .compatible = "qca,qca9530-eth", .data = &ag71xx_dcfg_qca9530 },
2116 { .compatible = "qca,qca9550-eth", .data = &ag71xx_dcfg_qca9550 },
2117 { .compatible = "qca,qca9560-eth", .data = &ag71xx_dcfg_qca9550 },
2121 static struct platform_driver ag71xx_driver = {
2122 .probe = ag71xx_probe,
2123 .remove = ag71xx_remove,
2126 .of_match_table = ag71xx_match,
2130 module_platform_driver(ag71xx_driver);
2131 MODULE_LICENSE("GPL v2");