/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
					  MVNETA_DEF_RXQ_ARP(q) | \
					  MVNETA_DEF_RXQ_TCP(q) | \
					  MVNETA_DEF_RXQ_UDP(q) | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_VLAN_PRIO_TO_RXQ 0x2440
#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
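/* Each VLAN priority (0..7) gets a 3-bit rxq field in this register;
 * e.g. MVNETA_VLAN_PRIO_RXQ_MAP(2, 5) places rxq 5 into bits 6..8.
 */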
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(0)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_HSGMII_SERDES_PROTO 0x1107
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_BM_ADDRESS 0x2504
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of the register from this CPU will always
 * return 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)
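/* For example, MVNETA_RX_INTR_MASK(2) == ((1 << 2) - 1) << 8 == 0x300,
 * i.e. only the RXQ OCCUP bits of queues 0 and 1.
 */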
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AN_COMPLETE BIT(11)
#define MVNETA_GMAC_SYNC_OK BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_GMAC_CTRL_4 0x2c90
#define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_LPI_CTRL_0 0x2cc0
#define MVNETA_LPI_CTRL_1 0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
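/* With a ring of N descriptors (last_desc == N - 1), the index advances
 * 0, 1, ..., N - 1 and then wraps around to 0.
 */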
/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE 2

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT1 1
#define MVNETA_ACC_MODE_EXT2 2

#define MVNETA_MAX_DECODE_WIN 6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet, we only use the first 'MVNETA_RSS_LU_TABLE_SIZE' entries.
 */
#define MVNETA_RSS_LU_TABLE_SIZE 1

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 512

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 1024

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
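/* Both descriptor layouts defined further below are eight 32-bit words,
 * matching MVNETA_DESC_ALIGNED_SIZE (32 bytes).
 */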
/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION 64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      cache_line_size())

/* Driver assumes that the last 3 bits are 0 */
#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
			MVNETA_SKB_HEADROOM))
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
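/* RX buffer layout implied by the defines above: MVNETA_SKB_HEADROOM is
 * reserved at the start of each page (NET_SKB_PAD/XDP headroom) and room
 * for struct skb_shared_info is kept at the end, so at most
 * PAGE_SIZE - MVNETA_SKB_PAD bytes per page hold packet data.
 */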
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
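/* TSO headers live in a separate per-txq DMA region (tso_hdrs), so they
 * must not be dma_unmap'ed on completion; this predicate is how
 * mvneta_txq_bufs_free() tells them apart from regular buffers.
 */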
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
	ETHTOOL_MAX_STATS,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64
#define T_SW 1
#define MVNETA_XDP_PASS 0
#define MVNETA_XDP_DROPPED BIT(0)
#define MVNETA_XDP_TX BIT(1)
#define MVNETA_XDP_REDIR BIT(2)
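/* These XDP verdicts are bit flags rather than an enum so the outcomes of
 * all frames handled in one NAPI poll can be OR-ed together and acted on
 * once at the end (e.g. a single flush after any MVNETA_XDP_REDIR).
 */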
static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
struct mvneta_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	/* xdp */
	u64 xdp_redirect;
	u64 xdp_pass;
	u64 xdp_drop;
	u64 xdp_xmit;
	u64 xdp_xmit_err;
	u64 xdp_tx;
	u64 xdp_tx_err;
};

struct mvneta_ethtool_stats {
	struct mvneta_stats ps;
	u64 skb_alloc_error;
	u64 refill_error;
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
	u64 rx_dropped;
	u64 rx_errors;
};
struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};
enum {
	__MVNETA_DOWN,
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	unsigned long state;

	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	struct bpf_prog *xdp_prog;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;
	struct phylink_config phylink_config;
	struct phy *comphy;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	bool eee_enabled;
	bool eee_active;
	bool tx_lpi_enabled;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */
#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
			     MVNETA_TXD_L_DESC | \
			     MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)
#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_BM_POOL_SHIFT 13
#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_LAST_DESC BIT(26)
#define MVNETA_RXD_FIRST_DESC BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
				    MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u16 reserved1;		/* csum_l4 (for future use) */
	u32 command;		/* Options used by HW for packet transmitting.*/
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16 data_size;		/* Size of received packet in bytes */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u32 status;		/* Info about received packet */

	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_phys_addr;	/* Physical address of the buffer */

	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif
enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted buffers */
	struct mvneta_tx_buf *buf;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};
struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* page_pool */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;

	/* Virtual address of the RX buffer */
	void **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;
};
static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 rx_dropped;
		u64 rx_errors;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes = cpu_stats->es.ps.rx_bytes;
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors = cpu_stats->rx_errors;
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes = cpu_stats->es.ps.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->rx_dropped += rx_dropped;
		stats->rx_errors += rx_errors;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->tx_dropped = dev->stats.tx_dropped;
}
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}

		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}

		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	pend_desc += txq->pending;

	/* Only 255 Tx descriptors can be added at once */
	do {
		val = min(pend_desc, 255);
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}
/* Configure MBUS window in order to enable access to the BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOSPC;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}
static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}
/* Assign and initialize pools for port. If this fails, the buffer
 * manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}
/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	q_map = 0;
	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);
	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable the port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue we configure a special case
			 * which will allow us to get all the irqs on a single
			 * CPU.
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
}
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
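/* Example: an untagged IPv4/TCP frame has l3_offs == 14 (Ethernet header)
 * and ip_hdr_len == 5 (ihl words, no IP options), so command becomes
 * 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */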
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
	u32 status = rx_desc->status;

	/* update per-cpu counter */
	u64_stats_update_begin(&stats->syncp);
	stats->rx_errors++;
	u64_stats_update_end(&stats->syncp);

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}
/* Handle RX checksum offload based on the descriptor's status */
static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
{
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK))
		return CHECKSUM_UNNECESSARY;

	return CHECKSUM_NONE;
}
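/* The hardware checksum result is only trusted for IPv4 frames here
 * (MVNETA_RXD_L3_IP4); IPv6 and everything else fall back to
 * CHECKSUM_NONE and are verified by the stack.
 */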
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq, bool napi)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	struct xdp_frame_bulk bq;
	int i;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	for (i = 0; i < num; i++) {
		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
		    buf->type != MVNETA_TYPE_XDP_TX)
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
			bytes_compl += buf->skb->len;
			pkts_compl++;
			dev_kfree_skb_any(buf->skb);
		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
			   buf->type == MVNETA_TYPE_XDP_NDO) {
			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(buf->xdpf);
			else
				xdp_return_frame_bulk(buf->xdpf, &bq);
		}
	}
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}
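/* Note the hysteresis: a stopped queue is only woken once the number of
 * in-flight descriptors falls to tx_wake_threshold, which avoids rapid
 * stop/wake cycles when the ring hovers around the stop threshold.
 */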
/* Refill processing for SW buffer management */
/* Allocate page per descriptor */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq,
			    gfp_t gfp_mask)
{
	dma_addr_t phys_addr;
	struct page *page;

	page = page_pool_alloc_pages(rxq->page_pool,
				     gfp_mask | __GFP_NOWARN);
	if (!page)
		return -ENOMEM;

	phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);

	return 0;
}
/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
				mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = rxq->buf_virt_addr[i];
		if (!data || !(rx_desc->buf_phys_addr))
			continue;

		page_pool_put_full_page(rxq->page_pool, data, false);
	}
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
}
static void
mvneta_update_stats(struct mvneta_port *pp,
		    struct mvneta_stats *ps)
{
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

	u64_stats_update_begin(&stats->syncp);
	stats->es.ps.rx_packets += ps->rx_packets;
	stats->es.ps.rx_bytes += ps->rx_bytes;
	/* xdp */
	stats->es.ps.xdp_redirect += ps->xdp_redirect;
	stats->es.ps.xdp_pass += ps->xdp_pass;
	stats->es.ps.xdp_drop += ps->xdp_drop;
	u64_stats_update_end(&stats->syncp);
}
static int
mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
	struct mvneta_rx_desc *rx_desc;
	int curr_desc = rxq->first_to_refill;
	int i;

	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
		rx_desc = rxq->descs + curr_desc;
		if (!(rx_desc->buf_phys_addr)) {
			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
				struct mvneta_pcpu_stats *stats;

				pr_err("Can't refill queue %d. Done %d from %d\n",
				       rxq->id, i, rxq->refill_num);

				stats = this_cpu_ptr(pp->stats);
				u64_stats_update_begin(&stats->syncp);
				stats->es.refill_error++;
				u64_stats_update_end(&stats->syncp);
				break;
			}
		}
		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
	}
	rxq->refill_num -= i;
	rxq->first_to_refill = curr_desc;

	return i;
}
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
		    int sync_len)
{
	int i;

	for (i = 0; i < sinfo->nr_frags; i++)
		page_pool_put_full_page(rxq->page_pool,
					skb_frag_page(&sinfo->frags[i]), true);
	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
			   sync_len, true);
}
2048 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2049 struct xdp_frame *xdpf, bool dma_map)
2051 struct mvneta_tx_desc *tx_desc;
2052 struct mvneta_tx_buf *buf;
2053 dma_addr_t dma_addr;
2055 if (txq->count >= txq->tx_stop_threshold)
2056 return MVNETA_XDP_DROPPED;
2058 tx_desc = mvneta_txq_next_desc_get(txq);
2060 buf = &txq->buf[txq->txq_put_index];
2063 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
2064 xdpf->len, DMA_TO_DEVICE);
2065 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
2066 mvneta_txq_desc_put(txq);
2067 return MVNETA_XDP_DROPPED;
2069 buf->type = MVNETA_TYPE_XDP_NDO;
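/* Note on the XDP_TX branch below: the frame still lives in one of our
 * page_pool pages, and xdp_convert_buff_to_frame() stored the struct
 * xdp_frame at the start of the headroom. The payload's offset within the
 * page is therefore sizeof(*xdpf) plus the frame's recorded headroom, so
 * only a DMA sync is needed here, not a fresh mapping.
 */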
2071 struct page *page = virt_to_page(xdpf->data);
2073 dma_addr = page_pool_get_dma_addr(page) +
2074 sizeof(*xdpf) + xdpf->headroom;
2075 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
2076 xdpf->len, DMA_BIDIRECTIONAL);
2077 buf->type = MVNETA_TYPE_XDP_TX;
2081 tx_desc->command = MVNETA_TXD_FLZ_DESC;
2082 tx_desc->buf_phys_addr = dma_addr;
2083 tx_desc->data_size = xdpf->len;
2085 mvneta_txq_inc_put(txq);
2089 return MVNETA_XDP_TX;
2093 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2095 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2096 struct mvneta_tx_queue *txq;
2097 struct netdev_queue *nq;
2098 struct xdp_frame *xdpf;
2102 xdpf = xdp_convert_buff_to_frame(xdp);
2103 if (unlikely(!xdpf))
2104 return MVNETA_XDP_DROPPED;
2106 cpu = smp_processor_id();
2107 txq = &pp->txqs[cpu % txq_number];
2108 nq = netdev_get_tx_queue(pp->dev, txq->id);
2110 __netif_tx_lock(nq, cpu);
2111 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
2112 if (ret == MVNETA_XDP_TX) {
2113 u64_stats_update_begin(&stats->syncp);
2114 stats->es.ps.tx_bytes += xdpf->len;
2115 stats->es.ps.tx_packets++;
2116 stats->es.ps.xdp_tx++;
2117 u64_stats_update_end(&stats->syncp);
2119 mvneta_txq_pend_desc_add(pp, txq, 0);
2121 u64_stats_update_begin(&stats->syncp);
2122 stats->es.ps.xdp_tx_err++;
2123 u64_stats_update_end(&stats->syncp);
2125 __netif_tx_unlock(nq);
2131 mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2132 struct xdp_frame **frames, u32 flags)
2134 struct mvneta_port *pp = netdev_priv(dev);
2135 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2136 int i, nxmit_byte = 0, nxmit = 0;
2137 int cpu = smp_processor_id();
2138 struct mvneta_tx_queue *txq;
2139 struct netdev_queue *nq;
2142 if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
2145 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2148 txq = &pp->txqs[cpu % txq_number];
2149 nq = netdev_get_tx_queue(pp->dev, txq->id);
2151 __netif_tx_lock(nq, cpu);
2152 for (i = 0; i < num_frame; i++) {
2153 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
2154 if (ret != MVNETA_XDP_TX)
2157 nxmit_byte += frames[i]->len;
2161 if (unlikely(flags & XDP_XMIT_FLUSH))
2162 mvneta_txq_pend_desc_add(pp, txq, 0);
2163 __netif_tx_unlock(nq);
2165 u64_stats_update_begin(&stats->syncp);
2166 stats->es.ps.tx_bytes += nxmit_byte;
2167 stats->es.ps.tx_packets += nxmit;
2168 stats->es.ps.xdp_xmit += nxmit;
2169 stats->es.ps.xdp_xmit_err += num_frame - nxmit;
2170 u64_stats_update_end(&stats->syncp);
2176 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2177 struct bpf_prog *prog, struct xdp_buff *xdp,
2178 u32 frame_sz, struct mvneta_stats *stats)
2180 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2181 unsigned int len, data_len, sync;
2184 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2185 data_len = xdp->data_end - xdp->data;
2186 act = bpf_prog_run_xdp(prog, xdp);
2188 /* Due to xdp_adjust_tail, the DMA sync for_device must cover the max length the CPU touched */
2189 sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2190 sync = max(sync, len);
2195 return MVNETA_XDP_PASS;
2196 case XDP_REDIRECT: {
2199 err = xdp_do_redirect(pp->dev, xdp, prog);
2200 if (unlikely(err)) {
2201 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
2202 ret = MVNETA_XDP_DROPPED;
2204 ret = MVNETA_XDP_REDIR;
2205 stats->xdp_redirect++;
2210 ret = mvneta_xdp_xmit_back(pp, xdp);
2211 if (ret != MVNETA_XDP_TX)
2212 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
2215 bpf_warn_invalid_xdp_action(act);
2218 trace_xdp_exception(pp->dev, prog, act);
2221 mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
2222 ret = MVNETA_XDP_DROPPED;
2227 stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
2228 stats->rx_packets++;
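/* The verdicts handled above come from the BPF program attached to this
 * port. A minimal hedged sketch of such a program (standard libbpf
 * skeleton, built separately with clang -target bpf; not part of this
 * driver): it drops UDP and passes everything else, exercising the
 * XDP_DROP and XDP_PASS paths of mvneta_run_xdp().
 */
#if 0 /* illustrative BPF program, not kernel code */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int drop_udp(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
struct ethhdr *eth = data;
struct iphdr *iph;

if ((void *)(eth + 1) > data_end)
return XDP_PASS;
if (eth->h_proto != bpf_htons(ETH_P_IP))
return XDP_PASS;

iph = (void *)(eth + 1);
if ((void *)(iph + 1) > data_end)
return XDP_PASS;

return iph->protocol == IPPROTO_UDP ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";
#endif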
2234 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2235 struct mvneta_rx_desc *rx_desc,
2236 struct mvneta_rx_queue *rxq,
2237 struct xdp_buff *xdp, int *size,
2240 unsigned char *data = page_address(page);
2241 int data_len = -MVNETA_MH_SIZE, len;
2242 struct net_device *dev = pp->dev;
2243 enum dma_data_direction dma_dir;
2244 struct skb_shared_info *sinfo;
2246 if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2247 len = MVNETA_MAX_RX_BUF_SIZE;
2251 data_len += len - ETH_FCS_LEN;
2253 *size = *size - len;
2255 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2256 dma_sync_single_for_cpu(dev->dev.parent,
2257 rx_desc->buf_phys_addr,
2260 rx_desc->buf_phys_addr = 0;
2262 /* Prefetch header */
2264 xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
2267 sinfo = xdp_get_shared_info_from_buff(xdp);
2268 sinfo->nr_frags = 0;
2272 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2273 struct mvneta_rx_desc *rx_desc,
2274 struct mvneta_rx_queue *rxq,
2275 struct xdp_buff *xdp, int *size,
2276 struct skb_shared_info *xdp_sinfo,
2279 struct net_device *dev = pp->dev;
2280 enum dma_data_direction dma_dir;
2283 if (*size > MVNETA_MAX_RX_BUF_SIZE) {
2284 len = MVNETA_MAX_RX_BUF_SIZE;
2288 data_len = len - ETH_FCS_LEN;
2290 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2291 dma_sync_single_for_cpu(dev->dev.parent,
2292 rx_desc->buf_phys_addr,
2294 rx_desc->buf_phys_addr = 0;
2296 if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
2297 skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
2299 skb_frag_off_set(frag, pp->rx_offset_correction);
2300 skb_frag_size_set(frag, data_len);
2301 __skb_frag_set_page(frag, page);
2303 page_pool_put_full_page(rxq->page_pool, page, true);
2308 struct skb_shared_info *sinfo;
2310 sinfo = xdp_get_shared_info_from_buff(xdp);
2311 sinfo->nr_frags = xdp_sinfo->nr_frags;
2312 memcpy(sinfo->frags, xdp_sinfo->frags,
2313 sinfo->nr_frags * sizeof(skb_frag_t));
2318 static struct sk_buff *
2319 mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
2320 struct xdp_buff *xdp, u32 desc_status)
2322 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2323 int i, num_frags = sinfo->nr_frags;
2324 struct sk_buff *skb;
2326 skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2328 return ERR_PTR(-ENOMEM);
2330 skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
2332 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2333 skb_put(skb, xdp->data_end - xdp->data);
2334 skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2336 for (i = 0; i < num_frags; i++) {
2337 skb_frag_t *frag = &sinfo->frags[i];
2339 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2340 skb_frag_page(frag), skb_frag_off(frag),
2341 skb_frag_size(frag), PAGE_SIZE);
2342 /* We don't need to reset pp_recycle here. It's already set, so
2343 * just mark fragments for recycling.
2345 page_pool_store_mem_info(skb_frag_page(frag), pool);
2351 /* Main rx processing when using software buffer management */
2352 static int mvneta_rx_swbm(struct napi_struct *napi,
2353 struct mvneta_port *pp, int budget,
2354 struct mvneta_rx_queue *rxq)
2356 int rx_proc = 0, rx_todo, refill, size = 0;
2357 struct net_device *dev = pp->dev;
2358 struct skb_shared_info sinfo;
2359 struct mvneta_stats ps = {};
2360 struct bpf_prog *xdp_prog;
2361 u32 desc_status, frame_sz;
2362 struct xdp_buff xdp_buf;
2364 xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
2365 xdp_buf.data_hard_start = NULL;
2369 /* Get number of received packets */
2370 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2372 xdp_prog = READ_ONCE(pp->xdp_prog);
2374 /* Fairness NAPI loop */
2375 while (rx_proc < budget && rx_proc < rx_todo) {
2376 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2377 u32 rx_status, index;
2378 struct sk_buff *skb;
2381 index = rx_desc - rxq->descs;
2382 page = (struct page *)rxq->buf_virt_addr[index];
2384 rx_status = rx_desc->status;
2388 if (rx_status & MVNETA_RXD_FIRST_DESC) {
2389 /* Check errors only for FIRST descriptor */
2390 if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
2391 mvneta_rx_error(pp, rx_desc);
2395 size = rx_desc->data_size;
2396 frame_sz = size - ETH_FCS_LEN;
2397 desc_status = rx_status;
2399 mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2402 if (unlikely(!xdp_buf.data_hard_start)) {
2403 rx_desc->buf_phys_addr = 0;
2404 page_pool_put_full_page(rxq->page_pool, page,
2409 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
2410 &size, &sinfo, page);
2411 } /* Middle or Last descriptor */
2413 if (!(rx_status & MVNETA_RXD_LAST_DESC))
2414 /* no last descriptor this time */
2418 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
2423 mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2426 skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
2428 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2430 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
2432 u64_stats_update_begin(&stats->syncp);
2433 stats->es.skb_alloc_error++;
2434 stats->rx_dropped++;
2435 u64_stats_update_end(&stats->syncp);
2440 ps.rx_bytes += skb->len;
2443 skb->protocol = eth_type_trans(skb, dev);
2444 napi_gro_receive(napi, skb);
2446 xdp_buf.data_hard_start = NULL;
2450 if (xdp_buf.data_hard_start)
2451 mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
2453 if (ps.xdp_redirect)
2457 mvneta_update_stats(pp, &ps);
2459 /* Return some buffers to the hardware queue; one at a time is too slow */
2460 refill = mvneta_rx_refill_queue(pp, rxq);
2462 /* Update rxq management counters */
2463 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2465 return ps.rx_packets;
2468 /* Main rx processing when using hardware buffer management */
2469 static int mvneta_rx_hwbm(struct napi_struct *napi,
2470 struct mvneta_port *pp, int rx_todo,
2471 struct mvneta_rx_queue *rxq)
2473 struct net_device *dev = pp->dev;
2478 /* Get number of received packets */
2479 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2481 if (rx_todo > rx_done)
2486 /* Fairness NAPI loop */
2487 while (rx_done < rx_todo) {
2488 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2489 struct mvneta_bm_pool *bm_pool = NULL;
2490 struct sk_buff *skb;
2491 unsigned char *data;
2492 dma_addr_t phys_addr;
2493 u32 rx_status, frag_size;
2498 rx_status = rx_desc->status;
2499 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2500 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2501 phys_addr = rx_desc->buf_phys_addr;
2502 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2503 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2505 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2506 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2507 err_drop_frame_ret_pool:
2508 /* Return the buffer to the pool */
2509 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2510 rx_desc->buf_phys_addr);
2512 mvneta_rx_error(pp, rx_desc);
2513 /* leave the descriptor untouched */
2517 if (rx_bytes <= rx_copybreak) {
2518 /* better copy a small frame and not unmap the DMA region */
2519 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2521 goto err_drop_frame_ret_pool;
2523 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2524 rx_desc->buf_phys_addr,
2525 MVNETA_MH_SIZE + NET_SKB_PAD,
2528 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2531 skb->protocol = eth_type_trans(skb, dev);
2532 skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2533 napi_gro_receive(napi, skb);
2536 rcvd_bytes += rx_bytes;
2538 /* Return the buffer to the pool */
2539 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2540 rx_desc->buf_phys_addr);
2542 /* leave the descriptor and buffer untouched */
2546 /* Refill processing */
2547 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2549 struct mvneta_pcpu_stats *stats;
2551 netdev_err(dev, "Linux processing - Can't refill\n");
2553 stats = this_cpu_ptr(pp->stats);
2554 u64_stats_update_begin(&stats->syncp);
2555 stats->es.refill_error++;
2556 u64_stats_update_end(&stats->syncp);
2558 goto err_drop_frame_ret_pool;
2561 frag_size = bm_pool->hwbm_pool.frag_size;
2563 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2565 /* After refill, the old buffer has to be unmapped regardless
2566 * of whether the skb was successfully built or not.
2568 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2569 bm_pool->buf_size, DMA_FROM_DEVICE);
2571 goto err_drop_frame;
2574 rcvd_bytes += rx_bytes;
2576 /* Linux processing */
2577 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2578 skb_put(skb, rx_bytes);
2580 skb->protocol = eth_type_trans(skb, dev);
2581 skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2583 napi_gro_receive(napi, skb);
2587 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2589 u64_stats_update_begin(&stats->syncp);
2590 stats->es.ps.rx_packets += rcvd_pkts;
2591 stats->es.ps.rx_bytes += rcvd_bytes;
2592 u64_stats_update_end(&stats->syncp);
2595 /* Update rxq management counters */
2596 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2602 mvneta_tso_put_hdr(struct sk_buff *skb,
2603 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2605 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2606 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2607 struct mvneta_tx_desc *tx_desc;
2609 tx_desc = mvneta_txq_next_desc_get(txq);
2610 tx_desc->data_size = hdr_len;
2611 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2612 tx_desc->command |= MVNETA_TXD_F_DESC;
2613 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2614 txq->txq_put_index * TSO_HEADER_SIZE;
2615 buf->type = MVNETA_TYPE_SKB;
2618 mvneta_txq_inc_put(txq);
2622 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2623 struct sk_buff *skb, char *data, int size,
2624 bool last_tcp, bool is_last)
2626 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2627 struct mvneta_tx_desc *tx_desc;
2629 tx_desc = mvneta_txq_next_desc_get(txq);
2630 tx_desc->data_size = size;
2631 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2632 size, DMA_TO_DEVICE);
2633 if (unlikely(dma_mapping_error(dev->dev.parent,
2634 tx_desc->buf_phys_addr))) {
2635 mvneta_txq_desc_put(txq);
2639 tx_desc->command = 0;
2640 buf->type = MVNETA_TYPE_SKB;
2644 /* last descriptor in the TCP packet */
2645 tx_desc->command = MVNETA_TXD_L_DESC;
2647 /* last descriptor in SKB */
2651 mvneta_txq_inc_put(txq);
2655 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2656 struct mvneta_tx_queue *txq)
2658 int hdr_len, total_len, data_left;
2660 struct mvneta_port *pp = netdev_priv(dev);
2664 /* Count needed descriptors */
2665 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2668 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2669 pr_info("*** Is this even possible???!?!?\n");
2673 /* Initialize the TSO handler, and prepare the first payload */
2674 hdr_len = tso_start(skb, &tso);
2676 total_len = skb->len - hdr_len;
2677 while (total_len > 0) {
2680 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2681 total_len -= data_left;
2684 /* prepare packet headers: MAC + IP + TCP */
2685 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2686 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2688 mvneta_tso_put_hdr(skb, pp, txq);
2690 while (data_left > 0) {
2694 size = min_t(int, tso.size, data_left);
2696 if (mvneta_tso_put_data(dev, txq, skb,
2703 tso_build_data(skb, &tso, size);
2710 /* Release all used data descriptors; header descriptors must not be unmapped */
2713 for (i = desc_count - 1; i >= 0; i--) {
2714 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2715 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2716 dma_unmap_single(pp->dev->dev.parent,
2717 tx_desc->buf_phys_addr,
2720 mvneta_txq_desc_put(txq);
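/* Worked example of the segmentation loop above, as a hedged sketch: a
 * 4380-byte TCP payload with gso_size = 1460 is emitted as three segments
 * of 1460 bytes, each preceded by its own rebuilt MAC/IP/TCP header from
 * the per-descriptor TSO header area. Illustrative arithmetic only:
 */
static int example_tso_segments(int payload_len, int gso_size)
{
/* Matches data_left = min(gso_size, total_len) per loop iteration */
return (payload_len + gso_size - 1) / gso_size;
}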
2725 /* Handle tx fragmentation processing */
2726 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2727 struct mvneta_tx_queue *txq)
2729 struct mvneta_tx_desc *tx_desc;
2730 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2732 for (i = 0; i < nr_frags; i++) {
2733 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2734 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2735 void *addr = skb_frag_address(frag);
2737 tx_desc = mvneta_txq_next_desc_get(txq);
2738 tx_desc->data_size = skb_frag_size(frag);
2740 tx_desc->buf_phys_addr =
2741 dma_map_single(pp->dev->dev.parent, addr,
2742 tx_desc->data_size, DMA_TO_DEVICE);
2744 if (dma_mapping_error(pp->dev->dev.parent,
2745 tx_desc->buf_phys_addr)) {
2746 mvneta_txq_desc_put(txq);
2750 if (i == nr_frags - 1) {
2751 /* Last descriptor */
2752 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2755 /* Descriptor in the middle: Not First, Not Last */
2756 tx_desc->command = 0;
2759 buf->type = MVNETA_TYPE_SKB;
2760 mvneta_txq_inc_put(txq);
2766 /* Release all descriptors that were used to map fragments of
2767 * this packet, as well as the corresponding DMA mappings
2769 for (i = i - 1; i >= 0; i--) {
2770 tx_desc = txq->descs + i;
2771 dma_unmap_single(pp->dev->dev.parent,
2772 tx_desc->buf_phys_addr,
2775 mvneta_txq_desc_put(txq);
2781 /* Main tx processing */
2782 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2784 struct mvneta_port *pp = netdev_priv(dev);
2785 u16 txq_id = skb_get_queue_mapping(skb);
2786 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2787 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2788 struct mvneta_tx_desc *tx_desc;
2793 if (!netif_running(dev))
2796 if (skb_is_gso(skb)) {
2797 frags = mvneta_tx_tso(skb, dev, txq);
2801 frags = skb_shinfo(skb)->nr_frags + 1;
2803 /* Get a descriptor for the first part of the packet */
2804 tx_desc = mvneta_txq_next_desc_get(txq);
2806 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2808 tx_desc->data_size = skb_headlen(skb);
2810 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2813 if (unlikely(dma_mapping_error(dev->dev.parent,
2814 tx_desc->buf_phys_addr))) {
2815 mvneta_txq_desc_put(txq);
2820 buf->type = MVNETA_TYPE_SKB;
2822 /* First and Last descriptor */
2823 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2824 tx_desc->command = tx_cmd;
2826 mvneta_txq_inc_put(txq);
2828 /* First but not Last */
2829 tx_cmd |= MVNETA_TXD_F_DESC;
2831 mvneta_txq_inc_put(txq);
2832 tx_desc->command = tx_cmd;
2833 /* Continue with other skb fragments */
2834 if (mvneta_tx_frag_process(pp, skb, txq)) {
2835 dma_unmap_single(dev->dev.parent,
2836 tx_desc->buf_phys_addr,
2839 mvneta_txq_desc_put(txq);
2847 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2848 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2850 netdev_tx_sent_queue(nq, len);
2852 txq->count += frags;
2853 if (txq->count >= txq->tx_stop_threshold)
2854 netif_tx_stop_queue(nq);
2856 if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2857 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2858 mvneta_txq_pend_desc_add(pp, txq, frags);
2860 txq->pending += frags;
2862 u64_stats_update_begin(&stats->syncp);
2863 stats->es.ps.tx_bytes += len;
2864 stats->es.ps.tx_packets++;
2865 u64_stats_update_end(&stats->syncp);
2867 dev->stats.tx_dropped++;
2868 dev_kfree_skb_any(skb);
2871 return NETDEV_TX_OK;
2875 /* Free tx resources, when resetting a port */
2876 static void mvneta_txq_done_force(struct mvneta_port *pp,
2877 struct mvneta_tx_queue *txq)
2880 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2881 int tx_done = txq->count;
2883 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
2887 txq->txq_put_index = 0;
2888 txq->txq_get_index = 0;
2891 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2892 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2894 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2896 struct mvneta_tx_queue *txq;
2897 struct netdev_queue *nq;
2898 int cpu = smp_processor_id();
2900 while (cause_tx_done) {
2901 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2903 nq = netdev_get_tx_queue(pp->dev, txq->id);
2904 __netif_tx_lock(nq, cpu);
2907 mvneta_txq_done(pp, txq);
2909 __netif_tx_unlock(nq);
2910 cause_tx_done &= ~((1 << txq->id));
2914 /* Compute the CRC-8 of the specified address, using an algorithm that is
2915 * unique per the hardware spec and differs from the generic CRC-8 algorithm */
2917 static int mvneta_addr_crc(unsigned char *addr)
2922 for (i = 0; i < ETH_ALEN; i++) {
2925 crc = (crc ^ addr[i]) << 8;
2926 for (j = 7; j >= 0; j--) {
2927 if (crc & (0x100 << j))
2928 crc ^= 0x107 << j;
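/* A standalone, hedged sketch of the same bit-serial CRC-8 for testing in
 * userspace, assuming the polynomial x^8 + x^2 + x + 1 (0x107) used in the
 * reduction step above. Illustrative only, not the driver's code:
 */
static int example_addr_crc(const unsigned char *addr)
{
int crc = 0;
int i, j;

for (i = 0; i < 6; i++) { /* ETH_ALEN bytes */
crc = (crc ^ addr[i]) << 8;
for (j = 7; j >= 0; j--) {
if (crc & (0x100 << j))
crc ^= 0x107 << j; /* assumed hardware polynomial */
}
}

return crc & 0xff;
}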
2935 /* This method controls the net device special MAC multicast support.
2936 * The Special Multicast Table for MAC addresses supports MAC of the form
2937 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2938 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2939 * Table entries in the DA-Filter table. This method sets the appropriate
2940 * Special Multicast Table entry.
2942 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2943 unsigned char last_byte,
2946 unsigned int smc_table_reg;
2947 unsigned int tbl_offset;
2948 unsigned int reg_offset;
2950 /* Register offset from SMC table base */
2951 tbl_offset = (last_byte / 4);
2952 /* Entry offset within the above reg */
2953 reg_offset = last_byte % 4;
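/* Worked example: for last_byte = 0x35 (entry 53), tbl_offset = 13 and
 * reg_offset = 1, so this entry occupies bits 15:8 of the register at
 * MVNETA_DA_FILT_SPEC_MCAST + 13 * 4.
 */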
2955 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2959 smc_table_reg &= ~(0xff << (8 * reg_offset));
2961 smc_table_reg &= ~(0xff << (8 * reg_offset));
2962 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2965 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2969 /* This method controls the network device Other MAC multicast support.
2970 * The Other Multicast Table is used for multicast of another type.
2971 * A CRC-8 is used as an index to the Other Multicast Table entries
2972 * in the DA-Filter table.
2973 * The method gets the CRC-8 value from the calling routine and
2974 * sets the appropriate Other Multicast Table entry according to the specified CRC-8 value. */
2977 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2981 unsigned int omc_table_reg;
2982 unsigned int tbl_offset;
2983 unsigned int reg_offset;
2985 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2986 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2988 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2991 /* Clear the accept-frame bit of the specified Other DA table entry */
2992 omc_table_reg &= ~(0xff << (8 * reg_offset));
2994 omc_table_reg &= ~(0xff << (8 * reg_offset));
2995 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2998 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3001 /* The network device supports multicast using two tables:
3002 * 1) Special Multicast Table for MAC addresses of the form
3003 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
3004 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
3005 * Table entries in the DA-Filter table.
3006 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
3007 * is used as an index to the Other Multicast Table entries in the
3010 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
3013 unsigned char crc_result = 0;
3015 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
3016 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3020 crc_result = mvneta_addr_crc(p_addr);
3022 if (pp->mcast_count[crc_result] == 0) {
3023 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
3028 pp->mcast_count[crc_result]--;
3029 if (pp->mcast_count[crc_result] != 0) {
3030 netdev_info(pp->dev,
3031 "After delete there are %d valid Mcast for crc8=0x%02x\n",
3032 pp->mcast_count[crc_result], crc_result);
3036 pp->mcast_count[crc_result]++;
3038 mvneta_set_other_mcast_addr(pp, crc_result, queue);
3043 /* Configure filtering mode of the Ethernet port */
3044 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
3047 u32 port_cfg_reg, val;
3049 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
3051 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
3053 /* Set / Clear UPM bit in port configuration register */
3055 /* Accept all Unicast addresses */
3056 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
3057 val |= MVNETA_FORCE_UNI;
3058 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
3059 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3061 /* Reject all Unicast addresses */
3062 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
3063 val &= ~MVNETA_FORCE_UNI;
3066 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3067 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3070 /* register unicast and multicast addresses */
3071 static void mvneta_set_rx_mode(struct net_device *dev)
3073 struct mvneta_port *pp = netdev_priv(dev);
3074 struct netdev_hw_addr *ha;
3076 if (dev->flags & IFF_PROMISC) {
3077 /* Accept all: Multicast + Unicast */
3078 mvneta_rx_unicast_promisc_set(pp, 1);
3079 mvneta_set_ucast_table(pp, pp->rxq_def);
3080 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3081 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3083 /* Accept single Unicast */
3084 mvneta_rx_unicast_promisc_set(pp, 0);
3085 mvneta_set_ucast_table(pp, -1);
3086 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3088 if (dev->flags & IFF_ALLMULTI) {
3089 /* Accept all multicast */
3090 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3091 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3093 /* Accept only initialized multicast */
3094 mvneta_set_special_mcast_table(pp, -1);
3095 mvneta_set_other_mcast_table(pp, -1);
3097 if (!netdev_mc_empty(dev)) {
3098 netdev_for_each_mc_addr(ha, dev) {
3099 mvneta_mcast_addr_set(pp, ha->addr,
3107 /* Interrupt handling - the callback for request_irq() */
3108 static irqreturn_t mvneta_isr(int irq, void *dev_id)
3110 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3112 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3113 napi_schedule(&pp->napi);
3118 /* Interrupt handling - the callback for request_percpu_irq() */
3119 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3121 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3123 disable_percpu_irq(port->pp->dev->irq);
3124 napi_schedule(&port->napi);
3129 static void mvneta_link_change(struct mvneta_port *pp)
3131 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3133 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3137 * Bits 0-7 of the causeRxTx register indicate that packets were
3138 * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
3139 * Bits 8-15 of the causeRxTx register indicate that packets were
3140 * received on the corresponding RXQ (bit 8 is for RX queue 0).
3141 * Each CPU has its own causeRxTx register
3143 static int mvneta_poll(struct napi_struct *napi, int budget)
3148 struct mvneta_port *pp = netdev_priv(napi->dev);
3149 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3151 if (!netif_running(pp->dev)) {
3152 napi_complete(napi);
3156 /* Read cause register */
3157 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3158 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3159 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3161 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3163 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3164 MVNETA_CAUSE_LINK_CHANGE))
3165 mvneta_link_change(pp);
3168 /* Release Tx descriptors */
3169 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3170 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3171 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3174 /* For the case where the last mvneta_poll did not process all RX packets */
3177 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3180 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3182 rx_queue = rx_queue - 1;
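/* Worked example: cause_rx_tx = 0x0c00 means RX queues 2 and 3 have
 * pending packets; (0x0c00 >> 8) & 0xff = 0x0c and fls(0x0c) - 1 = 3,
 * so the highest-numbered pending RXQ is the one served.
 */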
3184 rx_done = mvneta_rx_hwbm(napi, pp, budget,
3185 &pp->rxqs[rx_queue]);
3187 rx_done = mvneta_rx_swbm(napi, pp, budget,
3188 &pp->rxqs[rx_queue]);
3191 if (rx_done < budget) {
3193 napi_complete_done(napi, rx_done);
3195 if (pp->neta_armada3700) {
3196 unsigned long flags;
3198 local_irq_save(flags);
3199 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3200 MVNETA_RX_INTR_MASK(rxq_number) |
3201 MVNETA_TX_INTR_MASK(txq_number) |
3202 MVNETA_MISCINTR_INTR_MASK);
3203 local_irq_restore(flags);
3205 enable_percpu_irq(pp->dev->irq, 0);
3209 if (pp->neta_armada3700)
3210 pp->cause_rx_tx = cause_rx_tx;
3212 port->cause_rx_tx = cause_rx_tx;
3217 static int mvneta_create_page_pool(struct mvneta_port *pp,
3218 struct mvneta_rx_queue *rxq, int size)
3220 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3221 struct page_pool_params pp_params = {
3223 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3225 .nid = NUMA_NO_NODE,
3226 .dev = pp->dev->dev.parent,
3227 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3228 .offset = pp->rx_offset_correction,
3229 .max_len = MVNETA_MAX_RX_BUF_SIZE,
3233 rxq->page_pool = page_pool_create(&pp_params);
3234 if (IS_ERR(rxq->page_pool)) {
3235 err = PTR_ERR(rxq->page_pool);
3236 rxq->page_pool = NULL;
3240 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
3244 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3247 goto err_unregister_rxq;
3252 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3254 page_pool_destroy(rxq->page_pool);
3255 rxq->page_pool = NULL;
3259 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
3260 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3265 err = mvneta_create_page_pool(pp, rxq, num);
3269 for (i = 0; i < num; i++) {
3270 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3271 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3274 "%s:rxq %d, %d of %d buffs filled\n",
3275 __func__, rxq->id, i, num);
3280 /* Add this number of RX descriptors as non-occupied (ready to get packets) */
3283 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3288 /* Free all packets pending transmit from all TXQs and reset TX port */
3289 static void mvneta_tx_reset(struct mvneta_port *pp)
3293 /* Free the skbs in the tx ring */
3294 for (queue = 0; queue < txq_number; queue++)
3295 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3297 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3298 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3301 static void mvneta_rx_reset(struct mvneta_port *pp)
3303 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3304 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3307 /* Rx/Tx queue initialization/cleanup methods */
3309 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3310 struct mvneta_rx_queue *rxq)
3312 rxq->size = pp->rx_ring_size;
3314 /* Allocate memory for RX descriptors */
3315 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3316 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3317 &rxq->descs_phys, GFP_KERNEL);
3321 rxq->last_desc = rxq->size - 1;
3326 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3327 struct mvneta_rx_queue *rxq)
3329 /* Set Rx descriptors queue starting address */
3330 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3331 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3333 /* Set coalescing pkts and time */
3334 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3335 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3339 mvneta_rxq_offset_set(pp, rxq, 0);
3340 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3341 MVNETA_MAX_RX_BUF_SIZE :
3342 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3343 mvneta_rxq_bm_disable(pp, rxq);
3344 mvneta_rxq_fill(pp, rxq, rxq->size);
3347 mvneta_rxq_offset_set(pp, rxq,
3348 NET_SKB_PAD - pp->rx_offset_correction);
3350 mvneta_rxq_bm_enable(pp, rxq);
3351 /* Fill RXQ with buffers from RX pool */
3352 mvneta_rxq_long_pool_set(pp, rxq);
3353 mvneta_rxq_short_pool_set(pp, rxq);
3354 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3358 /* Create a specified RX queue */
3359 static int mvneta_rxq_init(struct mvneta_port *pp,
3360 struct mvneta_rx_queue *rxq)
3365 ret = mvneta_rxq_sw_init(pp, rxq);
3369 mvneta_rxq_hw_init(pp, rxq);
3374 /* Cleanup Rx queue */
3375 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3376 struct mvneta_rx_queue *rxq)
3378 mvneta_rxq_drop_pkts(pp, rxq);
3381 dma_free_coherent(pp->dev->dev.parent,
3382 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3388 rxq->next_desc_to_proc = 0;
3389 rxq->descs_phys = 0;
3390 rxq->first_to_refill = 0;
3391 rxq->refill_num = 0;
3394 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3395 struct mvneta_tx_queue *txq)
3399 txq->size = pp->tx_ring_size;
3401 /* A queue must always have room for at least one skb.
3402 * Therefore, stop the queue when the number of free entries reaches
3403 * the maximum number of descriptors per skb.
3405 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3406 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3408 /* Allocate memory for TX descriptors */
3409 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3410 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3411 &txq->descs_phys, GFP_KERNEL);
3415 txq->last_desc = txq->size - 1;
3417 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3421 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3422 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3423 txq->size * TSO_HEADER_SIZE,
3424 &txq->tso_hdrs_phys, GFP_KERNEL);
3428 /* Setup XPS mapping */
3429 if (pp->neta_armada3700)
3431 else if (txq_number > 1)
3432 cpu = txq->id % num_present_cpus();
3434 cpu = pp->rxq_def % num_present_cpus();
3435 cpumask_set_cpu(cpu, &txq->affinity_mask);
3436 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3441 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3442 struct mvneta_tx_queue *txq)
3444 /* Set maximum bandwidth for enabled TXQs */
3445 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3446 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3448 /* Set Tx descriptors queue starting address */
3449 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3450 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3452 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3455 /* Create and initialize a tx queue */
3456 static int mvneta_txq_init(struct mvneta_port *pp,
3457 struct mvneta_tx_queue *txq)
3461 ret = mvneta_txq_sw_init(pp, txq);
3465 mvneta_txq_hw_init(pp, txq);
3470 /* Free allocated resources when mvneta_txq_init() fails to allocate memory */
3471 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3472 struct mvneta_tx_queue *txq)
3474 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3479 dma_free_coherent(pp->dev->dev.parent,
3480 txq->size * TSO_HEADER_SIZE,
3481 txq->tso_hdrs, txq->tso_hdrs_phys);
3483 dma_free_coherent(pp->dev->dev.parent,
3484 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3485 txq->descs, txq->descs_phys);
3487 netdev_tx_reset_queue(nq);
3491 txq->next_desc_to_proc = 0;
3492 txq->descs_phys = 0;
3495 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3496 struct mvneta_tx_queue *txq)
3498 /* Set minimum bandwidth for disabled TXQs */
3499 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3500 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3502 /* Set Tx descriptors queue starting address and size */
3503 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3504 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3507 static void mvneta_txq_deinit(struct mvneta_port *pp,
3508 struct mvneta_tx_queue *txq)
3510 mvneta_txq_sw_deinit(pp, txq);
3511 mvneta_txq_hw_deinit(pp, txq);
3514 /* Cleanup all Tx queues */
3515 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3519 for (queue = 0; queue < txq_number; queue++)
3520 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3523 /* Cleanup all Rx queues */
3524 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3528 for (queue = 0; queue < rxq_number; queue++)
3529 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3533 /* Init all Rx queues */
3534 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3538 for (queue = 0; queue < rxq_number; queue++) {
3539 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3542 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3544 mvneta_cleanup_rxqs(pp);
3552 /* Init all tx queues */
3553 static int mvneta_setup_txqs(struct mvneta_port *pp)
3557 for (queue = 0; queue < txq_number; queue++) {
3558 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3560 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3562 mvneta_cleanup_txqs(pp);
3570 static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
3574 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3578 return phy_power_on(pp->comphy);
3581 static int mvneta_config_interface(struct mvneta_port *pp,
3582 phy_interface_t interface)
3587 if (interface == PHY_INTERFACE_MODE_SGMII ||
3588 interface == PHY_INTERFACE_MODE_1000BASEX ||
3589 interface == PHY_INTERFACE_MODE_2500BASEX) {
3590 ret = mvneta_comphy_init(pp, interface);
3593 switch (interface) {
3594 case PHY_INTERFACE_MODE_QSGMII:
3595 mvreg_write(pp, MVNETA_SERDES_CFG,
3596 MVNETA_QSGMII_SERDES_PROTO);
3599 case PHY_INTERFACE_MODE_SGMII:
3600 case PHY_INTERFACE_MODE_1000BASEX:
3601 mvreg_write(pp, MVNETA_SERDES_CFG,
3602 MVNETA_SGMII_SERDES_PROTO);
3605 case PHY_INTERFACE_MODE_2500BASEX:
3606 mvreg_write(pp, MVNETA_SERDES_CFG,
3607 MVNETA_HSGMII_SERDES_PROTO);
3614 pp->phy_interface = interface;
3619 static void mvneta_start_dev(struct mvneta_port *pp)
3623 WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
3625 mvneta_max_rx_size_set(pp, pp->pkt_size);
3626 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3628 /* start the Rx/Tx activity */
3629 mvneta_port_enable(pp);
3631 if (!pp->neta_armada3700) {
3632 /* Enable polling on the port */
3633 for_each_online_cpu(cpu) {
3634 struct mvneta_pcpu_port *port =
3635 per_cpu_ptr(pp->ports, cpu);
3637 napi_enable(&port->napi);
3640 napi_enable(&pp->napi);
3643 /* Unmask interrupts. It has to be done from each CPU */
3644 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3646 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3647 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3648 MVNETA_CAUSE_LINK_CHANGE);
3650 phylink_start(pp->phylink);
3652 /* We may have called phylink_speed_down before */
3653 phylink_speed_up(pp->phylink);
3655 netif_tx_start_all_queues(pp->dev);
3657 clear_bit(__MVNETA_DOWN, &pp->state);
3660 static void mvneta_stop_dev(struct mvneta_port *pp)
3664 set_bit(__MVNETA_DOWN, &pp->state);
3666 if (device_may_wakeup(&pp->dev->dev))
3667 phylink_speed_down(pp->phylink, false);
3669 phylink_stop(pp->phylink);
3671 if (!pp->neta_armada3700) {
3672 for_each_online_cpu(cpu) {
3673 struct mvneta_pcpu_port *port =
3674 per_cpu_ptr(pp->ports, cpu);
3676 napi_disable(&port->napi);
3679 napi_disable(&pp->napi);
3682 netif_carrier_off(pp->dev);
3684 mvneta_port_down(pp);
3685 netif_tx_stop_all_queues(pp->dev);
3687 /* Stop the port activity */
3688 mvneta_port_disable(pp);
3690 /* Clear all ethernet port interrupts */
3691 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3693 /* Mask all ethernet port interrupts */
3694 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3696 mvneta_tx_reset(pp);
3697 mvneta_rx_reset(pp);
3699 WARN_ON(phy_power_off(pp->comphy));
3702 static void mvneta_percpu_enable(void *arg)
3704 struct mvneta_port *pp = arg;
3706 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3709 static void mvneta_percpu_disable(void *arg)
3711 struct mvneta_port *pp = arg;
3713 disable_percpu_irq(pp->dev->irq);
3716 /* Change the device mtu */
3717 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3719 struct mvneta_port *pp = netdev_priv(dev);
3722 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3723 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3724 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3725 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3728 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
3729 netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
3735 if (!netif_running(dev)) {
3737 mvneta_bm_update_mtu(pp, mtu);
3739 netdev_update_features(dev);
3743 /* The interface is running, so we have to force a
3744 * reallocation of the queues
3746 mvneta_stop_dev(pp);
3747 on_each_cpu(mvneta_percpu_disable, pp, true);
3749 mvneta_cleanup_txqs(pp);
3750 mvneta_cleanup_rxqs(pp);
3753 mvneta_bm_update_mtu(pp, mtu);
3755 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3757 ret = mvneta_setup_rxqs(pp);
3759 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3763 ret = mvneta_setup_txqs(pp);
3765 netdev_err(dev, "unable to setup txqs after MTU change\n");
3769 on_each_cpu(mvneta_percpu_enable, pp, true);
3770 mvneta_start_dev(pp);
3772 netdev_update_features(dev);
3777 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3778 netdev_features_t features)
3780 struct mvneta_port *pp = netdev_priv(dev);
3782 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3783 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3785 "Disable IP checksum for MTU greater than %dB\n",
3792 /* Get mac address */
3793 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3795 u32 mac_addr_l, mac_addr_h;
3797 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3798 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3799 addr[0] = (mac_addr_h >> 24) & 0xFF;
3800 addr[1] = (mac_addr_h >> 16) & 0xFF;
3801 addr[2] = (mac_addr_h >> 8) & 0xFF;
3802 addr[3] = mac_addr_h & 0xFF;
3803 addr[4] = (mac_addr_l >> 8) & 0xFF;
3804 addr[5] = mac_addr_l & 0xFF;
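/* A hedged sketch of the inverse packing (MAC address back into the two
 * registers), following the byte layout unpacked above. Illustrative only;
 * the driver's real setter also updates the DA filter tables:
 */
static void example_mac_addr_pack(struct mvneta_port *pp, const unsigned char *addr)
{
u32 mac_addr_h = ((u32)addr[0] << 24) | ((u32)addr[1] << 16) |
((u32)addr[2] << 8) | addr[3];
u32 mac_addr_l = ((u32)addr[4] << 8) | addr[5];

mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_addr_h);
mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_addr_l);
}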
3807 /* Handle setting mac address */
3808 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3810 struct mvneta_port *pp = netdev_priv(dev);
3811 struct sockaddr *sockaddr = addr;
3814 ret = eth_prepare_mac_addr_change(dev, addr);
3817 /* Remove previous address table entry */
3818 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3820 /* Set new addr in hw */
3821 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3823 eth_commit_mac_addr_change(dev, addr);
3827 static void mvneta_validate(struct phylink_config *config,
3828 unsigned long *supported,
3829 struct phylink_link_state *state)
3831 struct net_device *ndev = to_net_dev(config->dev);
3832 struct mvneta_port *pp = netdev_priv(ndev);
3833 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3835 /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
3836 if (state->interface != PHY_INTERFACE_MODE_NA &&
3837 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3838 state->interface != PHY_INTERFACE_MODE_SGMII &&
3839 !phy_interface_mode_is_8023z(state->interface) &&
3840 !phy_interface_mode_is_rgmii(state->interface)) {
3841 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3845 /* Allow all the expected bits */
3846 phylink_set(mask, Autoneg);
3847 phylink_set_port_modes(mask);
3849 /* Asymmetric pause is unsupported */
3850 phylink_set(mask, Pause);
3852 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3853 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3854 phylink_set(mask, 1000baseT_Full);
3855 phylink_set(mask, 1000baseX_Full);
3857 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3858 phylink_set(mask, 2500baseT_Full);
3859 phylink_set(mask, 2500baseX_Full);
3862 if (!phy_interface_mode_is_8023z(state->interface)) {
3863 /* 10M and 100M are only supported in non-802.3z mode */
3864 phylink_set(mask, 10baseT_Half);
3865 phylink_set(mask, 10baseT_Full);
3866 phylink_set(mask, 100baseT_Half);
3867 phylink_set(mask, 100baseT_Full);
3870 bitmap_and(supported, supported, mask,
3871 __ETHTOOL_LINK_MODE_MASK_NBITS);
3872 bitmap_and(state->advertising, state->advertising, mask,
3873 __ETHTOOL_LINK_MODE_MASK_NBITS);
3875 /* We can only operate at 2500BaseX or 1000BaseX. If requested
3876 * to advertise both, only report advertising at 2500BaseX.
3878 phylink_helper_basex_speed(state);
3881 static void mvneta_mac_pcs_get_state(struct phylink_config *config,
3882 struct phylink_link_state *state)
3884 struct net_device *ndev = to_net_dev(config->dev);
3885 struct mvneta_port *pp = netdev_priv(ndev);
3888 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3890 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3892 state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3893 SPEED_2500 : SPEED_1000;
3894 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3895 state->speed = SPEED_100;
3897 state->speed = SPEED_10;
3899 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3900 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3901 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3904 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3905 state->pause |= MLO_PAUSE_RX;
3906 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3907 state->pause |= MLO_PAUSE_TX;
3910 static void mvneta_mac_an_restart(struct phylink_config *config)
3912 struct net_device *ndev = to_net_dev(config->dev);
3913 struct mvneta_port *pp = netdev_priv(ndev);
3914 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3916 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3917 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3918 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3919 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3922 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3923 const struct phylink_link_state *state)
3925 struct net_device *ndev = to_net_dev(config->dev);
3926 struct mvneta_port *pp = netdev_priv(ndev);
3927 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3928 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3929 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3930 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3931 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3933 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3934 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3935 MVNETA_GMAC2_PORT_RESET);
3936 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
3937 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3938 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3939 MVNETA_GMAC_INBAND_RESTART_AN |
3940 MVNETA_GMAC_AN_SPEED_EN |
3941 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3942 MVNETA_GMAC_AN_FLOW_CTRL_EN |
3943 MVNETA_GMAC_AN_DUPLEX_EN);
3945 /* Even though it might look weird, when we're configured in
3946 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3948 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3950 if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3951 state->interface == PHY_INTERFACE_MODE_SGMII ||
3952 phy_interface_mode_is_8023z(state->interface))
3953 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3955 if (phylink_test(state->advertising, Pause))
3956 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3958 if (!phylink_autoneg_inband(mode)) {
3959 /* Phy or fixed speed - nothing to do, leave the
3960 * configured speed, duplex and flow control as-is.
3962 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3963 /* SGMII mode receives the state from the PHY */
3964 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3965 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3966 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3967 MVNETA_GMAC_FORCE_LINK_PASS |
3968 MVNETA_GMAC_CONFIG_MII_SPEED |
3969 MVNETA_GMAC_CONFIG_GMII_SPEED |
3970 MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
3971 MVNETA_GMAC_INBAND_AN_ENABLE |
3972 MVNETA_GMAC_AN_SPEED_EN |
3973 MVNETA_GMAC_AN_DUPLEX_EN;
3975 /* 802.3z negotiation - only 1000base-X */
3976 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3977 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3978 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3979 MVNETA_GMAC_FORCE_LINK_PASS |
3980 MVNETA_GMAC_CONFIG_MII_SPEED)) |
3981 MVNETA_GMAC_INBAND_AN_ENABLE |
3982 MVNETA_GMAC_CONFIG_GMII_SPEED |
3983 /* The MAC only supports FD mode */
3984 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3986 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3987 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3990 /* Armada 370 documentation says we can only change the port mode
3991 * and in-band enable when the link is down, so force it down
3992 * while making these changes. We also do this for GMAC_CTRL2
3994 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3995 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
3996 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3997 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3998 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3999 MVNETA_GMAC_FORCE_LINK_DOWN);
4003 /* When at 2.5G, the link partner can send frames with shortened preambles. */
4006 if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
4007 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
4009 if (pp->phy_interface != state->interface) {
4011 WARN_ON(phy_power_off(pp->comphy));
4012 WARN_ON(mvneta_config_interface(pp, state->interface));
4015 if (new_ctrl0 != gmac_ctrl0)
4016 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
4017 if (new_ctrl2 != gmac_ctrl2)
4018 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
4019 if (new_ctrl4 != gmac_ctrl4)
4020 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4021 if (new_clk != gmac_clk)
4022 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
4023 if (new_an != gmac_an)
4024 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
4026 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
4027 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4028 MVNETA_GMAC2_PORT_RESET) != 0)
4033 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
4037 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
4039 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
4041 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
4042 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
4045 static void mvneta_mac_link_down(struct phylink_config *config,
4046 unsigned int mode, phy_interface_t interface)
4048 struct net_device *ndev = to_net_dev(config->dev);
4049 struct mvneta_port *pp = netdev_priv(ndev);
4052 mvneta_port_down(pp);
4054 if (!phylink_autoneg_inband(mode)) {
4055 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4056 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
4057 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
4058 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4061 pp->eee_active = false;
4062 mvneta_set_eee(pp, false);
4065 static void mvneta_mac_link_up(struct phylink_config *config,
4066 struct phy_device *phy,
4067 unsigned int mode, phy_interface_t interface,
4068 int speed, int duplex,
4069 bool tx_pause, bool rx_pause)
4071 struct net_device *ndev = to_net_dev(config->dev);
4072 struct mvneta_port *pp = netdev_priv(ndev);
4075 if (!phylink_autoneg_inband(mode)) {
4076 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4077 val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
4078 MVNETA_GMAC_CONFIG_MII_SPEED |
4079 MVNETA_GMAC_CONFIG_GMII_SPEED |
4080 MVNETA_GMAC_CONFIG_FLOW_CTRL |
4081 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
4082 val |= MVNETA_GMAC_FORCE_LINK_PASS;
4084 if (speed == SPEED_1000 || speed == SPEED_2500)
4085 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
4086 else if (speed == SPEED_100)
4087 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
4089 if (duplex == DUPLEX_FULL)
4090 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
4092 if (tx_pause || rx_pause)
4093 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4095 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4097 /* When inband doesn't cover flow control or flow control is
4098 * disabled, we need to manually configure it. This bit will
4099 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
4101 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4102 val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
4104 if (tx_pause || rx_pause)
4105 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4107 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4112 if (phy && pp->eee_enabled) {
4113 pp->eee_active = phy_init_eee(phy, 0) >= 0;
4114 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4118 static const struct phylink_mac_ops mvneta_phylink_ops = {
4119 .validate = mvneta_validate,
4120 .mac_pcs_get_state = mvneta_mac_pcs_get_state,
4121 .mac_an_restart = mvneta_mac_an_restart,
4122 .mac_config = mvneta_mac_config,
4123 .mac_link_down = mvneta_mac_link_down,
4124 .mac_link_up = mvneta_mac_link_up,
4127 static int mvneta_mdio_probe(struct mvneta_port *pp)
4129 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
4130 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4133 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4135 phylink_ethtool_get_wol(pp->phylink, &wol);
4136 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4138 /* PHY WoL may be enabled but device wakeup disabled */
4140 device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4145 static void mvneta_mdio_remove(struct mvneta_port *pp)
4147 phylink_disconnect_phy(pp->phylink);
4150 /* Electing a CPU must be done in an atomic way: it should be done
4151 * after or before the removal/insertion of a CPU and this function is not reentrant. */
4154 static void mvneta_percpu_elect(struct mvneta_port *pp)
4156 int elected_cpu = 0, max_cpu, cpu, i = 0;
4158 /* Use the CPU associated with the default RXQ when it is online;
4159 * in all other cases, use CPU 0, which can't be offline.
4161 if (cpu_online(pp->rxq_def))
4162 elected_cpu = pp->rxq_def;
4164 max_cpu = num_present_cpus();
4166 for_each_online_cpu(cpu) {
4167 int rxq_map = 0, txq_map = 0;
4170 for (rxq = 0; rxq < rxq_number; rxq++)
4171 if ((rxq % max_cpu) == cpu)
4172 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4174 if (cpu == elected_cpu)
4175 /* Map the default receive queue to the elected CPU */
4176 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4178 /* We update the TX queue map only if we have one
4179 * queue. In this case we associate the TX queue to
4180 * the CPU bound to the default RX queue
4182 if (txq_number == 1)
4183 txq_map = (cpu == elected_cpu) ?
4184 MVNETA_CPU_TXQ_ACCESS(1) : 0;
4186 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4187 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
4189 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4191 /* Update the interrupt mask on each CPU according to the new mapping */
4194 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
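/* Worked example of the RXQ distribution above, as a hedged standalone
 * sketch: with 8 RX queues over 4 online CPUs, (rxq % max_cpu) == cpu gives
 * CPU 0 queues 0 and 4, CPU 1 queues 1 and 5, and so on, while the elected
 * CPU additionally gets the default RXQ mapped in. Illustrative only:
 */
static int example_rxq_map(int cpu, int max_cpu, int nr_rxqs,
int elected_cpu, int rxq_def)
{
int rxq, rxq_map = 0;

for (rxq = 0; rxq < nr_rxqs; rxq++)
if ((rxq % max_cpu) == cpu)
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

if (cpu == elected_cpu)
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq_def);

return rxq_map;
}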
4201 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
4204 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4206 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4208 /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
4209 * are routed to CPU 0, so we don't need all the cpu-hotplug support
4211 if (pp->neta_armada3700)
4214 spin_lock(&pp->lock);
4216 * Configuring the driver for a new CPU while the driver is
4217 * stopping is racy, so just avoid it.
4219 if (pp->is_stopped) {
4220 spin_unlock(&pp->lock);
4223 netif_tx_stop_all_queues(pp->dev);
4226 * We have to synchronise on the napi of each CPU except the one
4227 * just being woken up
4229 for_each_online_cpu(other_cpu) {
4230 if (other_cpu != cpu) {
4231 struct mvneta_pcpu_port *other_port =
4232 per_cpu_ptr(pp->ports, other_cpu);
4234 napi_synchronize(&other_port->napi);
4238 /* Mask all ethernet port interrupts */
4239 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4240 napi_enable(&port->napi);
4243 * Enable per-CPU interrupts on the CPU that is brought up. */
4246 mvneta_percpu_enable(pp);
4249 * Enable per-CPU interrupt on the one CPU we care about. */
4252 mvneta_percpu_elect(pp);
4254 /* Unmask all ethernet port interrupts */
4255 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4256 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4257 MVNETA_CAUSE_PHY_STATUS_CHANGE |
4258 MVNETA_CAUSE_LINK_CHANGE);
4259 netif_tx_start_all_queues(pp->dev);
4260 spin_unlock(&pp->lock);
4264 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
4266 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4268 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4271 * Thanks to this lock we are sure that any pending CPU election is done. */
4274 spin_lock(&pp->lock);
4275 /* Mask all ethernet port interrupts */
4276 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4277 spin_unlock(&pp->lock);
4279 napi_synchronize(&port->napi);
4280 napi_disable(&port->napi);
4281 /* Disable per-CPU interrupts on the CPU that is brought down. */
4282 mvneta_percpu_disable(pp);
4286 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
4288 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4291 /* Check if a new CPU must be elected now that this one is down */
4292 spin_lock(&pp->lock);
4293 mvneta_percpu_elect(pp);
4294 spin_unlock(&pp->lock);
4295 /* Unmask all ethernet port interrupts */
4296 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4297 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4298 MVNETA_CAUSE_PHY_STATUS_CHANGE |
4299 MVNETA_CAUSE_LINK_CHANGE);
4300 netif_tx_start_all_queues(pp->dev);
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable the per-CPU interrupt on all CPUs to handle our
		 * RX queue interrupts.
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Mark that we are stopping so that the notifiers do not
		 * set up the driver for new CPUs. The CPU-online notifier
		 * is protected by the same spinlock, so once we hold the
		 * lock any notifier work has finished.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
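
/* Install or remove an XDP program. The RX buffer layout only changes
 * across a restart, so the device is stopped and reopened whenever a
 * program is first attached or last detached. XDP is refused for MTUs
 * above MVNETA_MAX_RX_BUF_SIZE and when hardware buffer management (BM)
 * is in use.
 */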
static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{
	bool need_update, running = netif_running(dev);
	struct mvneta_port *pp = netdev_priv(dev);
	struct bpf_prog *old_prog;

	if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
		return -EOPNOTSUPP;
	}

	if (pp->bm_priv) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Hardware Buffer Management not supported on XDP");
		return -EOPNOTSUPP;
	}

	need_update = !!pp->xdp_prog != !!prog;
	if (running && need_update)
		mvneta_stop(dev);

	old_prog = xchg(&pp->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (running && need_update)
		return mvneta_open(dev);

	return 0;
}
static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtools */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
}

/* Get link ksettings for ethtools */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
}

static int mvneta_ethtool_nway_reset(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(pp->phylink);
}
/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}
/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}
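
/* Change the ring sizes. The TX ring is clamped so that at least two
 * maximally fragmented skbs fit; a running port is restarted so the new
 * sizes take effect.
 */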
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}
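
/* Fold the per-CPU software counters into a single struct, using the
 * u64_stats syncp retry loop so each CPU's snapshot is read consistently
 * on 32-bit systems.
 */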
static void
mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
				 struct mvneta_ethtool_stats *es)
{
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *stats;
		u64 skb_alloc_error;
		u64 refill_error;
		u64 xdp_redirect;
		u64 xdp_xmit_err;
		u64 xdp_tx_err;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_tx;

		stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			skb_alloc_error = stats->es.skb_alloc_error;
			refill_error = stats->es.refill_error;
			xdp_redirect = stats->es.ps.xdp_redirect;
			xdp_pass = stats->es.ps.xdp_pass;
			xdp_drop = stats->es.ps.xdp_drop;
			xdp_xmit = stats->es.ps.xdp_xmit;
			xdp_xmit_err = stats->es.ps.xdp_xmit_err;
			xdp_tx = stats->es.ps.xdp_tx;
			xdp_tx_err = stats->es.ps.xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		es->skb_alloc_error += skb_alloc_error;
		es->refill_error += refill_error;
		es->ps.xdp_redirect += xdp_redirect;
		es->ps.xdp_pass += xdp_pass;
		es->ps.xdp_drop += xdp_drop;
		es->ps.xdp_xmit += xdp_xmit;
		es->ps.xdp_xmit_err += xdp_xmit_err;
		es->ps.xdp_tx += xdp_tx;
		es->ps.xdp_tx_err += xdp_tx_err;
	}
}
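
/* Refresh the cumulative ethtool statistics: hardware counters are read
 * from the MIB registers (32-bit, or 64-bit as a low/high pair) and
 * accumulated, while the software and XDP counters gathered above
 * overwrite their slots.
 */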
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	struct mvneta_ethtool_stats stats = {};
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;
	int i;

	mvneta_ethtool_update_pcpu_stats(pp, &stats);
	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			pp->ethtool_stats[i] += val;
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			pp->ethtool_stats[i] += val;
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
				pp->ethtool_stats[i] += val;
				break;
			case ETHTOOL_STAT_SKB_ALLOC_ERR:
				pp->ethtool_stats[i] = stats.skb_alloc_error;
				break;
			case ETHTOOL_STAT_REFILL_ERR:
				pp->ethtool_stats[i] = stats.refill_error;
				break;
			case ETHTOOL_XDP_REDIRECT:
				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
				break;
			case ETHTOOL_XDP_PASS:
				pp->ethtool_stats[i] = stats.ps.xdp_pass;
				break;
			case ETHTOOL_XDP_DROP:
				pp->ethtool_stats[i] = stats.ps.xdp_drop;
				break;
			case ETHTOOL_XDP_TX:
				pp->ethtool_stats[i] = stats.ps.xdp_tx;
				break;
			case ETHTOOL_XDP_TX_ERR:
				pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
				break;
			case ETHTOOL_XDP_XMIT:
				pp->ethtool_stats[i] = stats.ps.xdp_xmit;
				break;
			case ETHTOOL_XDP_XMIT_ERR:
				pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
				break;
			}
			break;
		}
	}
}
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}
static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
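
/* Apply a new RSS indirection: quiesce NAPI on every CPU, retarget the
 * default RX queue (rxq_def) to the first indirection entry, refresh the
 * unicast mapping and port config, and re-run the CPU election.
 */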
static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update the portCfg register according to the new rxq_def */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&pcpu_port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters.
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}
static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	eee->tx_lpi_timer = lpi_ctl0 >> 8;

	return phylink_ethtool_get_eee(pp->phylink, eee);
}
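
/* Program the LPI timer (bits 15:8 of LPI_CTRL_0) and hand the rest of
 * the EEE configuration to phylink.
 */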
static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register.
	 */
	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}
static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
}

static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
{
	u32 val = 0;
	int i;

	for (i = 0; i < rxq_number; i++)
		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);

	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
}
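
/* mqprio offload: program the VLAN-priority-to-RX-queue mapping register
 * from the priority/TC map and mirror the TC layout into the stack; a
 * zero TC count clears the offload.
 */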
static int mvneta_setup_mqprio(struct net_device *dev,
			       struct tc_mqprio_qopt *qopt)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u8 num_tc;
	int i;

	qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = qopt->num_tc;

	if (num_tc > rxq_number)
		return -EINVAL;

	if (!num_tc) {
		mvneta_clear_rx_prio_map(pp);
		netdev_reset_tc(dev);
		return 0;
	}

	memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));

	mvneta_setup_rx_prio_map(pp);

	netdev_set_num_tc(dev, qopt->num_tc);
	for (i = 0; i < qopt->num_tc; i++)
		netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);

	return 0;
}
static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return mvneta_setup_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
	.ndo_bpf             = mvneta_xdp,
	.ndo_xdp_xmit        = mvneta_xdp_xmit,
	.ndo_setup_tc        = mvneta_setup_tc,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset	= mvneta_ethtool_nway_reset,
	.get_link       = ethtool_op_get_link,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam	= mvneta_ethtool_set_ringparam,
	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
	.get_strings	= mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count	= mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
	.get_rxfh	= mvneta_ethtool_get_rxfh,
	.set_rxfh	= mvneta_ethtool_set_rxfh,
	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
	.get_wol        = mvneta_ethtool_get_wol,
	.set_wol        = mvneta_ethtool_set_wol,
	.get_eee	= mvneta_ethtool_get_eee,
	.set_eee	= mvneta_ethtool_set_eee,
};
/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open a default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(phy_mode) &&
	    !phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	struct phy *comphy;
	char hw_mac_addr[ETH_ALEN];
	phy_interface_t phy_mode;
	const char *mac_from;
	int tx_csum_limit;
	int err;
	int cpu;

	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0)
		return -EINVAL;

	err = of_get_phy_mode(dn, &phy_mode);
	if (err) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		goto err_free_irq;
	}

	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
		err = -EPROBE_DEFER;
		goto err_free_irq;
	} else if (IS_ERR(comphy)) {
		comphy = NULL;
	}

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);

	pp->phylink_config.dev = &dev->dev;
	pp->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
				 phy_mode, &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp->phylink = phylink;
	pp->comphy = comphy;
	pp->phy_interface = phy_mode;
	pp->dn = dn;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}
	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	err = of_get_mac_address(dn, dev->dev_addr);
	if (!err) {
		mac_from = "device tree";
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting a default Mbus window configuration,
	 * but without using a filled mbus_dram_target_info structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms whose
		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	/* sw buffer management */
	if (!pp->bm_priv)
		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only a single NAPI context should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}
#ifdef CONFIG_PM_SLEEP
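/* System-sleep suspend hook: drop the hotplug instances first so no
 * callback can race with the suspend path, then stop the device, drain
 * the RX queues, tear down the TX queue hardware state, detach the
 * netdev and gate the clocks.
 */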
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	rtnl_lock();
	mvneta_stop_dev(pp);
	rtnl_unlock();

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}
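
/* System-sleep resume hook: re-enable the clocks and reprogram the mbus
 * windows, port defaults and queue hardware (register state is assumed
 * lost across suspend), then restart the port if it was running.
 */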
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	rtnl_lock();
	mvneta_start_dev(pp);
	rtnl_unlock();
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};
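
/* Module init: register the two CPU hotplug multi-instance states (a
 * dynamic "online" state plus the static CPUHP_NET_MVNETA_DEAD state)
 * before the platform driver, so the callbacks exist by the time a port
 * attaches its instances in mvneta_open().
 */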
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);
static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);