1 // SPDX-License-Identifier: GPL-2.0+
3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
6 * Right now, I am very wasteful with the buffers. I allocate memory
7 * pages and then divide them into 2K frame buffers. This way I know I
8 * have buffers large enough to hold one frame within one buffer descriptor.
9 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
10 * will be much more memory efficient and will easily handle lots of small packets.
13 * Much better multiple PHY support by Magnus Damm.
14 * Copyright (c) 2000 Ericsson Radio Systems AB.
16 * Support for FEC controller of ColdFire processors.
17 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
19 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
20 * Copyright (c) 2004-2006 Macq Electronique SA.
22 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/ptrace.h>
30 #include <linux/errno.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
41 #include <net/page_pool/helpers.h>
42 #include <net/selftests.h>
44 #include <linux/tcp.h>
45 #include <linux/udp.h>
46 #include <linux/icmp.h>
47 #include <linux/spinlock.h>
48 #include <linux/workqueue.h>
49 #include <linux/bitops.h>
51 #include <linux/irq.h>
52 #include <linux/clk.h>
53 #include <linux/crc32.h>
54 #include <linux/platform_device.h>
55 #include <linux/property.h>
56 #include <linux/mdio.h>
57 #include <linux/phy.h>
58 #include <linux/fec.h>
60 #include <linux/of_mdio.h>
61 #include <linux/of_net.h>
62 #include <linux/regulator/consumer.h>
63 #include <linux/if_vlan.h>
64 #include <linux/pinctrl/consumer.h>
65 #include <linux/gpio/consumer.h>
66 #include <linux/prefetch.h>
67 #include <linux/mfd/syscon.h>
68 #include <linux/regmap.h>
69 #include <soc/imx/cpuidle.h>
70 #include <linux/filter.h>
71 #include <linux/bpf.h>
72 #include <linux/bpf_trace.h>
74 #include <asm/cacheflush.h>
78 static void set_multicast_list(struct net_device *ndev);
79 static void fec_enet_itr_coal_set(struct net_device *ndev);
80 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
81 int cpu, struct xdp_buff *xdp,
84 #define DRIVER_NAME "fec"
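/* Maps the 3-bit VLAN PCP value to a transmit queue: priorities 0-1 select
 * queue 0, 2-4 select queue 1, and 5-7 select queue 2.
 */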
86 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
88 /* Pause frame field and FIFO threshold */
89 #define FEC_ENET_FCE (1 << 5)
90 #define FEC_ENET_RSEM_V 0x84
91 #define FEC_ENET_RSFL_V 16
92 #define FEC_ENET_RAEM_V 0x8
93 #define FEC_ENET_RAFL_V 0x8
94 #define FEC_ENET_OPD_V 0xFFF0
95 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */
97 #define FEC_ENET_XDP_PASS 0
98 #define FEC_ENET_XDP_CONSUMED BIT(0)
99 #define FEC_ENET_XDP_TX BIT(1)
100 #define FEC_ENET_XDP_REDIR BIT(2)
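/* These verdicts are OR-ed into a per-poll xdp_result. A minimal sketch of
 * how the RX path consumes them (see fec_enet_rx_queue() below; flushing
 * deferred redirects once per poll via xdp_do_flush() is assumed from the
 * FEC_ENET_XDP_REDIR check there):
 *
 *	ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
 *	if (ret != FEC_ENET_XDP_PASS)
 *		goto rx_processing_done;
 *	...
 *	if (xdp_result & FEC_ENET_XDP_REDIR)
 *		xdp_do_flush();
 */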
106 static const struct fec_devinfo fec_imx25_info = {
107 .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
108 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
111 static const struct fec_devinfo fec_imx27_info = {
112 .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
113 FEC_QUIRK_HAS_MDIO_C45,
116 static const struct fec_devinfo fec_imx28_info = {
117 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
118 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
119 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
120 FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
123 static const struct fec_devinfo fec_imx6q_info = {
124 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
125 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
126 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
127 FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
128 FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
131 static const struct fec_devinfo fec_mvf600_info = {
132 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
133 FEC_QUIRK_HAS_MDIO_C45,
136 static const struct fec_devinfo fec_imx6x_info = {
137 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
138 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
139 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
140 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
141 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
142 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
143 FEC_QUIRK_HAS_MDIO_C45,
146 static const struct fec_devinfo fec_imx6ul_info = {
147 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
148 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
149 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
150 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
151 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
152 FEC_QUIRK_HAS_MDIO_C45,
155 static const struct fec_devinfo fec_imx8mq_info = {
156 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
157 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
158 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
159 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
160 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
161 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
162 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
163 FEC_QUIRK_HAS_MDIO_C45,
166 static const struct fec_devinfo fec_imx8qm_info = {
167 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
168 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
169 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
170 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
171 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
172 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
173 FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
176 static const struct fec_devinfo fec_s32v234_info = {
177 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
178 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
179 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
180 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
181 FEC_QUIRK_HAS_MDIO_C45,
184 static struct platform_device_id fec_devtype[] = {
186 /* keep it for coldfire */
193 MODULE_DEVICE_TABLE(platform, fec_devtype);
195 static const struct of_device_id fec_dt_ids[] = {
196 { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
197 { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
198 { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
199 { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
200 { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
201 { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
202 { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
203 { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
204 { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
205 { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
208 MODULE_DEVICE_TABLE(of, fec_dt_ids);
210 static unsigned char macaddr[ETH_ALEN];
211 module_param_array(macaddr, byte, NULL, 0);
212 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
214 #if defined(CONFIG_M5272)
216 * Some hardware gets its MAC address out of local flash memory;
217 * if this is non-zero then assume it is the address to get the MAC from.
219 #if defined(CONFIG_NETtel)
220 #define FEC_FLASHMAC 0xf0006006
221 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
222 #define FEC_FLASHMAC 0xf0006000
223 #elif defined(CONFIG_CANCam)
224 #define FEC_FLASHMAC 0xf0020000
225 #elif defined(CONFIG_M5272C3)
226 #define FEC_FLASHMAC (0xffe04000 + 4)
227 #elif defined(CONFIG_MOD5272)
228 #define FEC_FLASHMAC 0xffc0406b
230 #define FEC_FLASHMAC 0
232 #endif /* CONFIG_M5272 */
234 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
236 * 2048 byte skbufs are allocated. However, the alignment requirements
237 * vary between FEC variants. Worst case is 64, so round down by 64.
239 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
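/* With the current constants this is round_down(1984, 64) == 1984, since
 * 1984 is already a multiple of 64.
 */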
240 #define PKT_MINBUF_SIZE 64
242 /* FEC receive acceleration */
243 #define FEC_RACC_IPDIS (1 << 1)
244 #define FEC_RACC_PRODIS (1 << 2)
245 #define FEC_RACC_SHIFT16 BIT(7)
246 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
248 /* MIB Control Register */
249 #define FEC_MIB_CTRLSTAT_DISABLE BIT(31)
252 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
253 * size bits. Other FEC hardware does not, so we need to take that into
254 * account when setting it.
256 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
257 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
258 defined(CONFIG_ARM64)
259 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
261 #define OPT_FRAME_SIZE 0
264 /* FEC MII MMFR bits definition */
265 #define FEC_MMFR_ST (1 << 30)
266 #define FEC_MMFR_ST_C45 (0)
267 #define FEC_MMFR_OP_READ (2 << 28)
268 #define FEC_MMFR_OP_READ_C45 (3 << 28)
269 #define FEC_MMFR_OP_WRITE (1 << 28)
270 #define FEC_MMFR_OP_ADDR_WRITE (0)
271 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
272 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
273 #define FEC_MMFR_TA (2 << 16)
274 #define FEC_MMFR_DATA(v) (v & 0xffff)
275 /* FEC ECR bits definition */
276 #define FEC_ECR_MAGICEN (1 << 2)
277 #define FEC_ECR_SLEEP (1 << 3)
279 #define FEC_MII_TIMEOUT 30000 /* us */
281 /* Transmitter timeout */
282 #define TX_TIMEOUT (2 * HZ)
284 #define FEC_PAUSE_FLAG_AUTONEG 0x1
285 #define FEC_PAUSE_FLAG_ENABLE 0x2
286 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
287 #define FEC_WOL_FLAG_ENABLE (0x1 << 1)
288 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
290 /* Max number of allowed TCP segments for software TSO */
291 #define FEC_MAX_TSO_SEGS 100
292 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
294 #define IS_TSO_HEADER(txq, addr) \
295 ((addr >= txq->tso_hdrs_dma) && \
296 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
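/* TSO headers live in the coherent txq->tso_hdrs buffer rather than in a
 * streaming DMA mapping, so completion paths must skip the unmap for them,
 * as in:
 *
 *	if (addr && !IS_TSO_HEADER(txq, addr))
 *		dma_unmap_single(&fep->pdev->dev, addr, len, DMA_TO_DEVICE);
 */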
300 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
301 struct bufdesc_prop *bd)
303 return (bdp >= bd->last) ? bd->base
304 : (struct bufdesc *)(((void *)bdp) + bd->dsize);
307 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
308 struct bufdesc_prop *bd)
310 return (bdp <= bd->base) ? bd->last
311 : (struct bufdesc *)(((void *)bdp) - bd->dsize);
314 static int fec_enet_get_bd_index(struct bufdesc *bdp,
315 struct bufdesc_prop *bd)
317 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
320 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
324 entries = (((const char *)txq->dirty_tx -
325 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
327 return entries >= 0 ? entries : entries + txq->bd.ring_size;
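/* The "- 1" reserves one descriptor so a full ring is distinguishable from
 * an empty one: with ring_size = 512 and dirty_tx == cur this reports
 * -1 + 512 = 511 free entries, never 512.
 */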
330 static void swap_buffer(void *bufaddr, int len)
333 unsigned int *buf = bufaddr;
335 for (i = 0; i < len; i += 4, buf++)
339 static void fec_dump(struct net_device *ndev)
341 struct fec_enet_private *fep = netdev_priv(ndev);
343 struct fec_enet_priv_tx_q *txq;
346 netdev_info(ndev, "TX ring dump\n");
347 pr_info("Nr SC addr len SKB\n");
349 txq = fep->tx_queue[0];
353 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
355 bdp == txq->bd.cur ? 'S' : ' ',
356 bdp == txq->dirty_tx ? 'H' : ' ',
357 fec16_to_cpu(bdp->cbd_sc),
358 fec32_to_cpu(bdp->cbd_bufaddr),
359 fec16_to_cpu(bdp->cbd_datlen),
360 txq->tx_buf[index].buf_p);
361 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
363 } while (bdp != txq->bd.base);
367 * Coldfire does not support DMA coherent allocations, and has historically used
368 * a band-aid with a manual flush in fec_enet_rx_queue.
370 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
371 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
374 return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
377 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
380 dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
382 #else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
383 static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
386 return dma_alloc_coherent(dev, size, handle, gfp);
389 static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
392 dma_free_coherent(dev, size, cpu_addr, handle);
394 #endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
396 struct fec_dma_devres {
399 dma_addr_t dma_handle;
402 static void fec_dmam_release(struct device *dev, void *res)
404 struct fec_dma_devres *this = res;
406 fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
409 static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
412 struct fec_dma_devres *dr;
415 dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
418 vaddr = fec_dma_alloc(dev, size, handle, gfp);
424 dr->dma_handle = *handle;
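/* The rest of this helper (elided here) follows the usual devres pattern:
 * the allocation is recorded in *dr and registered with devres_add(), so
 * fec_dmam_release() frees it automatically when the device detaches.
 */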
430 static inline bool is_ipv4_pkt(struct sk_buff *skb)
432 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
436 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
438 /* Only run for packets requiring a checksum. */
439 if (skb->ip_summed != CHECKSUM_PARTIAL)
442 if (unlikely(skb_cow_head(skb, 0)))
445 if (is_ipv4_pkt(skb))
446 ip_hdr(skb)->check = 0;
447 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
453 fec_enet_create_page_pool(struct fec_enet_private *fep,
454 struct fec_enet_priv_rx_q *rxq, int size)
456 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
457 struct page_pool_params pp_params = {
459 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
461 .nid = dev_to_node(&fep->pdev->dev),
462 .dev = &fep->pdev->dev,
463 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
464 .offset = FEC_ENET_XDP_HEADROOM,
465 .max_len = FEC_ENET_RX_FRSIZE,
469 rxq->page_pool = page_pool_create(&pp_params);
470 if (IS_ERR(rxq->page_pool)) {
471 err = PTR_ERR(rxq->page_pool);
472 rxq->page_pool = NULL;
476 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
480 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
483 goto err_unregister_rxq;
488 xdp_rxq_info_unreg(&rxq->xdp_rxq);
490 page_pool_destroy(rxq->page_pool);
491 rxq->page_pool = NULL;
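/* A minimal usage sketch (assumed call site, mirroring the RX ring setup,
 * with the size argument feeding the pool_size of the elided
 * page_pool_params above):
 *
 *	err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
 *	if (err < 0)
 *		return err;
 */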
495 static struct bufdesc *
496 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
498 struct net_device *ndev)
500 struct fec_enet_private *fep = netdev_priv(ndev);
501 struct bufdesc *bdp = txq->bd.cur;
502 struct bufdesc_ex *ebdp;
503 int nr_frags = skb_shinfo(skb)->nr_frags;
505 unsigned short status;
506 unsigned int estatus = 0;
507 skb_frag_t *this_frag;
513 for (frag = 0; frag < nr_frags; frag++) {
514 this_frag = &skb_shinfo(skb)->frags[frag];
515 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
516 ebdp = (struct bufdesc_ex *)bdp;
518 status = fec16_to_cpu(bdp->cbd_sc);
519 status &= ~BD_ENET_TX_STATS;
520 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
521 frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
523 /* Handle the last BD specially */
524 if (frag == nr_frags - 1) {
525 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
526 if (fep->bufdesc_ex) {
527 estatus |= BD_ENET_TX_INT;
528 if (unlikely(skb_shinfo(skb)->tx_flags &
529 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
530 estatus |= BD_ENET_TX_TS;
534 if (fep->bufdesc_ex) {
535 if (fep->quirks & FEC_QUIRK_HAS_AVB)
536 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
537 if (skb->ip_summed == CHECKSUM_PARTIAL)
538 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
541 ebdp->cbd_esc = cpu_to_fec32(estatus);
544 bufaddr = skb_frag_address(this_frag);
546 index = fec_enet_get_bd_index(bdp, &txq->bd);
547 if (((unsigned long) bufaddr) & fep->tx_align ||
548 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
549 memcpy(txq->tx_bounce[index], bufaddr, frag_len);
550 bufaddr = txq->tx_bounce[index];
552 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
553 swap_buffer(bufaddr, frag_len);
556 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
558 if (dma_mapping_error(&fep->pdev->dev, addr)) {
560 netdev_err(ndev, "Tx DMA memory map failed\n");
561 goto dma_mapping_error;
564 bdp->cbd_bufaddr = cpu_to_fec32(addr);
565 bdp->cbd_datlen = cpu_to_fec16(frag_len);
566 /* Make sure the updates to rest of the descriptor are
567 * performed before transferring ownership.
570 bdp->cbd_sc = cpu_to_fec16(status);
576 for (i = 0; i < frag; i++) {
577 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
578 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
579 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
581 return ERR_PTR(-ENOMEM);
584 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
585 struct sk_buff *skb, struct net_device *ndev)
587 struct fec_enet_private *fep = netdev_priv(ndev);
588 int nr_frags = skb_shinfo(skb)->nr_frags;
589 struct bufdesc *bdp, *last_bdp;
592 unsigned short status;
593 unsigned short buflen;
594 unsigned int estatus = 0;
598 entries_free = fec_enet_get_free_txdesc_num(txq);
599 if (entries_free < MAX_SKB_FRAGS + 1) {
600 dev_kfree_skb_any(skb);
602 netdev_err(ndev, "NOT enough BD for SG!\n");
606 /* Protocol checksum off-load for TCP and UDP. */
607 if (fec_enet_clear_csum(skb, ndev)) {
608 dev_kfree_skb_any(skb);
612 /* Fill in a Tx ring entry */
615 status = fec16_to_cpu(bdp->cbd_sc);
616 status &= ~BD_ENET_TX_STATS;
618 /* Set buffer length and buffer pointer */
620 buflen = skb_headlen(skb);
622 index = fec_enet_get_bd_index(bdp, &txq->bd);
623 if (((unsigned long) bufaddr) & fep->tx_align ||
624 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
625 memcpy(txq->tx_bounce[index], skb->data, buflen);
626 bufaddr = txq->tx_bounce[index];
628 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
629 swap_buffer(bufaddr, buflen);
632 /* Push the data cache so the CPM does not get stale memory data. */
633 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
634 if (dma_mapping_error(&fep->pdev->dev, addr)) {
635 dev_kfree_skb_any(skb);
637 netdev_err(ndev, "Tx DMA memory map failed\n");
642 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
643 if (IS_ERR(last_bdp)) {
644 dma_unmap_single(&fep->pdev->dev, addr,
645 buflen, DMA_TO_DEVICE);
646 dev_kfree_skb_any(skb);
650 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
651 if (fep->bufdesc_ex) {
652 estatus = BD_ENET_TX_INT;
653 if (unlikely(skb_shinfo(skb)->tx_flags &
654 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
655 estatus |= BD_ENET_TX_TS;
658 bdp->cbd_bufaddr = cpu_to_fec32(addr);
659 bdp->cbd_datlen = cpu_to_fec16(buflen);
661 if (fep->bufdesc_ex) {
663 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
665 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
667 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
669 if (fep->quirks & FEC_QUIRK_HAS_AVB)
670 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
672 if (skb->ip_summed == CHECKSUM_PARTIAL)
673 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
676 ebdp->cbd_esc = cpu_to_fec32(estatus);
679 index = fec_enet_get_bd_index(last_bdp, &txq->bd);
680 /* Save skb pointer */
681 txq->tx_buf[index].buf_p = skb;
683 /* Make sure the updates to rest of the descriptor are performed before
684 * transferring ownership.
688 /* Send it on its way. Tell FEC it's ready, interrupt when done,
689 * it's the last BD of the frame, and to put the CRC on the end.
691 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
692 bdp->cbd_sc = cpu_to_fec16(status);
694 /* If this was the last BD in the ring, start at the beginning again. */
695 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
697 skb_tx_timestamp(skb);
699 /* Make sure the update to bdp is performed before txq->bd.cur. */
703 /* Trigger transmission start */
704 writel(0, txq->bd.reg_desc_active);
710 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
711 struct net_device *ndev,
712 struct bufdesc *bdp, int index, char *data,
713 int size, bool last_tcp, bool is_last)
715 struct fec_enet_private *fep = netdev_priv(ndev);
716 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
717 unsigned short status;
718 unsigned int estatus = 0;
721 status = fec16_to_cpu(bdp->cbd_sc);
722 status &= ~BD_ENET_TX_STATS;
724 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
726 if (((unsigned long) data) & fep->tx_align ||
727 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
728 memcpy(txq->tx_bounce[index], data, size);
729 data = txq->tx_bounce[index];
731 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
732 swap_buffer(data, size);
735 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
736 if (dma_mapping_error(&fep->pdev->dev, addr)) {
737 dev_kfree_skb_any(skb);
739 netdev_err(ndev, "Tx DMA memory map failed\n");
743 bdp->cbd_datlen = cpu_to_fec16(size);
744 bdp->cbd_bufaddr = cpu_to_fec32(addr);
746 if (fep->bufdesc_ex) {
747 if (fep->quirks & FEC_QUIRK_HAS_AVB)
748 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
749 if (skb->ip_summed == CHECKSUM_PARTIAL)
750 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
752 ebdp->cbd_esc = cpu_to_fec32(estatus);
755 /* Handle the last BD specially */
757 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
759 status |= BD_ENET_TX_INTR;
761 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
764 bdp->cbd_sc = cpu_to_fec16(status);
770 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
771 struct sk_buff *skb, struct net_device *ndev,
772 struct bufdesc *bdp, int index)
774 struct fec_enet_private *fep = netdev_priv(ndev);
775 int hdr_len = skb_tcp_all_headers(skb);
776 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
778 unsigned long dmabuf;
779 unsigned short status;
780 unsigned int estatus = 0;
782 status = fec16_to_cpu(bdp->cbd_sc);
783 status &= ~BD_ENET_TX_STATS;
784 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
786 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
787 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
788 if (((unsigned long)bufaddr) & fep->tx_align ||
789 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
790 memcpy(txq->tx_bounce[index], skb->data, hdr_len);
791 bufaddr = txq->tx_bounce[index];
793 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
794 swap_buffer(bufaddr, hdr_len);
796 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
797 hdr_len, DMA_TO_DEVICE);
798 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
799 dev_kfree_skb_any(skb);
801 netdev_err(ndev, "Tx DMA memory map failed\n");
806 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
807 bdp->cbd_datlen = cpu_to_fec16(hdr_len);
809 if (fep->bufdesc_ex) {
810 if (fep->quirks & FEC_QUIRK_HAS_AVB)
811 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
812 if (skb->ip_summed == CHECKSUM_PARTIAL)
813 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
815 ebdp->cbd_esc = cpu_to_fec32(estatus);
818 bdp->cbd_sc = cpu_to_fec16(status);
823 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
825 struct net_device *ndev)
827 struct fec_enet_private *fep = netdev_priv(ndev);
828 int hdr_len, total_len, data_left;
829 struct bufdesc *bdp = txq->bd.cur;
831 unsigned int index = 0;
834 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
835 dev_kfree_skb_any(skb);
837 netdev_err(ndev, "NOT enough BD for TSO!\n");
841 /* Protocol checksum off-load for TCP and UDP. */
842 if (fec_enet_clear_csum(skb, ndev)) {
843 dev_kfree_skb_any(skb);
847 /* Initialize the TSO handler, and prepare the first payload */
848 hdr_len = tso_start(skb, &tso);
850 total_len = skb->len - hdr_len;
851 while (total_len > 0) {
854 index = fec_enet_get_bd_index(bdp, &txq->bd);
855 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
856 total_len -= data_left;
858 /* prepare packet headers: MAC + IP + TCP */
859 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
860 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
861 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
865 while (data_left > 0) {
868 size = min_t(int, tso.size, data_left);
869 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
870 index = fec_enet_get_bd_index(bdp, &txq->bd);
871 ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
880 tso_build_data(skb, &tso, size);
883 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
886 /* Save skb pointer */
887 txq->tx_buf[index].buf_p = skb;
889 skb_tx_timestamp(skb);
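/* ERR007885 workaround (assumed from the quirk name): a TDAR doorbell write
 * can be lost while the controller is busy clearing the register, so it is
 * read back up to four times and the doorbell is rung only if a read-back
 * returns zero; if TDAR stays set, transmission is already active and no
 * write is needed. Parts without the quirk write unconditionally.
 */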
892 /* Trigger transmission start */
893 if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
894 !readl(txq->bd.reg_desc_active) ||
895 !readl(txq->bd.reg_desc_active) ||
896 !readl(txq->bd.reg_desc_active) ||
897 !readl(txq->bd.reg_desc_active))
898 writel(0, txq->bd.reg_desc_active);
903 /* TODO: Release all used data descriptors for TSO */
908 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
910 struct fec_enet_private *fep = netdev_priv(ndev);
912 unsigned short queue;
913 struct fec_enet_priv_tx_q *txq;
914 struct netdev_queue *nq;
917 queue = skb_get_queue_mapping(skb);
918 txq = fep->tx_queue[queue];
919 nq = netdev_get_tx_queue(ndev, queue);
922 ret = fec_enet_txq_submit_tso(txq, skb, ndev);
924 ret = fec_enet_txq_submit_skb(txq, skb, ndev);
928 entries_free = fec_enet_get_free_txdesc_num(txq);
929 if (entries_free <= txq->tx_stop_threshold)
930 netif_tx_stop_queue(nq);
935 /* Init RX & TX buffer descriptors
937 static void fec_enet_bd_init(struct net_device *dev)
939 struct fec_enet_private *fep = netdev_priv(dev);
940 struct fec_enet_priv_tx_q *txq;
941 struct fec_enet_priv_rx_q *rxq;
946 for (q = 0; q < fep->num_rx_queues; q++) {
947 /* Initialize the receive buffer descriptors. */
948 rxq = fep->rx_queue[q];
951 for (i = 0; i < rxq->bd.ring_size; i++) {
953 /* Initialize the BD for every fragment in the page. */
954 if (bdp->cbd_bufaddr)
955 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
957 bdp->cbd_sc = cpu_to_fec16(0);
958 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
961 /* Set the last buffer to wrap */
962 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
963 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
965 rxq->bd.cur = rxq->bd.base;
968 for (q = 0; q < fep->num_tx_queues; q++) {
969 /* ...and the same for transmit */
970 txq = fep->tx_queue[q];
974 for (i = 0; i < txq->bd.ring_size; i++) {
975 /* Initialize the BD for every fragment in the page. */
976 bdp->cbd_sc = cpu_to_fec16(0);
977 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
978 if (bdp->cbd_bufaddr &&
979 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
980 dma_unmap_single(&fep->pdev->dev,
981 fec32_to_cpu(bdp->cbd_bufaddr),
982 fec16_to_cpu(bdp->cbd_datlen),
984 if (txq->tx_buf[i].buf_p)
985 dev_kfree_skb_any(txq->tx_buf[i].buf_p);
986 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
987 if (bdp->cbd_bufaddr)
988 dma_unmap_single(&fep->pdev->dev,
989 fec32_to_cpu(bdp->cbd_bufaddr),
990 fec16_to_cpu(bdp->cbd_datlen),
993 if (txq->tx_buf[i].buf_p)
994 xdp_return_frame(txq->tx_buf[i].buf_p);
996 struct page *page = txq->tx_buf[i].buf_p;
999 page_pool_put_page(page->pp, page, 0, false);
1002 txq->tx_buf[i].buf_p = NULL;
1003 /* restore default tx buffer type: FEC_TXBUF_T_SKB */
1004 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
1005 bdp->cbd_bufaddr = cpu_to_fec32(0);
1006 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1009 /* Set the last buffer to wrap */
1010 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1011 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1012 txq->dirty_tx = bdp;
1016 static void fec_enet_active_rxring(struct net_device *ndev)
1018 struct fec_enet_private *fep = netdev_priv(ndev);
1021 for (i = 0; i < fep->num_rx_queues; i++)
1022 writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1025 static void fec_enet_enable_ring(struct net_device *ndev)
1027 struct fec_enet_private *fep = netdev_priv(ndev);
1028 struct fec_enet_priv_tx_q *txq;
1029 struct fec_enet_priv_rx_q *rxq;
1032 for (i = 0; i < fep->num_rx_queues; i++) {
1033 rxq = fep->rx_queue[i];
1034 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1035 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1039 writel(RCMR_MATCHEN | RCMR_CMP(i),
1040 fep->hwp + FEC_RCMR(i));
1043 for (i = 0; i < fep->num_tx_queues; i++) {
1044 txq = fep->tx_queue[i];
1045 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1049 writel(DMA_CLASS_EN | IDLE_SLOPE(i),
1050 fep->hwp + FEC_DMA_CFG(i));
1055 * This function is called to start or restart the FEC during a link
1056 * change, transmit timeout, or to reconfigure the FEC. The network
1057 * packet processing for this device must be stopped before this call.
1060 fec_restart(struct net_device *ndev)
1062 struct fec_enet_private *fep = netdev_priv(ndev);
1064 u32 rcntl = OPT_FRAME_SIZE | 0x04;
1065 u32 ecntl = 0x2; /* ETHEREN */
1067 /* Whack a reset. We should wait for this.
1068 * For the i.MX6SX SoC, the ENET block sits on the AXI bus, so we
1069 * disable the MAC instead of resetting it.
1071 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
1072 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
1073 writel(0, fep->hwp + FEC_ECNTRL);
1075 writel(1, fep->hwp + FEC_ECNTRL);
1080 * an enet-mac reset will reset the MAC address registers too,
1081 * so we need to reconfigure them.
1083 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1084 writel((__force u32)cpu_to_be32(temp_mac[0]),
1085 fep->hwp + FEC_ADDR_LOW);
1086 writel((__force u32)cpu_to_be32(temp_mac[1]),
1087 fep->hwp + FEC_ADDR_HIGH);
1089 /* Clear any outstanding interrupt, except MDIO. */
1090 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1092 fec_enet_bd_init(ndev);
1094 fec_enet_enable_ring(ndev);
1096 /* Enable MII mode */
1097 if (fep->full_duplex == DUPLEX_FULL) {
1099 writel(0x04, fep->hwp + FEC_X_CNTRL);
1101 /* No Rcv on Xmit */
1103 writel(0x0, fep->hwp + FEC_X_CNTRL);
1107 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1109 #if !defined(CONFIG_M5272)
1110 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1111 u32 val = readl(fep->hwp + FEC_RACC);
1113 /* align IP header */
1114 val |= FEC_RACC_SHIFT16;
1115 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1116 /* set RX checksum */
1117 val |= FEC_RACC_OPTIONS;
1119 val &= ~FEC_RACC_OPTIONS;
1120 writel(val, fep->hwp + FEC_RACC);
1121 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1126 * The phy interface and speed need to get configured
1127 * differently on enet-mac.
1129 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1130 /* Enable flow control and length check */
1131 rcntl |= 0x40000000 | 0x00000020;
1133 /* RGMII, RMII or MII */
1134 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1135 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1136 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1137 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1139 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1144 /* 1G, 100M or 10M */
1146 if (ndev->phydev->speed == SPEED_1000)
1148 else if (ndev->phydev->speed == SPEED_100)
1154 #ifdef FEC_MIIGSK_ENR
1155 if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1157 /* disable the gasket and wait */
1158 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1159 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1163 * configure the gasket:
1164 * RMII, 50 MHz, no loopback, no echo
1165 * MII, 25 MHz, no loopback, no echo
1167 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1168 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1169 if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1170 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1171 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1173 /* re-enable the gasket */
1174 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1179 #if !defined(CONFIG_M5272)
1180 /* enable pause frame */
1181 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1182 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1183 ndev->phydev && ndev->phydev->pause)) {
1184 rcntl |= FEC_ENET_FCE;
1186 /* set FIFO threshold parameter to reduce overrun */
1187 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1188 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1189 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1190 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1193 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1195 rcntl &= ~FEC_ENET_FCE;
1197 #endif /* !defined(CONFIG_M5272) */
1199 writel(rcntl, fep->hwp + FEC_R_CNTRL);
1201 /* Setup multicast filter. */
1202 set_multicast_list(ndev);
1203 #ifndef CONFIG_M5272
1204 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1205 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1208 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1209 /* enable ENET endian swap */
1211 /* enable ENET store and forward mode */
1212 writel(1 << 8, fep->hwp + FEC_X_WMRK);
1215 if (fep->bufdesc_ex)
1218 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1220 ecntl |= FEC_ENET_TXC_DLY;
1221 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1223 ecntl |= FEC_ENET_RXC_DLY;
1225 #ifndef CONFIG_M5272
1226 /* Enable the MIB statistic event counters */
1227 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
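/* "0 << 31" is plain zero; the intent is to clear FEC_MIB_CTRLSTAT_DISABLE
 * (BIT(31)) so the counters run.
 */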
1230 /* And last, enable the transmit and receive processing */
1231 writel(ecntl, fep->hwp + FEC_ECNTRL);
1232 fec_enet_active_rxring(ndev);
1234 if (fep->bufdesc_ex)
1235 fec_ptp_start_cyclecounter(ndev);
1237 /* Enable interrupts we wish to service */
1239 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1241 writel(0, fep->hwp + FEC_IMASK);
1243 /* Init the interrupt coalescing */
1244 if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
1245 fec_enet_itr_coal_set(ndev);
1248 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
1250 if (!(of_machine_is_compatible("fsl,imx8qm") ||
1251 of_machine_is_compatible("fsl,imx8qxp") ||
1252 of_machine_is_compatible("fsl,imx8dxl")))
1255 return imx_scu_get_handle(&fep->ipc_handle);
1258 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
1260 struct device_node *np = fep->pdev->dev.of_node;
1264 if (!np || !fep->ipc_handle)
1267 idx = of_alias_get_id(np, "ethernet");
1270 rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
1272 val = enabled ? 1 : 0;
1273 imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
1276 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1278 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1279 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1281 if (stop_gpr->gpr) {
1283 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1285 BIT(stop_gpr->bit));
1287 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1288 BIT(stop_gpr->bit), 0);
1289 } else if (pdata && pdata->sleep_mode_enable) {
1290 pdata->sleep_mode_enable(enabled);
1292 fec_enet_ipg_stop_set(fep, enabled);
1296 static void fec_irqs_disable(struct net_device *ndev)
1298 struct fec_enet_private *fep = netdev_priv(ndev);
1300 writel(0, fep->hwp + FEC_IMASK);
1303 static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
1305 struct fec_enet_private *fep = netdev_priv(ndev);
1307 writel(0, fep->hwp + FEC_IMASK);
1308 writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1312 fec_stop(struct net_device *ndev)
1314 struct fec_enet_private *fep = netdev_priv(ndev);
1315 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
1318 /* We cannot expect a graceful transmit stop without link! */
1320 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1322 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1323 netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1326 /* Whack a reset. We should wait for this.
1327 * For the i.MX6SX SoC, the ENET block sits on the AXI bus, so we
1328 * disable the MAC instead of resetting it.
1330 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1331 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
1332 writel(0, fep->hwp + FEC_ECNTRL);
1334 writel(1, fep->hwp + FEC_ECNTRL);
1338 val = readl(fep->hwp + FEC_ECNTRL);
1339 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1340 writel(val, fep->hwp + FEC_ECNTRL);
1342 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1343 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1345 /* We have to keep ENET enabled to keep the MII interrupt working */
1346 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1347 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1348 writel(2, fep->hwp + FEC_ECNTRL);
1349 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1355 fec_timeout(struct net_device *ndev, unsigned int txqueue)
1357 struct fec_enet_private *fep = netdev_priv(ndev);
1361 ndev->stats.tx_errors++;
1363 schedule_work(&fep->tx_timeout_work);
1366 static void fec_enet_timeout_work(struct work_struct *work)
1368 struct fec_enet_private *fep =
1369 container_of(work, struct fec_enet_private, tx_timeout_work);
1370 struct net_device *ndev = fep->netdev;
1373 if (netif_device_present(ndev) || netif_running(ndev)) {
1374 napi_disable(&fep->napi);
1375 netif_tx_lock_bh(ndev);
1377 netif_tx_wake_all_queues(ndev);
1378 netif_tx_unlock_bh(ndev);
1379 napi_enable(&fep->napi);
1385 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1386 struct skb_shared_hwtstamps *hwtstamps)
1388 unsigned long flags;
1391 spin_lock_irqsave(&fep->tmreg_lock, flags);
1392 ns = timecounter_cyc2time(&fep->tc, ts);
1393 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1395 memset(hwtstamps, 0, sizeof(*hwtstamps));
1396 hwtstamps->hwtstamp = ns_to_ktime(ns);
1400 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
1402 struct fec_enet_private *fep;
1403 struct xdp_frame *xdpf;
1404 struct bufdesc *bdp;
1405 unsigned short status;
1406 struct sk_buff *skb;
1407 struct fec_enet_priv_tx_q *txq;
1408 struct netdev_queue *nq;
1414 fep = netdev_priv(ndev);
1416 txq = fep->tx_queue[queue_id];
1417 /* get next bdp of dirty_tx */
1418 nq = netdev_get_tx_queue(ndev, queue_id);
1419 bdp = txq->dirty_tx;
1421 /* get next bdp of dirty_tx */
1422 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1424 while (bdp != READ_ONCE(txq->bd.cur)) {
1425 /* Order the load of bd.cur and cbd_sc */
1427 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1428 if (status & BD_ENET_TX_READY)
1431 index = fec_enet_get_bd_index(bdp, &txq->bd);
1433 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1434 skb = txq->tx_buf[index].buf_p;
1435 if (bdp->cbd_bufaddr &&
1436 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1437 dma_unmap_single(&fep->pdev->dev,
1438 fec32_to_cpu(bdp->cbd_bufaddr),
1439 fec16_to_cpu(bdp->cbd_datlen),
1441 bdp->cbd_bufaddr = cpu_to_fec32(0);
1445 /* Tx processing cannot call any XDP (or page pool) APIs if
1446 * the "budget" is 0. A NAPI call with a budget of 0 (such as
1447 * from netpoll) indicates we may be in IRQ context, and we
1448 * can't use the page pool from IRQ context.
1450 if (unlikely(!budget))
1453 if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1454 xdpf = txq->tx_buf[index].buf_p;
1455 if (bdp->cbd_bufaddr)
1456 dma_unmap_single(&fep->pdev->dev,
1457 fec32_to_cpu(bdp->cbd_bufaddr),
1458 fec16_to_cpu(bdp->cbd_datlen),
1461 page = txq->tx_buf[index].buf_p;
1464 bdp->cbd_bufaddr = cpu_to_fec32(0);
1465 if (unlikely(!txq->tx_buf[index].buf_p)) {
1466 txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1470 frame_len = fec16_to_cpu(bdp->cbd_datlen);
1473 /* Check for errors. */
1474 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1475 BD_ENET_TX_RL | BD_ENET_TX_UN |
1477 ndev->stats.tx_errors++;
1478 if (status & BD_ENET_TX_HB) /* No heartbeat */
1479 ndev->stats.tx_heartbeat_errors++;
1480 if (status & BD_ENET_TX_LC) /* Late collision */
1481 ndev->stats.tx_window_errors++;
1482 if (status & BD_ENET_TX_RL) /* Retrans limit */
1483 ndev->stats.tx_aborted_errors++;
1484 if (status & BD_ENET_TX_UN) /* Underrun */
1485 ndev->stats.tx_fifo_errors++;
1486 if (status & BD_ENET_TX_CSL) /* Carrier lost */
1487 ndev->stats.tx_carrier_errors++;
1489 ndev->stats.tx_packets++;
1491 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1492 ndev->stats.tx_bytes += skb->len;
1494 ndev->stats.tx_bytes += frame_len;
1497 /* Deferred means some collisions occurred during transmit,
1498 * but we eventually sent the packet OK.
1500 if (status & BD_ENET_TX_DEF)
1501 ndev->stats.collisions++;
1503 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1504 /* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are
1505 * the ones to time stamp the packet, so we still need to check
1506 * the time stamping enabled flag.
1508 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1509 fep->hwts_tx_en) && fep->bufdesc_ex) {
1510 struct skb_shared_hwtstamps shhwtstamps;
1511 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1513 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1514 skb_tstamp_tx(skb, &shhwtstamps);
1517 /* Free the sk buffer associated with this last transmit */
1518 napi_consume_skb(skb, budget);
1519 } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1520 xdp_return_frame_rx_napi(xdpf);
1521 } else { /* recycle pages of XDP_TX frames */
1522 /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
1523 page_pool_put_page(page->pp, page, 0, true);
1526 txq->tx_buf[index].buf_p = NULL;
1527 /* restore default tx buffer type: FEC_TXBUF_T_SKB */
1528 txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1531 /* Make sure the update to bdp and tx_buf are performed
1535 txq->dirty_tx = bdp;
1537 /* Update pointer to next buffer descriptor to be transmitted */
1538 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1540 /* Since we have freed up a buffer, the ring is no longer full
1542 if (netif_tx_queue_stopped(nq)) {
1543 entries_free = fec_enet_get_free_txdesc_num(txq);
1544 if (entries_free >= txq->tx_wake_threshold)
1545 netif_tx_wake_queue(nq);
1549 /* ERR006358: Keep the transmitter going */
1550 if (bdp != txq->bd.cur &&
1551 readl(txq->bd.reg_desc_active) == 0)
1552 writel(0, txq->bd.reg_desc_active);
1555 static void fec_enet_tx(struct net_device *ndev, int budget)
1557 struct fec_enet_private *fep = netdev_priv(ndev);
1560 /* Make sure that AVB queues are processed first. */
1561 for (i = fep->num_tx_queues - 1; i >= 0; i--)
1562 fec_enet_tx_queue(ndev, i, budget);
1565 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
1566 struct bufdesc *bdp, int index)
1568 struct page *new_page;
1569 dma_addr_t phys_addr;
1571 new_page = page_pool_dev_alloc_pages(rxq->page_pool);
1573 rxq->rx_skb_info[index].page = new_page;
1575 rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
1576 phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
1577 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
1581 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
1582 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
1584 unsigned int sync, len = xdp->data_end - xdp->data;
1585 u32 ret = FEC_ENET_XDP_PASS;
1590 act = bpf_prog_run_xdp(prog, xdp);
1592 /* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync for_device must cover the max length the CPU touched. */
1595 sync = xdp->data_end - xdp->data;
1596 sync = max(sync, len);
1600 rxq->stats[RX_XDP_PASS]++;
1601 ret = FEC_ENET_XDP_PASS;
1605 rxq->stats[RX_XDP_REDIRECT]++;
1606 err = xdp_do_redirect(fep->netdev, xdp, prog);
1610 ret = FEC_ENET_XDP_REDIR;
1614 rxq->stats[RX_XDP_TX]++;
1615 err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
1616 if (unlikely(err)) {
1617 rxq->stats[RX_XDP_TX_ERRORS]++;
1621 ret = FEC_ENET_XDP_TX;
1625 bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
1629 fallthrough; /* handle aborts by dropping packet */
1632 rxq->stats[RX_XDP_DROP]++;
1634 ret = FEC_ENET_XDP_CONSUMED;
1635 page = virt_to_head_page(xdp->data);
1636 page_pool_put_page(rxq->page_pool, page, sync, true);
1637 if (act != XDP_DROP)
1638 trace_xdp_exception(fep->netdev, prog, act);
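/* Verdict summary for fec_enet_run_xdp(): XDP_PASS falls through to normal
 * skb delivery, XDP_TX bounces the frame back out via fec_enet_xdp_tx_xmit(),
 * XDP_REDIRECT hands it to xdp_do_redirect(), and everything else recycles
 * the page straight back into the pool.
 */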
1645 /* During a receive, the bd_rx.cur points to the current incoming buffer.
1646 * When we update through the ring, if the next incoming buffer has
1647 * not been given to the system, we just set the empty indicator,
1648 * effectively tossing the packet.
1651 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1653 struct fec_enet_private *fep = netdev_priv(ndev);
1654 struct fec_enet_priv_rx_q *rxq;
1655 struct bufdesc *bdp;
1656 unsigned short status;
1657 struct sk_buff *skb;
1660 int pkt_received = 0;
1661 struct bufdesc_ex *ebdp = NULL;
1662 bool vlan_packet_rcvd = false;
1665 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1666 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
1667 u32 ret, xdp_result = FEC_ENET_XDP_PASS;
1668 u32 data_start = FEC_ENET_XDP_HEADROOM;
1669 int cpu = smp_processor_id();
1670 struct xdp_buff xdp;
1674 #if !defined(CONFIG_M5272)
1675 /* If it has the FEC_QUIRK_HAS_RACC quirk property, the
1676 * FEC_RACC_SHIFT16 bit is set by default in the probe function.
1678 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1684 #if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
1686 * Hacky flush of all caches instead of using the DMA API for the TSO headers.
1691 rxq = fep->rx_queue[queue_id];
1693 /* First, grab all of the stats for the incoming packet.
1694 * These get messed up if we get called due to a busy condition.
1697 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
1699 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1701 if (pkt_received >= budget)
1705 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1707 /* Check for errors. */
1708 status ^= BD_ENET_RX_LAST;
1709 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1710 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1712 ndev->stats.rx_errors++;
1713 if (status & BD_ENET_RX_OV) {
1715 ndev->stats.rx_fifo_errors++;
1716 goto rx_processing_done;
1718 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1719 | BD_ENET_RX_LAST)) {
1720 /* Frame too long or too short. */
1721 ndev->stats.rx_length_errors++;
1722 if (status & BD_ENET_RX_LAST)
1723 netdev_err(ndev, "rcv is not +last\n");
1725 if (status & BD_ENET_RX_CR) /* CRC Error */
1726 ndev->stats.rx_crc_errors++;
1727 /* Report late collisions as a frame error. */
1728 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1729 ndev->stats.rx_frame_errors++;
1730 goto rx_processing_done;
1733 /* Process the incoming frame. */
1734 ndev->stats.rx_packets++;
1735 pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1736 ndev->stats.rx_bytes += pkt_len;
1738 index = fec_enet_get_bd_index(bdp, &rxq->bd);
1739 page = rxq->rx_skb_info[index].page;
1740 dma_sync_single_for_cpu(&fep->pdev->dev,
1741 fec32_to_cpu(bdp->cbd_bufaddr),
1744 prefetch(page_address(page));
1745 fec_enet_update_cbd(rxq, bdp, index);
1748 xdp_buff_clear_frags_flag(&xdp);
1749 /* subtract the 16-bit shift and the FCS */
1750 xdp_prepare_buff(&xdp, page_address(page),
1751 data_start, pkt_len - sub_len, false);
1752 ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
1754 if (ret != FEC_ENET_XDP_PASS)
1755 goto rx_processing_done;
1758 /* The packet length includes FCS, but we don't want to
1759 * include that when passing upstream as it messes up
1760 * bridging applications.
1762 skb = build_skb(page_address(page), PAGE_SIZE);
1763 if (unlikely(!skb)) {
1764 page_pool_recycle_direct(rxq->page_pool, page);
1765 ndev->stats.rx_dropped++;
1767 netdev_err_once(ndev, "build_skb failed!\n");
1768 goto rx_processing_done;
1771 skb_reserve(skb, data_start);
1772 skb_put(skb, pkt_len - sub_len);
1773 skb_mark_for_recycle(skb);
1775 if (unlikely(need_swap)) {
1776 data = page_address(page) + FEC_ENET_XDP_HEADROOM;
1777 swap_buffer(data, pkt_len);
1781 /* Extract the enhanced buffer descriptor */
1783 if (fep->bufdesc_ex)
1784 ebdp = (struct bufdesc_ex *)bdp;
1786 /* If this is a VLAN packet remove the VLAN Tag */
1787 vlan_packet_rcvd = false;
1788 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1790 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1791 /* Push and remove the vlan tag */
1792 struct vlan_hdr *vlan_header =
1793 (struct vlan_hdr *) (data + ETH_HLEN);
1794 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1796 vlan_packet_rcvd = true;
1798 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1799 skb_pull(skb, VLAN_HLEN);
1802 skb->protocol = eth_type_trans(skb, ndev);
1804 /* Get receive timestamp from the skb */
1805 if (fep->hwts_rx_en && fep->bufdesc_ex)
1806 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1807 skb_hwtstamps(skb));
1809 if (fep->bufdesc_ex &&
1810 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1811 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1812 /* don't check it */
1813 skb->ip_summed = CHECKSUM_UNNECESSARY;
1815 skb_checksum_none_assert(skb);
1819 /* Handle received VLAN packets */
1820 if (vlan_packet_rcvd)
1821 __vlan_hwaccel_put_tag(skb,
1825 skb_record_rx_queue(skb, queue_id);
1826 napi_gro_receive(&fep->napi, skb);
1829 /* Clear the status flags for this buffer */
1830 status &= ~BD_ENET_RX_STATS;
1832 /* Mark the buffer empty */
1833 status |= BD_ENET_RX_EMPTY;
1835 if (fep->bufdesc_ex) {
1836 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1838 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1842 /* Make sure the updates to rest of the descriptor are
1843 * performed before transferring ownership.
1846 bdp->cbd_sc = cpu_to_fec16(status);
1848 /* Update BD pointer to next entry */
1849 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1851 /* Doing this here will keep the FEC running while we process
1852 * incoming frames. On a heavily loaded network, we should be
1853 * able to keep up at the expense of system resources.
1855 writel(0, rxq->bd.reg_desc_active);
1859 if (xdp_result & FEC_ENET_XDP_REDIR)
1862 return pkt_received;
1865 static int fec_enet_rx(struct net_device *ndev, int budget)
1867 struct fec_enet_private *fep = netdev_priv(ndev);
1870 /* Make sure that AVB queues are processed first. */
1871 for (i = fep->num_rx_queues - 1; i >= 0; i--)
1872 done += fec_enet_rx_queue(ndev, budget - done, i);
1877 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1881 int_events = readl(fep->hwp + FEC_IEVENT);
1883 /* Don't clear MDIO events, we poll for those */
1884 int_events &= ~FEC_ENET_MII;
1886 writel(int_events, fep->hwp + FEC_IEVENT);
1888 return int_events != 0;
1892 fec_enet_interrupt(int irq, void *dev_id)
1894 struct net_device *ndev = dev_id;
1895 struct fec_enet_private *fep = netdev_priv(ndev);
1896 irqreturn_t ret = IRQ_NONE;
1898 if (fec_enet_collect_events(fep) && fep->link) {
1901 if (napi_schedule_prep(&fep->napi)) {
1902 /* Disable interrupts */
1903 writel(0, fep->hwp + FEC_IMASK);
1904 __napi_schedule(&fep->napi);
1911 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1913 struct net_device *ndev = napi->dev;
1914 struct fec_enet_private *fep = netdev_priv(ndev);
1918 done += fec_enet_rx(ndev, budget - done);
1919 fec_enet_tx(ndev, budget);
1920 } while ((done < budget) && fec_enet_collect_events(fep));
1922 if (done < budget) {
1923 napi_complete_done(napi, done);
1924 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
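/* Interrupts are re-armed only after napi_complete_done(); the
 * fec_enet_collect_events() check in the loop condition above closes the
 * race with events that arrived while they were masked.
 */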
1930 /* ------------------------------------------------------------------------- */
1931 static int fec_get_mac(struct net_device *ndev)
1933 struct fec_enet_private *fep = netdev_priv(ndev);
1934 unsigned char *iap, tmpaddr[ETH_ALEN];
1938 * try to get the MAC address in the following order:
1940 * 1) module parameter via kernel command line in form
1941 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1946 * 2) from device tree data
1948 if (!is_valid_ether_addr(iap)) {
1949 struct device_node *np = fep->pdev->dev.of_node;
1951 ret = of_get_mac_address(np, tmpaddr);
1954 else if (ret == -EPROBE_DEFER)
1960 * 3) from flash or fuse (via platform data)
1962 if (!is_valid_ether_addr(iap)) {
1965 iap = (unsigned char *)FEC_FLASHMAC;
1967 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1970 iap = (unsigned char *)&pdata->mac;
1975 * 4) FEC mac registers set by bootloader
1977 if (!is_valid_ether_addr(iap)) {
1978 *((__be32 *) &tmpaddr[0]) =
1979 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1980 *((__be16 *) &tmpaddr[4]) =
1981 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1986 * 5) random mac address
1988 if (!is_valid_ether_addr(iap)) {
1989 /* Report it and use a random ethernet address instead */
1990 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
1991 eth_hw_addr_random(ndev);
1992 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
1997 /* Adjust MAC if using macaddr */
1998 eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
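/* eth_hw_addr_gen() derives the port address by adding dev_id to the shared
 * module-parameter MAC, so the two controllers of a dual-FEC SoC do not end
 * up with identical addresses.
 */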
2003 /* ------------------------------------------------------------------------- */
2008 static void fec_enet_adjust_link(struct net_device *ndev)
2010 struct fec_enet_private *fep = netdev_priv(ndev);
2011 struct phy_device *phy_dev = ndev->phydev;
2012 int status_change = 0;
2015 * If the netdev is down, or is going down, we're not interested
2016 * in link state events, so just mark our idea of the link as down
2017 * and ignore the event.
2019 if (!netif_running(ndev) || !netif_device_present(ndev)) {
2021 } else if (phy_dev->link) {
2023 fep->link = phy_dev->link;
2027 if (fep->full_duplex != phy_dev->duplex) {
2028 fep->full_duplex = phy_dev->duplex;
2032 if (phy_dev->speed != fep->speed) {
2033 fep->speed = phy_dev->speed;
2037 /* if any of the above changed restart the FEC */
2038 if (status_change) {
2039 napi_disable(&fep->napi);
2040 netif_tx_lock_bh(ndev);
2042 netif_tx_wake_all_queues(ndev);
2043 netif_tx_unlock_bh(ndev);
2044 napi_enable(&fep->napi);
2048 napi_disable(&fep->napi);
2049 netif_tx_lock_bh(ndev);
2051 netif_tx_unlock_bh(ndev);
2052 napi_enable(&fep->napi);
2053 fep->link = phy_dev->link;
2059 phy_print_status(phy_dev);
2062 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
2067 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
2068 ievent & FEC_ENET_MII, 2, 30000);
2071 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2076 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
2078 struct fec_enet_private *fep = bus->priv;
2079 struct device *dev = &fep->pdev->dev;
2080 int ret = 0, frame_start, frame_addr, frame_op;
2082 ret = pm_runtime_resume_and_get(dev);
2087 frame_op = FEC_MMFR_OP_READ;
2088 frame_start = FEC_MMFR_ST;
2089 frame_addr = regnum;
2091 /* start a read op */
2092 writel(frame_start | frame_op |
2093 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2094 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2096 /* wait for end of transfer */
2097 ret = fec_enet_mdio_wait(fep);
2099 netdev_err(fep->netdev, "MDIO read timeout\n");
2103 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2106 pm_runtime_mark_last_busy(dev);
2107 pm_runtime_put_autosuspend(dev);
2112 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
2113 int devad, int regnum)
2115 struct fec_enet_private *fep = bus->priv;
2116 struct device *dev = &fep->pdev->dev;
2117 int ret = 0, frame_start, frame_op;
2119 ret = pm_runtime_resume_and_get(dev);
2123 frame_start = FEC_MMFR_ST_C45;
2126 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2127 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2128 FEC_MMFR_TA | (regnum & 0xFFFF),
2129 fep->hwp + FEC_MII_DATA);
2131 /* wait for end of transfer */
2132 ret = fec_enet_mdio_wait(fep);
2134 netdev_err(fep->netdev, "MDIO address write timeout\n");
2138 frame_op = FEC_MMFR_OP_READ_C45;
2140 /* start a read op */
2141 writel(frame_start | frame_op |
2142 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2143 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2145 /* wait for end of transfer */
2146 ret = fec_enet_mdio_wait(fep);
2148 netdev_err(fep->netdev, "MDIO read timeout\n");
2152 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2155 pm_runtime_mark_last_busy(dev);
2156 pm_runtime_put_autosuspend(dev);
2161 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
2164 struct fec_enet_private *fep = bus->priv;
2165 struct device *dev = &fep->pdev->dev;
2166 int ret, frame_start, frame_addr;
2168 ret = pm_runtime_resume_and_get(dev);
2173 frame_start = FEC_MMFR_ST;
2174 frame_addr = regnum;
2176 /* start a write op */
2177 writel(frame_start | FEC_MMFR_OP_WRITE |
2178 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2179 FEC_MMFR_TA | FEC_MMFR_DATA(value),
2180 fep->hwp + FEC_MII_DATA);
2182 /* wait for end of transfer */
2183 ret = fec_enet_mdio_wait(fep);
2185 netdev_err(fep->netdev, "MDIO write timeout\n");
2187 pm_runtime_mark_last_busy(dev);
2188 pm_runtime_put_autosuspend(dev);
2193 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2194 int devad, int regnum, u16 value)
2196 struct fec_enet_private *fep = bus->priv;
2197 struct device *dev = &fep->pdev->dev;
2198 int ret, frame_start;
2200 ret = pm_runtime_resume_and_get(dev);
2204 frame_start = FEC_MMFR_ST_C45;
2207 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2208 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2209 FEC_MMFR_TA | (regnum & 0xFFFF),
2210 fep->hwp + FEC_MII_DATA);
2212 /* wait for end of transfer */
2213 ret = fec_enet_mdio_wait(fep);
2215 netdev_err(fep->netdev, "MDIO address write timeout\n");
2219 /* start a write op */
2220 writel(frame_start | FEC_MMFR_OP_WRITE |
2221 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2222 FEC_MMFR_TA | FEC_MMFR_DATA(value),
2223 fep->hwp + FEC_MII_DATA);
2225 /* wait for end of transfer */
2226 ret = fec_enet_mdio_wait(fep);
2228 netdev_err(fep->netdev, "MDIO write timeout\n");
2231 pm_runtime_mark_last_busy(dev);
2232 pm_runtime_put_autosuspend(dev);
2237 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2239 struct fec_enet_private *fep = netdev_priv(ndev);
2240 struct phy_device *phy_dev = ndev->phydev;
2243 phy_reset_after_clk_enable(phy_dev);
2244 } else if (fep->phy_node) {
2246 * If the PHY is still not bound to the MAC, but there is
2247 * an OF PHY node and a matching PHY device instance already,
2248 * use the OF PHY node to obtain the PHY device instance,
2249 * and then use that PHY device instance when triggering a PHY reset.
2252 phy_dev = of_phy_find_device(fep->phy_node);
2253 phy_reset_after_clk_enable(phy_dev);
2254 put_device(&phy_dev->mdio.dev);
2258 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2260 struct fec_enet_private *fep = netdev_priv(ndev);
2264 ret = clk_prepare_enable(fep->clk_enet_out);
2269 mutex_lock(&fep->ptp_clk_mutex);
2270 ret = clk_prepare_enable(fep->clk_ptp);
2272 mutex_unlock(&fep->ptp_clk_mutex);
2273 goto failed_clk_ptp;
2275 fep->ptp_clk_on = true;
2277 mutex_unlock(&fep->ptp_clk_mutex);
2280 ret = clk_prepare_enable(fep->clk_ref);
2282 goto failed_clk_ref;
2284 ret = clk_prepare_enable(fep->clk_2x_txclk);
2286 goto failed_clk_2x_txclk;
2288 fec_enet_phy_reset_after_clk_enable(ndev);
2290 clk_disable_unprepare(fep->clk_enet_out);
2292 mutex_lock(&fep->ptp_clk_mutex);
2293 clk_disable_unprepare(fep->clk_ptp);
2294 fep->ptp_clk_on = false;
2295 mutex_unlock(&fep->ptp_clk_mutex);
2297 clk_disable_unprepare(fep->clk_ref);
2298 clk_disable_unprepare(fep->clk_2x_txclk);
2303 failed_clk_2x_txclk:
2305 clk_disable_unprepare(fep->clk_ref);
2308 mutex_lock(&fep->ptp_clk_mutex);
2309 clk_disable_unprepare(fep->clk_ptp);
2310 fep->ptp_clk_on = false;
2311 mutex_unlock(&fep->ptp_clk_mutex);
2314 clk_disable_unprepare(fep->clk_enet_out);
2319 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2320 struct device_node *np)
2322 u32 rgmii_tx_delay, rgmii_rx_delay;
2324 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2325 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2326 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2327 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2329 } else if (rgmii_tx_delay == 2000) {
2330 fep->rgmii_txc_dly = true;
2334 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2335 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2336 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2337 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2339 } else if (rgmii_rx_delay == 2000) {
2340 fep->rgmii_rxc_dly = true;
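/* An illustrative (hypothetical) device tree fragment matching the
 * parsing above; the node label and phy-mode value are examples only:
 *
 *	&fec1 {
 *		phy-mode = "rgmii-id";
 *		tx-internal-delay-ps = <2000>;
 *		rx-internal-delay-ps = <2000>;
 *	};
 */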
2347 static int fec_enet_mii_probe(struct net_device *ndev)
2349 struct fec_enet_private *fep = netdev_priv(ndev);
2350 struct phy_device *phy_dev = NULL;
2351 char mdio_bus_id[MII_BUS_ID_SIZE];
2352 char phy_name[MII_BUS_ID_SIZE + 3];
2354 int dev_id = fep->dev_id;
2356 if (fep->phy_node) {
2357 phy_dev = of_phy_connect(ndev, fep->phy_node,
2358 &fec_enet_adjust_link, 0,
2359 fep->phy_interface);
2361 netdev_err(ndev, "Unable to connect to phy\n");
2365 /* check for attached phy */
2366 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2367 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2371 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2375 if (phy_id >= PHY_MAX_ADDR) {
2376 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2377 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2381 snprintf(phy_name, sizeof(phy_name),
2382 PHY_ID_FMT, mdio_bus_id, phy_id);
2383 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2384 fep->phy_interface);
2387 if (IS_ERR(phy_dev)) {
2388 netdev_err(ndev, "could not attach to PHY\n");
2389 return PTR_ERR(phy_dev);
2392 /* mask with MAC supported features */
2393 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2394 phy_set_max_speed(phy_dev, 1000);
2395 phy_remove_link_mode(phy_dev,
2396 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2397 #if !defined(CONFIG_M5272)
2398 phy_support_sym_pause(phy_dev);
2402 phy_set_max_speed(phy_dev, 100);
2405 fep->full_duplex = 0;
2407 phy_dev->mac_managed_pm = true;
2409 phy_attached_info(phy_dev);
2414 static int fec_enet_mii_init(struct platform_device *pdev)
2416 static struct mii_bus *fec0_mii_bus;
2417 struct net_device *ndev = platform_get_drvdata(pdev);
2418 struct fec_enet_private *fep = netdev_priv(ndev);
2419 bool suppress_preamble = false;
2420 struct device_node *node;
2422 u32 mii_speed, holdtime;
2425 /*
2426 * The i.MX28 dual fec interfaces are not equal.
2427 * Here are the differences:
2428 *
2429 * - fec0 supports MII & RMII modes while fec1 only supports RMII
2430 * - fec0 acts as the 1588 time master while fec1 is slave
2431 * - external PHYs can only be configured by fec0
2432 *
2433 * That is to say, fec1 cannot work independently. It only works
2434 * when fec0 is working. The reason behind this design is that the
2435 * second interface is added primarily for Switch mode.
2436 *
2437 * Because of the last point above, both PHYs are attached to the
2438 * fec0 MDIO interface in board designs, and need to be configured by
2439 * fec0 mii_bus.
2440 */
2441 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2442 /* fec1 uses fec0 mii_bus */
2443 if (mii_cnt && fec0_mii_bus) {
2444 fep->mii_bus = fec0_mii_bus;
2451 bus_freq = 2500000; /* 2.5MHz by default */
2452 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2454 of_property_read_u32(node, "clock-frequency", &bus_freq);
2455 suppress_preamble = of_property_read_bool(node,
2456 "suppress-preamble");
2459 /*
2460 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
2461 *
2462 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2463 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
2464 * Reference Manual has an error on this, which is fixed in the i.MX6Q
2465 * document.
2466 */
2467 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2468 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2470 if (mii_speed > 63) {
2472 "fec clock (%lu) too fast to get right mii speed\n",
2473 clk_get_rate(fep->clk_ipg));
2478 /*
2479 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2480 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2481 * versions are RAZ there, so just ignore the difference and write the
2482 * register always.
2483 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2484 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2485 * output.
2486 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2487 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2488 * holdtime cannot result in a value greater than 3.
2489 */
2490 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2492 fep->phy_speed = mii_speed << 1 | holdtime << 8;
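/* Worked example (numbers illustrative): with a 66 MHz ipg clock and
 * the default 2.5 MHz bus_freq, mii_speed = ceil(66e6 / 5e6) = 14,
 * minus one on ENET-MAC variants = 13, giving an actual MDC of
 * 66e6 / ((13 + 1) * 2) ~= 2.36 MHz; holdtime = ceil(66e6 / 1e8) - 1
 * = 0, so phy_speed = 13 << 1 = 0x1a.
 */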
2494 if (suppress_preamble)
2495 fep->phy_speed |= BIT(7);
2497 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2498 /* Clear MMFR to avoid generating an MII event when writing MSCR.
2499 * MII event generation condition:
2500 * - writing MSCR:
2501 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2502 * mscr_reg_data_in[7:0] != 0
2503 * - writing MMFR:
2504 * - mscr[7:0]_not_zero
2505 */
2506 writel(0, fep->hwp + FEC_MII_DATA);
2509 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2511 /* Clear any pending transaction complete indication */
2512 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2514 fep->mii_bus = mdiobus_alloc();
2515 if (fep->mii_bus == NULL) {
2520 fep->mii_bus->name = "fec_enet_mii_bus";
2521 fep->mii_bus->read = fec_enet_mdio_read_c22;
2522 fep->mii_bus->write = fec_enet_mdio_write_c22;
2523 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2524 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2525 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2527 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2528 pdev->name, fep->dev_id + 1);
2529 fep->mii_bus->priv = fep;
2530 fep->mii_bus->parent = &pdev->dev;
2532 err = of_mdiobus_register(fep->mii_bus, node);
2534 goto err_out_free_mdiobus;
2539 /* save fec0 mii_bus */
2540 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2541 fec0_mii_bus = fep->mii_bus;
2545 err_out_free_mdiobus:
2546 mdiobus_free(fep->mii_bus);
2552 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2554 if (--mii_cnt == 0) {
2555 mdiobus_unregister(fep->mii_bus);
2556 mdiobus_free(fep->mii_bus);
2560 static void fec_enet_get_drvinfo(struct net_device *ndev,
2561 struct ethtool_drvinfo *info)
2563 struct fec_enet_private *fep = netdev_priv(ndev);
2565 strscpy(info->driver, fep->pdev->dev.driver->name,
2566 sizeof(info->driver));
2567 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2570 static int fec_enet_get_regs_len(struct net_device *ndev)
2572 struct fec_enet_private *fep = netdev_priv(ndev);
2576 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2578 s = resource_size(r);
2583 /* List of registers that can safely be read to dump them with ethtool */
2584 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2585 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2586 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2587 static __u32 fec_enet_register_version = 2;
2588 static u32 fec_enet_register_offset[] = {
2589 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2590 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2591 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2592 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2593 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2594 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2595 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2596 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2597 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2598 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2599 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2600 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2601 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2602 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2603 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2604 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2605 RMON_T_P_GTE2048, RMON_T_OCTETS,
2606 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2607 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2608 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2609 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2610 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2611 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2612 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2613 RMON_R_P_GTE2048, RMON_R_OCTETS,
2614 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2615 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2618 static u32 fec_enet_register_offset_6ul[] = {
2619 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2620 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2621 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2622 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2623 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2624 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2625 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2626 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2627 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2628 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2629 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2630 RMON_T_P_GTE2048, RMON_T_OCTETS,
2631 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2632 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2633 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2634 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2635 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2636 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2637 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2638 RMON_R_P_GTE2048, RMON_R_OCTETS,
2639 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2640 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2643 static __u32 fec_enet_register_version = 1;
2644 static u32 fec_enet_register_offset[] = {
2645 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2646 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2647 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2648 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2649 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2650 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2651 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2652 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2653 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2657 static void fec_enet_get_regs(struct net_device *ndev,
2658 struct ethtool_regs *regs, void *regbuf)
2660 struct fec_enet_private *fep = netdev_priv(ndev);
2661 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2662 struct device *dev = &fep->pdev->dev;
2663 u32 *buf = (u32 *)regbuf;
2666 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2667 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2668 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2672 if (!of_machine_is_compatible("fsl,imx6ul")) {
2673 reg_list = fec_enet_register_offset;
2674 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2676 reg_list = fec_enet_register_offset_6ul;
2677 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2681 static u32 *reg_list = fec_enet_register_offset;
2682 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2684 ret = pm_runtime_resume_and_get(dev);
2688 regs->version = fec_enet_register_version;
2690 memset(buf, 0, regs->len);
2692 for (i = 0; i < reg_cnt; i++) {
2695 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2696 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2700 buf[off] = readl(&theregs[off]);
2703 pm_runtime_mark_last_busy(dev);
2704 pm_runtime_put_autosuspend(dev);
2707 static int fec_enet_get_ts_info(struct net_device *ndev,
2708 struct ethtool_ts_info *info)
2710 struct fec_enet_private *fep = netdev_priv(ndev);
2712 if (fep->bufdesc_ex) {
2714 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2715 SOF_TIMESTAMPING_RX_SOFTWARE |
2716 SOF_TIMESTAMPING_SOFTWARE |
2717 SOF_TIMESTAMPING_TX_HARDWARE |
2718 SOF_TIMESTAMPING_RX_HARDWARE |
2719 SOF_TIMESTAMPING_RAW_HARDWARE;
2721 info->phc_index = ptp_clock_index(fep->ptp_clock);
2723 info->phc_index = -1;
2725 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2726 (1 << HWTSTAMP_TX_ON);
2728 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2729 (1 << HWTSTAMP_FILTER_ALL);
2732 return ethtool_op_get_ts_info(ndev, info);
2736 #if !defined(CONFIG_M5272)
2738 static void fec_enet_get_pauseparam(struct net_device *ndev,
2739 struct ethtool_pauseparam *pause)
2741 struct fec_enet_private *fep = netdev_priv(ndev);
2743 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2744 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2745 pause->rx_pause = pause->tx_pause;
2748 static int fec_enet_set_pauseparam(struct net_device *ndev,
2749 struct ethtool_pauseparam *pause)
2751 struct fec_enet_private *fep = netdev_priv(ndev);
2756 if (pause->tx_pause != pause->rx_pause) {
2758 "hardware only support enable/disable both tx and rx");
2762 fep->pause_flag = 0;
2764 /* tx pause must be the same as rx pause */
2765 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2766 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2768 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2771 if (pause->autoneg) {
2772 if (netif_running(ndev))
2774 phy_start_aneg(ndev->phydev);
2776 if (netif_running(ndev)) {
2777 napi_disable(&fep->napi);
2778 netif_tx_lock_bh(ndev);
2780 netif_tx_wake_all_queues(ndev);
2781 netif_tx_unlock_bh(ndev);
2782 napi_enable(&fep->napi);
2788 static const struct fec_stat {
2789 char name[ETH_GSTRING_LEN];
2793 { "tx_dropped", RMON_T_DROP },
2794 { "tx_packets", RMON_T_PACKETS },
2795 { "tx_broadcast", RMON_T_BC_PKT },
2796 { "tx_multicast", RMON_T_MC_PKT },
2797 { "tx_crc_errors", RMON_T_CRC_ALIGN },
2798 { "tx_undersize", RMON_T_UNDERSIZE },
2799 { "tx_oversize", RMON_T_OVERSIZE },
2800 { "tx_fragment", RMON_T_FRAG },
2801 { "tx_jabber", RMON_T_JAB },
2802 { "tx_collision", RMON_T_COL },
2803 { "tx_64byte", RMON_T_P64 },
2804 { "tx_65to127byte", RMON_T_P65TO127 },
2805 { "tx_128to255byte", RMON_T_P128TO255 },
2806 { "tx_256to511byte", RMON_T_P256TO511 },
2807 { "tx_512to1023byte", RMON_T_P512TO1023 },
2808 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2809 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2810 { "tx_octets", RMON_T_OCTETS },
2813 { "IEEE_tx_drop", IEEE_T_DROP },
2814 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2815 { "IEEE_tx_1col", IEEE_T_1COL },
2816 { "IEEE_tx_mcol", IEEE_T_MCOL },
2817 { "IEEE_tx_def", IEEE_T_DEF },
2818 { "IEEE_tx_lcol", IEEE_T_LCOL },
2819 { "IEEE_tx_excol", IEEE_T_EXCOL },
2820 { "IEEE_tx_macerr", IEEE_T_MACERR },
2821 { "IEEE_tx_cserr", IEEE_T_CSERR },
2822 { "IEEE_tx_sqe", IEEE_T_SQE },
2823 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2824 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2827 { "rx_packets", RMON_R_PACKETS },
2828 { "rx_broadcast", RMON_R_BC_PKT },
2829 { "rx_multicast", RMON_R_MC_PKT },
2830 { "rx_crc_errors", RMON_R_CRC_ALIGN },
2831 { "rx_undersize", RMON_R_UNDERSIZE },
2832 { "rx_oversize", RMON_R_OVERSIZE },
2833 { "rx_fragment", RMON_R_FRAG },
2834 { "rx_jabber", RMON_R_JAB },
2835 { "rx_64byte", RMON_R_P64 },
2836 { "rx_65to127byte", RMON_R_P65TO127 },
2837 { "rx_128to255byte", RMON_R_P128TO255 },
2838 { "rx_256to511byte", RMON_R_P256TO511 },
2839 { "rx_512to1023byte", RMON_R_P512TO1023 },
2840 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2841 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2842 { "rx_octets", RMON_R_OCTETS },
2845 { "IEEE_rx_drop", IEEE_R_DROP },
2846 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2847 { "IEEE_rx_crc", IEEE_R_CRC },
2848 { "IEEE_rx_align", IEEE_R_ALIGN },
2849 { "IEEE_rx_macerr", IEEE_R_MACERR },
2850 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2851 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2854 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
2856 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2857 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
2858 "rx_xdp_pass", /* RX_XDP_PASS, */
2859 "rx_xdp_drop", /* RX_XDP_DROP, */
2860 "rx_xdp_tx", /* RX_XDP_TX, */
2861 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
2862 "tx_xdp_xmit", /* TX_XDP_XMIT, */
2863 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
2866 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2868 struct fec_enet_private *fep = netdev_priv(dev);
2871 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2872 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2875 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2877 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2878 struct fec_enet_priv_rx_q *rxq;
2881 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2882 rxq = fep->rx_queue[i];
2884 for (j = 0; j < XDP_STATS_TOTAL; j++)
2885 xdp_stats[j] += rxq->stats[j];
2888 memcpy(data, xdp_stats, sizeof(xdp_stats));
2891 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2893 #ifdef CONFIG_PAGE_POOL_STATS
2894 struct page_pool_stats stats = {};
2895 struct fec_enet_priv_rx_q *rxq;
2898 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2899 rxq = fep->rx_queue[i];
2901 if (!rxq->page_pool)
2904 page_pool_get_stats(rxq->page_pool, &stats);
2907 page_pool_ethtool_stats_get(data, &stats);
2911 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2912 struct ethtool_stats *stats, u64 *data)
2914 struct fec_enet_private *fep = netdev_priv(dev);
2916 if (netif_running(dev))
2917 fec_enet_update_ethtool_stats(dev);
2919 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2920 data += FEC_STATS_SIZE / sizeof(u64);
2922 fec_enet_get_xdp_stats(fep, data);
2923 data += XDP_STATS_TOTAL;
2925 fec_enet_page_pool_stats(fep, data);
2928 static void fec_enet_get_strings(struct net_device *netdev,
2929 u32 stringset, u8 *data)
2932 switch (stringset) {
2934 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
2935 ethtool_puts(&data, fec_stats[i].name);
2937 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
2938 ethtool_puts(&data, fec_xdp_stat_strs[i]);
2940 page_pool_ethtool_stats_get_strings(data);
2944 net_selftest_get_strings(data);
2949 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2955 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
2956 count += page_pool_ethtool_stats_get_count();
2960 return net_selftest_get_count();
2966 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
2968 struct fec_enet_private *fep = netdev_priv(dev);
2969 struct fec_enet_priv_rx_q *rxq;
2972 /* Disable MIB statistics counters */
2973 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2975 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2976 writel(0, fep->hwp + fec_stats[i].offset);
2978 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2979 rxq = fep->rx_queue[i];
2980 for (j = 0; j < XDP_STATS_TOTAL; j++)
2984 /* Don't disable MIB statistics counters */
2985 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2988 #else /* !defined(CONFIG_M5272) */
2989 #define FEC_STATS_SIZE 0
2990 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
2994 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
2997 #endif /* !defined(CONFIG_M5272) */
2999 /* The ITR clock source is the enet system clock (clk_ahb).
3000 * The TCTT unit is cycle_ns * 64 cycles, so the
3001 * ICTT value = X us / (cycle_ns * 64).
3002 */
3003 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3005 struct fec_enet_private *fep = netdev_priv(ndev);
3007 return us * (fep->itr_clk_rate / 64000) / 1000;
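/* Worked example (numbers illustrative): with a 66 MHz AHB clock,
 * itr_clk_rate / 64000 = 1031, so us = 1000 converts to an ICTT
 * value of 1000 * 1031 / 1000 = 1031.
 */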
3010 /* Set threshold for interrupt coalescing */
3011 static void fec_enet_itr_coal_set(struct net_device *ndev)
3013 struct fec_enet_private *fep = netdev_priv(ndev);
3016 /* Must be greater than zero to avoid unpredictable behavior */
3017 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
3018 !fep->tx_time_itr || !fep->tx_pkts_itr)
3021 /* Select enet system clock as Interrupt Coalescing
3022 * timer Clock Source
3024 rx_itr = FEC_ITR_CLK_SEL;
3025 tx_itr = FEC_ITR_CLK_SEL;
3027 /* set ICFT and ICTT */
3028 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3029 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3030 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3031 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3033 rx_itr |= FEC_ITR_EN;
3034 tx_itr |= FEC_ITR_EN;
3036 writel(tx_itr, fep->hwp + FEC_TXIC0);
3037 writel(rx_itr, fep->hwp + FEC_RXIC0);
3038 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3039 writel(tx_itr, fep->hwp + FEC_TXIC1);
3040 writel(rx_itr, fep->hwp + FEC_RXIC1);
3041 writel(tx_itr, fep->hwp + FEC_TXIC2);
3042 writel(rx_itr, fep->hwp + FEC_RXIC2);
3046 static int fec_enet_get_coalesce(struct net_device *ndev,
3047 struct ethtool_coalesce *ec,
3048 struct kernel_ethtool_coalesce *kernel_coal,
3049 struct netlink_ext_ack *extack)
3051 struct fec_enet_private *fep = netdev_priv(ndev);
3053 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3056 ec->rx_coalesce_usecs = fep->rx_time_itr;
3057 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3059 ec->tx_coalesce_usecs = fep->tx_time_itr;
3060 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3065 static int fec_enet_set_coalesce(struct net_device *ndev,
3066 struct ethtool_coalesce *ec,
3067 struct kernel_ethtool_coalesce *kernel_coal,
3068 struct netlink_ext_ack *extack)
3070 struct fec_enet_private *fep = netdev_priv(ndev);
3071 struct device *dev = &fep->pdev->dev;
3074 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3077 if (ec->rx_max_coalesced_frames > 255) {
3078 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3082 if (ec->tx_max_coalesced_frames > 255) {
3083 dev_err(dev, "Tx coalesced frames exceed hardware limitation\n");
3087 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3088 if (cycle > 0xFFFF) {
3089 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
3093 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3094 if (cycle > 0xFFFF) {
3095 dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
3099 fep->rx_time_itr = ec->rx_coalesce_usecs;
3100 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3102 fep->tx_time_itr = ec->tx_coalesce_usecs;
3103 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3105 fec_enet_itr_coal_set(ndev);
3110 /* The LPI Sleep Ts count is based on the tx clock (clk_ref).
3111 * The LPI sleep cnt value = X us / cycle_ns.
3112 */
3113 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
3115 struct fec_enet_private *fep = netdev_priv(ndev);
3117 return us * (fep->clk_ref_rate / 1000) / 1000;
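/* Worked example (numbers illustrative): with a 50 MHz clk_ref,
 * cycle_ns = 20 ns, so a 1000 us tx_lpi_timer converts to
 * 1000 * (50000000 / 1000) / 1000 = 50000 tx clock cycles.
 */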
3120 static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
3122 struct fec_enet_private *fep = netdev_priv(ndev);
3123 struct ethtool_eee *p = &fep->eee;
3124 unsigned int sleep_cycle, wake_cycle;
3128 ret = phy_init_eee(ndev->phydev, false);
3132 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
3133 wake_cycle = sleep_cycle;
3139 p->tx_lpi_enabled = enable;
3140 p->eee_enabled = enable;
3141 p->eee_active = enable;
3143 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
3144 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
3150 fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
3152 struct fec_enet_private *fep = netdev_priv(ndev);
3153 struct ethtool_eee *p = &fep->eee;
3155 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3158 if (!netif_running(ndev))
3161 edata->eee_enabled = p->eee_enabled;
3162 edata->eee_active = p->eee_active;
3163 edata->tx_lpi_timer = p->tx_lpi_timer;
3164 edata->tx_lpi_enabled = p->tx_lpi_enabled;
3166 return phy_ethtool_get_eee(ndev->phydev, edata);
3170 fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
3172 struct fec_enet_private *fep = netdev_priv(ndev);
3173 struct ethtool_eee *p = &fep->eee;
3176 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3179 if (!netif_running(ndev))
3182 p->tx_lpi_timer = edata->tx_lpi_timer;
3184 if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
3185 !edata->tx_lpi_timer)
3186 ret = fec_enet_eee_mode_set(ndev, false);
3188 ret = fec_enet_eee_mode_set(ndev, true);
3193 return phy_ethtool_set_eee(ndev->phydev, edata);
3197 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3199 struct fec_enet_private *fep = netdev_priv(ndev);
3201 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3202 wol->supported = WAKE_MAGIC;
3203 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3205 wol->supported = wol->wolopts = 0;
3210 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3212 struct fec_enet_private *fep = netdev_priv(ndev);
3214 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3217 if (wol->wolopts & ~WAKE_MAGIC)
3220 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3221 if (device_may_wakeup(&ndev->dev))
3222 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3224 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3229 static const struct ethtool_ops fec_enet_ethtool_ops = {
3230 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3231 ETHTOOL_COALESCE_MAX_FRAMES,
3232 .get_drvinfo = fec_enet_get_drvinfo,
3233 .get_regs_len = fec_enet_get_regs_len,
3234 .get_regs = fec_enet_get_regs,
3235 .nway_reset = phy_ethtool_nway_reset,
3236 .get_link = ethtool_op_get_link,
3237 .get_coalesce = fec_enet_get_coalesce,
3238 .set_coalesce = fec_enet_set_coalesce,
3239 #ifndef CONFIG_M5272
3240 .get_pauseparam = fec_enet_get_pauseparam,
3241 .set_pauseparam = fec_enet_set_pauseparam,
3242 .get_strings = fec_enet_get_strings,
3243 .get_ethtool_stats = fec_enet_get_ethtool_stats,
3244 .get_sset_count = fec_enet_get_sset_count,
3246 .get_ts_info = fec_enet_get_ts_info,
3247 .get_wol = fec_enet_get_wol,
3248 .set_wol = fec_enet_set_wol,
3249 .get_eee = fec_enet_get_eee,
3250 .set_eee = fec_enet_set_eee,
3251 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3252 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3253 .self_test = net_selftest,
3256 static void fec_enet_free_buffers(struct net_device *ndev)
3258 struct fec_enet_private *fep = netdev_priv(ndev);
3260 struct fec_enet_priv_tx_q *txq;
3261 struct fec_enet_priv_rx_q *rxq;
3264 for (q = 0; q < fep->num_rx_queues; q++) {
3265 rxq = fep->rx_queue[q];
3266 for (i = 0; i < rxq->bd.ring_size; i++)
3267 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
3269 for (i = 0; i < XDP_STATS_TOTAL; i++)
3272 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3273 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3274 page_pool_destroy(rxq->page_pool);
3275 rxq->page_pool = NULL;
3278 for (q = 0; q < fep->num_tx_queues; q++) {
3279 txq = fep->tx_queue[q];
3280 for (i = 0; i < txq->bd.ring_size; i++) {
3281 kfree(txq->tx_bounce[i]);
3282 txq->tx_bounce[i] = NULL;
3284 if (!txq->tx_buf[i].buf_p) {
3285 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3289 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3290 dev_kfree_skb(txq->tx_buf[i].buf_p);
3291 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3292 xdp_return_frame(txq->tx_buf[i].buf_p);
3294 struct page *page = txq->tx_buf[i].buf_p;
3296 page_pool_put_page(page->pp, page, 0, false);
3299 txq->tx_buf[i].buf_p = NULL;
3300 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3305 static void fec_enet_free_queue(struct net_device *ndev)
3307 struct fec_enet_private *fep = netdev_priv(ndev);
3309 struct fec_enet_priv_tx_q *txq;
3311 for (i = 0; i < fep->num_tx_queues; i++)
3312 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3313 txq = fep->tx_queue[i];
3314 fec_dma_free(&fep->pdev->dev,
3315 txq->bd.ring_size * TSO_HEADER_SIZE,
3316 txq->tso_hdrs, txq->tso_hdrs_dma);
3319 for (i = 0; i < fep->num_rx_queues; i++)
3320 kfree(fep->rx_queue[i]);
3321 for (i = 0; i < fep->num_tx_queues; i++)
3322 kfree(fep->tx_queue[i]);
3325 static int fec_enet_alloc_queue(struct net_device *ndev)
3327 struct fec_enet_private *fep = netdev_priv(ndev);
3330 struct fec_enet_priv_tx_q *txq;
3332 for (i = 0; i < fep->num_tx_queues; i++) {
3333 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3339 fep->tx_queue[i] = txq;
3340 txq->bd.ring_size = TX_RING_SIZE;
3341 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3343 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3344 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3346 txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
3347 txq->bd.ring_size * TSO_HEADER_SIZE,
3348 &txq->tso_hdrs_dma, GFP_KERNEL);
3349 if (!txq->tso_hdrs) {
3355 for (i = 0; i < fep->num_rx_queues; i++) {
3356 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3358 if (!fep->rx_queue[i]) {
3363 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3364 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3369 fec_enet_free_queue(ndev);
3374 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3376 struct fec_enet_private *fep = netdev_priv(ndev);
3377 struct fec_enet_priv_rx_q *rxq;
3378 dma_addr_t phys_addr;
3379 struct bufdesc *bdp;
3383 rxq = fep->rx_queue[queue];
3386 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3388 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3392 for (i = 0; i < rxq->bd.ring_size; i++) {
3393 page = page_pool_dev_alloc_pages(rxq->page_pool);
3397 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3398 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3400 rxq->rx_skb_info[i].page = page;
3401 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
3402 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3404 if (fep->bufdesc_ex) {
3405 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3406 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3409 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3412 /* Set the last buffer to wrap. */
3413 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3414 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3418 fec_enet_free_buffers(ndev);
3423 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3425 struct fec_enet_private *fep = netdev_priv(ndev);
3427 struct bufdesc *bdp;
3428 struct fec_enet_priv_tx_q *txq;
3430 txq = fep->tx_queue[queue];
3432 for (i = 0; i < txq->bd.ring_size; i++) {
3433 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3434 if (!txq->tx_bounce[i])
3437 bdp->cbd_sc = cpu_to_fec16(0);
3438 bdp->cbd_bufaddr = cpu_to_fec32(0);
3440 if (fep->bufdesc_ex) {
3441 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3442 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3445 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3448 /* Set the last buffer to wrap. */
3449 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3450 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3455 fec_enet_free_buffers(ndev);
3459 static int fec_enet_alloc_buffers(struct net_device *ndev)
3461 struct fec_enet_private *fep = netdev_priv(ndev);
3464 for (i = 0; i < fep->num_rx_queues; i++)
3465 if (fec_enet_alloc_rxq_buffers(ndev, i))
3468 for (i = 0; i < fep->num_tx_queues; i++)
3469 if (fec_enet_alloc_txq_buffers(ndev, i))
3475 fec_enet_open(struct net_device *ndev)
3477 struct fec_enet_private *fep = netdev_priv(ndev);
3481 ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3485 pinctrl_pm_select_default_state(&fep->pdev->dev);
3486 ret = fec_enet_clk_enable(ndev, true);
3490 /* During the first fec_enet_open call the PHY isn't probed yet.
3491 * Therefore the phy_reset_after_clk_enable() call within
3492 * fec_enet_clk_enable() fails. As we need this reset in order to be
3493 * sure the PHY is working correctly, we check if we need to reset
3494 * again later when the PHY has been probed.
3495 */
3496 if (ndev->phydev && ndev->phydev->drv)
3497 reset_again = false;
3501 /* I should reset the ring buffers here, but I don't yet know
3502 * a simple way to do that.
3503 */
3505 ret = fec_enet_alloc_buffers(ndev);
3507 goto err_enet_alloc;
3509 /* Init MAC prior to mii bus probe */
3512 /* Call phy_reset_after_clk_enable() again if the earlier call
3513 * failed because the PHY wasn't probed yet.
3514 */
3516 fec_enet_phy_reset_after_clk_enable(ndev);
3518 /* Probe and connect to the PHY when opening the interface */
3519 ret = fec_enet_mii_probe(ndev);
3521 goto err_enet_mii_probe;
3523 if (fep->quirks & FEC_QUIRK_ERR006687)
3524 imx6q_cpuidle_fec_irqs_used();
3526 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3527 cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3529 napi_enable(&fep->napi);
3530 phy_start(ndev->phydev);
3531 netif_tx_start_all_queues(ndev);
3533 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3534 FEC_WOL_FLAG_ENABLE);
3539 fec_enet_free_buffers(ndev);
3541 fec_enet_clk_enable(ndev, false);
3543 pm_runtime_mark_last_busy(&fep->pdev->dev);
3544 pm_runtime_put_autosuspend(&fep->pdev->dev);
3545 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3550 fec_enet_close(struct net_device *ndev)
3552 struct fec_enet_private *fep = netdev_priv(ndev);
3554 phy_stop(ndev->phydev);
3556 if (netif_device_present(ndev)) {
3557 napi_disable(&fep->napi);
3558 netif_tx_disable(ndev);
3562 phy_disconnect(ndev->phydev);
3564 if (fep->quirks & FEC_QUIRK_ERR006687)
3565 imx6q_cpuidle_fec_irqs_unused();
3567 fec_enet_update_ethtool_stats(ndev);
3569 fec_enet_clk_enable(ndev, false);
3570 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3571 cpu_latency_qos_remove_request(&fep->pm_qos_req);
3573 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3574 pm_runtime_mark_last_busy(&fep->pdev->dev);
3575 pm_runtime_put_autosuspend(&fep->pdev->dev);
3577 fec_enet_free_buffers(ndev);
3582 /* Set or clear the multicast filter for this adaptor.
3583 * Skeleton taken from sunlance driver.
3584 * The CPM Ethernet implementation allows Multicast as well as individual
3585 * MAC address filtering. Some of the drivers check to make sure it is
3586 * a group multicast address, and discard those that are not. I guess I
3587 * will do the same for now, but just remove the test if you want
3588 * individual filtering as well (do the upper net layers want or support
3589 * this kind of feature?).
3590 */
3592 #define FEC_HASH_BITS 6 /* #bits in hash */
3594 static void set_multicast_list(struct net_device *ndev)
3596 struct fec_enet_private *fep = netdev_priv(ndev);
3597 struct netdev_hw_addr *ha;
3598 unsigned int crc, tmp;
3600 unsigned int hash_high = 0, hash_low = 0;
3602 if (ndev->flags & IFF_PROMISC) {
3603 tmp = readl(fep->hwp + FEC_R_CNTRL);
3605 writel(tmp, fep->hwp + FEC_R_CNTRL);
3609 tmp = readl(fep->hwp + FEC_R_CNTRL);
3611 writel(tmp, fep->hwp + FEC_R_CNTRL);
3613 if (ndev->flags & IFF_ALLMULTI) {
3614 /* Catch all multicast addresses, so set the
3615 * filter to all 1's.
3616 */
3617 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3618 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3623 /* Add the addresses to the hash registers */
3624 netdev_for_each_mc_addr(ha, ndev) {
3625 /* calculate crc32 value of mac address */
3626 crc = ether_crc_le(ndev->addr_len, ha->addr);
3628 /* Only the upper 6 bits (FEC_HASH_BITS) are used,
3629 * and they select a specific bit in the hash registers.
3630 */
3631 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
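/* Example with a hypothetical CRC value: crc = 0xdeadbeef has
 * 0b110111 = 55 as its upper 6 bits, so bit 55 - 32 = 23 of
 * hash_high is set below.
 */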
3634 hash_high |= 1 << (hash - 32);
3636 hash_low |= 1 << hash;
3639 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3640 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3643 /* Set a MAC change in hardware. */
3645 fec_set_mac_address(struct net_device *ndev, void *p)
3647 struct fec_enet_private *fep = netdev_priv(ndev);
3648 struct sockaddr *addr = p;
3651 if (!is_valid_ether_addr(addr->sa_data))
3652 return -EADDRNOTAVAIL;
3653 eth_hw_addr_set(ndev, addr->sa_data);
3656 /* Add a netif status check here to avoid a system hang in this case:
3657 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3658 * After ethx is down, all FEC clocks are gated off and subsequent
3659 * register access would cause the system to hang.
3660 */
3661 if (!netif_running(ndev))
3664 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3665 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3666 fep->hwp + FEC_ADDR_LOW);
3667 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3668 fep->hwp + FEC_ADDR_HIGH);
3672 #ifdef CONFIG_NET_POLL_CONTROLLER
3673 /**
3674 * fec_poll_controller - FEC Poll controller function
3675 * @dev: The FEC network adapter
3676 *
3677 * Polled functionality used by netconsole and others in non-interrupt mode
3678 */
3680 static void fec_poll_controller(struct net_device *dev)
3683 struct fec_enet_private *fep = netdev_priv(dev);
3685 for (i = 0; i < FEC_IRQ_NUM; i++) {
3686 if (fep->irq[i] > 0) {
3687 disable_irq(fep->irq[i]);
3688 fec_enet_interrupt(fep->irq[i], dev);
3689 enable_irq(fep->irq[i]);
3695 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3696 netdev_features_t features)
3698 struct fec_enet_private *fep = netdev_priv(netdev);
3699 netdev_features_t changed = features ^ netdev->features;
3701 netdev->features = features;
3703 /* Receive checksum has been changed */
3704 if (changed & NETIF_F_RXCSUM) {
3705 if (features & NETIF_F_RXCSUM)
3706 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3708 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3712 static int fec_set_features(struct net_device *netdev,
3713 netdev_features_t features)
3715 struct fec_enet_private *fep = netdev_priv(netdev);
3716 netdev_features_t changed = features ^ netdev->features;
3718 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3719 napi_disable(&fep->napi);
3720 netif_tx_lock_bh(netdev);
3722 fec_enet_set_netdev_features(netdev, features);
3723 fec_restart(netdev);
3724 netif_tx_wake_all_queues(netdev);
3725 netif_tx_unlock_bh(netdev);
3726 napi_enable(&fep->napi);
3728 fec_enet_set_netdev_features(netdev, features);
3734 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3735 struct net_device *sb_dev)
3737 struct fec_enet_private *fep = netdev_priv(ndev);
3740 if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3741 return netdev_pick_tx(ndev, skb, NULL);
3743 /* VLAN is present in the payload. */
3744 if (eth_type_vlan(skb->protocol)) {
3745 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
3747 vlan_tag = ntohs(vhdr->h_vlan_TCI);
3748 /* VLAN is present in the skb but not yet pushed in the payload. */
3749 } else if (skb_vlan_tag_present(skb)) {
3750 vlan_tag = skb->vlan_tci;
3755 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
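/* For example, a VLAN TCI of 0x6005 carries PCP 3 (0x6005 >> 13),
 * which the table above maps to tx queue 1.
 */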
3758 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
3760 struct fec_enet_private *fep = netdev_priv(dev);
3761 bool is_run = netif_running(dev);
3762 struct bpf_prog *old_prog;
3764 switch (bpf->command) {
3765 case XDP_SETUP_PROG:
3766 /* No need to support the SoCs that require frame swapping,
3767 * because the performance wouldn't be better than skb mode.
3768 */
3770 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3774 xdp_features_clear_redirect_target(dev);
3777 napi_disable(&fep->napi);
3778 netif_tx_disable(dev);
3781 old_prog = xchg(&fep->xdp_prog, bpf->prog);
3783 bpf_prog_put(old_prog);
3788 napi_enable(&fep->napi);
3789 netif_tx_start_all_queues(dev);
3793 xdp_features_set_redirect_target(dev, false);
3797 case XDP_SETUP_XSK_POOL:
3806 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3808 if (unlikely(index < 0))
3811 return (index % fep->num_tx_queues);
3814 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3815 struct fec_enet_priv_tx_q *txq,
3816 void *frame, u32 dma_sync_len,
3819 unsigned int index, status, estatus;
3820 struct bufdesc *bdp;
3821 dma_addr_t dma_addr;
3825 entries_free = fec_enet_get_free_txdesc_num(txq);
3826 if (entries_free < MAX_SKB_FRAGS + 1) {
3827 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3831 /* Fill in a Tx ring entry */
3833 status = fec16_to_cpu(bdp->cbd_sc);
3834 status &= ~BD_ENET_TX_STATS;
3836 index = fec_enet_get_bd_index(bdp, &txq->bd);
3839 struct xdp_frame *xdpf = frame;
3841 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3842 xdpf->len, DMA_TO_DEVICE);
3843 if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3846 frame_len = xdpf->len;
3847 txq->tx_buf[index].buf_p = xdpf;
3848 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3850 struct xdp_buff *xdpb = frame;
3853 page = virt_to_page(xdpb->data);
3854 dma_addr = page_pool_get_dma_addr(page) +
3855 (xdpb->data - xdpb->data_hard_start);
3856 dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3857 dma_sync_len, DMA_BIDIRECTIONAL);
3858 frame_len = xdpb->data_end - xdpb->data;
3859 txq->tx_buf[index].buf_p = page;
3860 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3863 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
3864 if (fep->bufdesc_ex)
3865 estatus = BD_ENET_TX_INT;
3867 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
3868 bdp->cbd_datlen = cpu_to_fec16(frame_len);
3870 if (fep->bufdesc_ex) {
3871 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3873 if (fep->quirks & FEC_QUIRK_HAS_AVB)
3874 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3877 ebdp->cbd_esc = cpu_to_fec32(estatus);
3880 /* Make sure the updates to the rest of the descriptor are performed
3881 * before transferring ownership.
3882 */
3885 /* Send it on its way. Tell FEC it's ready, interrupt when done,
3886 * it's the last BD of the frame, and to put the CRC on the end.
3887 */
3888 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
3889 bdp->cbd_sc = cpu_to_fec16(status);
3891 /* If this was the last BD in the ring, start at the beginning again. */
3892 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3894 /* Make sure the update to bdp is performed before updating txq->bd.cur. */
3899 /* Trigger transmission start */
3900 writel(0, txq->bd.reg_desc_active);
3905 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3906 int cpu, struct xdp_buff *xdp,
3909 struct fec_enet_priv_tx_q *txq;
3910 struct netdev_queue *nq;
3913 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3914 txq = fep->tx_queue[queue];
3915 nq = netdev_get_tx_queue(fep->netdev, queue);
3917 __netif_tx_lock(nq, cpu);
3919 /* Avoid tx timeout as XDP shares the queue with kernel stack */
3920 txq_trans_cond_update(nq);
3921 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3923 __netif_tx_unlock(nq);
3928 static int fec_enet_xdp_xmit(struct net_device *dev,
3930 struct xdp_frame **frames,
3933 struct fec_enet_private *fep = netdev_priv(dev);
3934 struct fec_enet_priv_tx_q *txq;
3935 int cpu = smp_processor_id();
3936 unsigned int sent_frames = 0;
3937 struct netdev_queue *nq;
3941 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3942 txq = fep->tx_queue[queue];
3943 nq = netdev_get_tx_queue(fep->netdev, queue);
3945 __netif_tx_lock(nq, cpu);
3947 /* Avoid tx timeout as XDP shares the queue with kernel stack */
3948 txq_trans_cond_update(nq);
3949 for (i = 0; i < num_frames; i++) {
3950 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3955 __netif_tx_unlock(nq);
3960 static int fec_hwtstamp_get(struct net_device *ndev,
3961 struct kernel_hwtstamp_config *config)
3963 struct fec_enet_private *fep = netdev_priv(ndev);
3965 if (!netif_running(ndev))
3968 if (!fep->bufdesc_ex)
3971 fec_ptp_get(ndev, config);
3976 static int fec_hwtstamp_set(struct net_device *ndev,
3977 struct kernel_hwtstamp_config *config,
3978 struct netlink_ext_ack *extack)
3980 struct fec_enet_private *fep = netdev_priv(ndev);
3982 if (!netif_running(ndev))
3985 if (!fep->bufdesc_ex)
3988 return fec_ptp_set(ndev, config, extack);
3991 static const struct net_device_ops fec_netdev_ops = {
3992 .ndo_open = fec_enet_open,
3993 .ndo_stop = fec_enet_close,
3994 .ndo_start_xmit = fec_enet_start_xmit,
3995 .ndo_select_queue = fec_enet_select_queue,
3996 .ndo_set_rx_mode = set_multicast_list,
3997 .ndo_validate_addr = eth_validate_addr,
3998 .ndo_tx_timeout = fec_timeout,
3999 .ndo_set_mac_address = fec_set_mac_address,
4000 .ndo_eth_ioctl = phy_do_ioctl_running,
4001 #ifdef CONFIG_NET_POLL_CONTROLLER
4002 .ndo_poll_controller = fec_poll_controller,
4004 .ndo_set_features = fec_set_features,
4005 .ndo_bpf = fec_enet_bpf,
4006 .ndo_xdp_xmit = fec_enet_xdp_xmit,
4007 .ndo_hwtstamp_get = fec_hwtstamp_get,
4008 .ndo_hwtstamp_set = fec_hwtstamp_set,
4011 static const unsigned short offset_des_active_rxq[] = {
4012 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
4015 static const unsigned short offset_des_active_txq[] = {
4016 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
4019 /*
4020 * XXX: We need to clean up on failure exits here.
4021 */
4023 static int fec_enet_init(struct net_device *ndev)
4025 struct fec_enet_private *fep = netdev_priv(ndev);
4026 struct bufdesc *cbd_base;
4030 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4031 sizeof(struct bufdesc);
4032 unsigned dsize_log2 = __fls(dsize);
4035 WARN_ON(dsize != (1 << dsize_log2));
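/* With the bufdesc layouts assumed from fec.h, dsize is 8 bytes for the
 * legacy descriptor and 32 bytes for bufdesc_ex, so dsize_log2 is 3 or 5
 * and the power-of-two check above always holds.
 */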
4036 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4037 fep->rx_align = 0xf;
4038 fep->tx_align = 0xf;
4040 fep->rx_align = 0x3;
4041 fep->tx_align = 0x3;
4043 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4044 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4045 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4046 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4048 /* Check mask of the streaming and coherent API */
4049 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4051 dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4055 ret = fec_enet_alloc_queue(ndev);
4059 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4061 /* Allocate memory for buffer descriptors. */
4062 cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4066 goto free_queue_mem;
4069 /* Get the Ethernet address */
4070 ret = fec_get_mac(ndev);
4072 goto free_queue_mem;
4074 /* Set receive and transmit descriptor base. */
4075 for (i = 0; i < fep->num_rx_queues; i++) {
4076 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4077 unsigned size = dsize * rxq->bd.ring_size;
4080 rxq->bd.base = cbd_base;
4081 rxq->bd.cur = cbd_base;
4082 rxq->bd.dma = bd_dma;
4083 rxq->bd.dsize = dsize;
4084 rxq->bd.dsize_log2 = dsize_log2;
4085 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4087 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4088 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4091 for (i = 0; i < fep->num_tx_queues; i++) {
4092 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4093 unsigned size = dsize * txq->bd.ring_size;
4096 txq->bd.base = cbd_base;
4097 txq->bd.cur = cbd_base;
4098 txq->bd.dma = bd_dma;
4099 txq->bd.dsize = dsize;
4100 txq->bd.dsize_log2 = dsize_log2;
4101 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4103 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4104 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4108 /* The FEC Ethernet specific entries in the device structure */
4109 ndev->watchdog_timeo = TX_TIMEOUT;
4110 ndev->netdev_ops = &fec_netdev_ops;
4111 ndev->ethtool_ops = &fec_enet_ethtool_ops;
4113 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4114 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4116 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4117 /* enable hw VLAN support */
4118 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4120 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4121 netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
4123 /* enable hw accelerator */
4124 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
4125 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
4126 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4129 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
4131 fep->rx_align = 0x3f;
4134 ndev->hw_features = ndev->features;
4136 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4137 ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
4138 NETDEV_XDP_ACT_REDIRECT;
4142 if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4143 fec_enet_clear_ethtool_stats(ndev);
4145 fec_enet_update_ethtool_stats(ndev);
4150 fec_enet_free_queue(ndev);
4155 static int fec_reset_phy(struct platform_device *pdev)
4157 struct gpio_desc *phy_reset;
4158 int msec = 1, phy_post_delay = 0;
4159 struct device_node *np = pdev->dev.of_node;
4165 err = of_property_read_u32(np, "phy-reset-duration", &msec);
4166 /* A sane reset duration should not be longer than 1s */
4167 if (!err && msec > 1000)
4170 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
4171 /* A valid post-reset delay should be less than 1s */
4172 if (!err && phy_post_delay > 1000)
4175 phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
4177 if (IS_ERR(phy_reset))
4178 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
4179 "failed to get phy-reset-gpios\n");
4187 usleep_range(msec * 1000, msec * 1000 + 1000);
4189 gpiod_set_value_cansleep(phy_reset, 0);
4191 if (!phy_post_delay)
4194 if (phy_post_delay > 20)
4195 msleep(phy_post_delay);
4197 usleep_range(phy_post_delay * 1000,
4198 phy_post_delay * 1000 + 1000);
4202 #else /* CONFIG_OF */
4203 static int fec_reset_phy(struct platform_device *pdev)
4205 /*
4206 * In case of platform probe, the reset has been done
4207 * by machine code.
4208 */
4211 #endif /* CONFIG_OF */
4214 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
4216 struct device_node *np = pdev->dev.of_node;
4218 *num_tx = *num_rx = 1;
4220 if (!np || !of_device_is_available(np))
4223 /* parse the num of tx and rx queues */
4224 of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
4226 of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
4228 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
4229 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
4235 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
4236 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
4244 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
4246 int irq_cnt = platform_irq_count(pdev);
4248 if (irq_cnt > FEC_IRQ_NUM)
4249 irq_cnt = FEC_IRQ_NUM; /* last for pps */
4250 else if (irq_cnt == 2)
4251 irq_cnt = 1; /* last for pps */
4252 else if (irq_cnt <= 0)
4253 irq_cnt = 1; /* At least 1 irq is needed */
4257 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
4259 struct net_device *ndev = platform_get_drvdata(pdev);
4260 struct fec_enet_private *fep = netdev_priv(ndev);
4262 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4263 fep->wake_irq = fep->irq[2];
4265 fep->wake_irq = fep->irq[0];
4268 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4269 struct device_node *np)
4271 struct device_node *gpr_np;
4275 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
4279 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
4280 ARRAY_SIZE(out_val));
4282 dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4286 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4287 if (IS_ERR(fep->stop_gpr.gpr)) {
4288 dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4289 ret = PTR_ERR(fep->stop_gpr.gpr);
4290 fep->stop_gpr.gpr = NULL;
4294 fep->stop_gpr.reg = out_val[1];
4295 fep->stop_gpr.bit = out_val[2];
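/* An illustrative (hypothetical) property matching the layout parsed
 * above: a syscon phandle, a register offset and a bit number:
 *
 *	fsl,stop-mode = <&gpr 0x34 27>;
 */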
4298 of_node_put(gpr_np);
4304 fec_probe(struct platform_device *pdev)
4306 struct fec_enet_private *fep;
4307 struct fec_platform_data *pdata;
4308 phy_interface_t interface;
4309 struct net_device *ndev;
4310 int i, irq, ret = 0;
4312 struct device_node *np = pdev->dev.of_node, *phy_node;
4317 const struct fec_devinfo *dev_info;
4319 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
4321 /* Init network device */
4322 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
4323 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
4327 SET_NETDEV_DEV(ndev, &pdev->dev);
4329 /* setup board info structure */
4330 fep = netdev_priv(ndev);
4332 dev_info = device_get_match_data(&pdev->dev);
4334 dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
4336 fep->quirks = dev_info->quirks;
4339 fep->num_rx_queues = num_rx_qs;
4340 fep->num_tx_queues = num_tx_qs;
4342 #if !defined(CONFIG_M5272)
4343 /* Enable pause frame autonegotiation by default */
4344 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4345 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4348 /* Select default pin state */
4349 pinctrl_pm_select_default_state(&pdev->dev);
4351 fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4352 if (IS_ERR(fep->hwp)) {
4353 ret = PTR_ERR(fep->hwp);
4354 goto failed_ioremap;
4358 fep->dev_id = dev_id++;
4360 platform_set_drvdata(pdev, ndev);
4362 if ((of_machine_is_compatible("fsl,imx6q") ||
4363 of_machine_is_compatible("fsl,imx6dl")) &&
4364 !of_property_read_bool(np, "fsl,err006687-workaround-present"))
4365 fep->quirks |= FEC_QUIRK_ERR006687;
4367 ret = fec_enet_ipc_handle_init(fep);
4369 goto failed_ipc_init;
4371 if (of_property_read_bool(np, "fsl,magic-packet"))
4372 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4374 ret = fec_enet_init_stop_mode(fep, np);
4376 goto failed_stop_mode;
4378 phy_node = of_parse_phandle(np, "phy-handle", 0);
4379 if (!phy_node && of_phy_is_fixed_link(np)) {
4380 ret = of_phy_register_fixed_link(np);
4383 "broken fixed-link specification\n");
4386 phy_node = of_node_get(np);
4388 fep->phy_node = phy_node;
4390 ret = of_get_phy_mode(pdev->dev.of_node, &interface);
4392 pdata = dev_get_platdata(&pdev->dev);
4394 fep->phy_interface = pdata->phy;
4396 fep->phy_interface = PHY_INTERFACE_MODE_MII;
4398 fep->phy_interface = interface;
4401 ret = fec_enet_parse_rgmii_delay(fep, np);
4403 goto failed_rgmii_delay;
4405 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4406 if (IS_ERR(fep->clk_ipg)) {
4407 ret = PTR_ERR(fep->clk_ipg);
4411 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4412 if (IS_ERR(fep->clk_ahb)) {
4413 ret = PTR_ERR(fep->clk_ahb);
4417 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4419 /* enet_out is optional, depends on board */
4420 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4421 if (IS_ERR(fep->clk_enet_out)) {
4422 ret = PTR_ERR(fep->clk_enet_out);
4426 fep->ptp_clk_on = false;
4427 mutex_init(&fep->ptp_clk_mutex);
4429 /* clk_ref is optional, depends on board */
4430 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4431 if (IS_ERR(fep->clk_ref)) {
4432 ret = PTR_ERR(fep->clk_ref);
4435 fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4437 /* clk_2x_txclk is optional, depends on board */
4438 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4439 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4440 if (IS_ERR(fep->clk_2x_txclk))
4441 fep->clk_2x_txclk = NULL;
4444 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4445 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4446 if (IS_ERR(fep->clk_ptp)) {
4447 fep->clk_ptp = NULL;
4448 fep->bufdesc_ex = false;
4451 ret = fec_enet_clk_enable(ndev, true);
4455 ret = clk_prepare_enable(fep->clk_ipg);
4457 goto failed_clk_ipg;
4458 ret = clk_prepare_enable(fep->clk_ahb);
4460 goto failed_clk_ahb;
	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}
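
	/* Runtime PM bring-up: mark the device active with a usage count
	 * held, so the clocks stay on for the rest of probe; the matching
	 * pm_runtime_put_autosuspend() at the end of probe then allows the
	 * device to drop into runtime suspend after FEC_MDIO_PM_TIMEOUT ms.
	 */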
	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;
	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;
	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname_optional(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}
	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;
	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;
	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
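
/* Error unwind: each label below releases what was acquired after the
 * corresponding failure point, in reverse order of acquisition.
 */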
failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
static void
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;
	int ret;
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		dev_err(&pdev->dev,
			"Failed to resume device in remove callback (%pe)\n",
			ERR_PTR(ret));

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);
	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
	 * disabling them again.
	 */
	if (ret >= 0) {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
	}
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	free_netdev(ndev);
}
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	rtnl_lock();
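	/* Two shutdown flavours: with WoL armed, keep the wakeup interrupt
	 * live and drop the MAC into stop mode; otherwise mask every IRQ
	 * and move the pins to their sleep state.
	 */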
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
			fec_irqs_disable(ndev);
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
		} else {
			fec_irqs_disable_except_wakeup(ndev);
			if (fep->wake_irq > 0) {
				disable_irq(fep->wake_irq);
				enable_irq_wake(fep->wake_irq);
			}
			fec_enet_stop_mode(fep, true);
		}
		/* It's safe to disable clocks since interrupts are masked */
		fec_enet_clk_enable(ndev, false);

		fep->rpm_active = !pm_runtime_status_suspended(dev);
		if (fep->rpm_active) {
			ret = pm_runtime_force_suspend(dev);
			if (ret < 0) {
				rtnl_unlock();
				return ret;
			}
		}
	}
	rtnl_unlock();
	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and, on some boards, controls the
	 * PHY regulator; disabling either one takes the PHY link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}
static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->rpm_active)
			pm_runtime_force_resume(dev);

		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			fec_enet_stop_mode(fep, false);
			if (fep->wake_irq) {
				disable_irq_wake(fep->wake_irq);
				enable_irq(fep->wake_irq);
			}

			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_init_hw(ndev->phydev);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;
failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}
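
/* Runtime PM only gates the register-level clocks (ahb/ipg); this is
 * primarily what MDIO bus accesses need while the interface is down,
 * and the autosuspend delay keeps the clocks on briefly between
 * accesses.
 */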
static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}
static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}
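
/* SET_SYSTEM_SLEEP_PM_OPS() wires the system suspend/resume hooks and
 * SET_RUNTIME_PM_OPS() the runtime ones; both expand to nothing when
 * the corresponding CONFIG_PM support is disabled, which is why the
 * callbacks above are tagged __maybe_unused.
 */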
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};
static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe = fec_probe,
	.remove_new = fec_drv_remove,
};

module_platform_driver(fec_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:"DRIVER_NAME);