1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
11 Documentation available at:
12 http://www.stlinux.com
13 Support available through:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <net/pkt_cls.h>
42 #include "stmmac_ptp.h"
43 #include "stmmac.h"
44 #include <linux/reset.h>
45 #include <linux/of_mdio.h>
46 #include "dwmac1000.h"
47 #include "dwxgmac2.h"
48 #include "hwif.h"
50 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
51 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
53 /* Module parameters */
54 #define TX_TIMEO 5000
55 static int watchdog = TX_TIMEO;
56 module_param(watchdog, int, 0644);
57 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
59 static int debug = -1;
60 module_param(debug, int, 0644);
61 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
63 static int phyaddr = -1;
64 module_param(phyaddr, int, 0444);
65 MODULE_PARM_DESC(phyaddr, "Physical device address");
67 #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
68 #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
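/* Illustrative arithmetic (ring size assumed): with a DMA ring of 512
 * descriptors, both thresholds evaluate to 512 / 4 = 128 descriptors.
 */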
70 static int flow_ctrl = FLOW_AUTO;
71 module_param(flow_ctrl, int, 0644);
72 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
74 static int pause = PAUSE_TIME;
75 module_param(pause, int, 0644);
76 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
78 #define TC_DEFAULT 64
79 static int tc = TC_DEFAULT;
80 module_param(tc, int, 0644);
81 MODULE_PARM_DESC(tc, "DMA threshold control value");
83 #define DEFAULT_BUFSIZE 1536
84 static int buf_sz = DEFAULT_BUFSIZE;
85 module_param(buf_sz, int, 0644);
86 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
88 #define STMMAC_RX_COPYBREAK 256
90 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
91 NETIF_MSG_LINK | NETIF_MSG_IFUP |
92 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
94 #define STMMAC_DEFAULT_LPI_TIMER 1000
95 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
96 module_param(eee_timer, int, 0644);
97 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
98 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
100 /* By default the driver will use the ring mode to manage tx and rx descriptors,
101 * but allow the user to force the use of the chain instead of the ring.
102 */
103 static unsigned int chain_mode;
104 module_param(chain_mode, int, 0444);
105 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
107 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
108 /* For MSI interrupts handling */
109 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
110 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
111 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
112 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
114 #ifdef CONFIG_DEBUG_FS
115 static const struct net_device_ops stmmac_netdev_ops;
116 static void stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
120 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
122 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
123 {
124 int ret = 0;
126 if (enabled) {
127 ret = clk_prepare_enable(priv->plat->stmmac_clk);
128 if (ret)
129 return ret;
130 ret = clk_prepare_enable(priv->plat->pclk);
131 if (ret) {
132 clk_disable_unprepare(priv->plat->stmmac_clk);
133 return ret;
134 }
135 if (priv->plat->clks_config) {
136 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
137 if (ret) {
138 clk_disable_unprepare(priv->plat->stmmac_clk);
139 clk_disable_unprepare(priv->plat->pclk);
140 return ret;
141 }
142 }
143 } else {
144 clk_disable_unprepare(priv->plat->stmmac_clk);
145 clk_disable_unprepare(priv->plat->pclk);
146 if (priv->plat->clks_config)
147 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
148 }
150 return ret;
151 }
152 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
154 /**
155 * stmmac_verify_args - verify the driver parameters.
156 * Description: it checks the driver parameters and sets a default in case of
157 * errors.
158 */
159 static void stmmac_verify_args(void)
160 {
161 if (unlikely(watchdog < 0))
162 watchdog = TX_TIMEO;
163 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
164 buf_sz = DEFAULT_BUFSIZE;
165 if (unlikely(flow_ctrl > 1))
166 flow_ctrl = FLOW_AUTO;
167 else if (likely(flow_ctrl < 0))
168 flow_ctrl = FLOW_OFF;
169 if (unlikely((pause < 0) || (pause > 0xffff)))
170 pause = PAUSE_TIME;
171 if (eee_timer < 0)
172 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
173 }
175 /**
176 * stmmac_disable_all_queues - Disable all queues
177 * @priv: driver private structure
178 */
179 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
180 {
181 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
182 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
183 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
184 u32 queue;
186 for (queue = 0; queue < maxq; queue++) {
187 struct stmmac_channel *ch = &priv->channel[queue];
189 if (queue < rx_queues_cnt)
190 napi_disable(&ch->rx_napi);
191 if (queue < tx_queues_cnt)
192 napi_disable(&ch->tx_napi);
193 }
194 }
196 /**
197 * stmmac_enable_all_queues - Enable all queues
198 * @priv: driver private structure
199 */
200 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
201 {
202 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
203 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
204 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
205 u32 queue;
207 for (queue = 0; queue < maxq; queue++) {
208 struct stmmac_channel *ch = &priv->channel[queue];
210 if (queue < rx_queues_cnt)
211 napi_enable(&ch->rx_napi);
212 if (queue < tx_queues_cnt)
213 napi_enable(&ch->tx_napi);
214 }
215 }
217 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
218 {
219 if (!test_bit(STMMAC_DOWN, &priv->state) &&
220 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
221 queue_work(priv->wq, &priv->service_task);
222 }
224 static void stmmac_global_err(struct stmmac_priv *priv)
225 {
226 netif_carrier_off(priv->dev);
227 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
228 stmmac_service_event_schedule(priv);
229 }
231 /**
232 * stmmac_clk_csr_set - dynamically set the MDC clock
233 * @priv: driver private structure
234 * Description: this is to dynamically set the MDC clock according to the csr
235 * clock input.
236 * Note:
237 * If a specific clk_csr value is passed from the platform
238 * this means that the CSR Clock Range selection cannot be
239 * changed at run-time and it is fixed (as reported in the driver
240 * documentation). Otherwise, the driver will try to set the MDC
241 * clock dynamically according to the actual clock input.
242 */
243 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
244 {
245 u32 clk_rate;
247 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
249 /* Platform provided default clk_csr would be assumed valid
250 * for all other cases except for the below mentioned ones.
251 * For values higher than the IEEE 802.3 specified frequency
252 * we cannot estimate the proper divider, as the frequency
253 * of clk_csr_i is not known. So we do not change the default
254 * settings.
255 */
256 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
257 if (clk_rate < CSR_F_35M)
258 priv->clk_csr = STMMAC_CSR_20_35M;
259 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
260 priv->clk_csr = STMMAC_CSR_35_60M;
261 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
262 priv->clk_csr = STMMAC_CSR_60_100M;
263 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
264 priv->clk_csr = STMMAC_CSR_100_150M;
265 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
266 priv->clk_csr = STMMAC_CSR_150_250M;
267 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
268 priv->clk_csr = STMMAC_CSR_250_300M;
269 }
271 if (priv->plat->has_sun8i) {
272 if (clk_rate > 160000000)
273 priv->clk_csr = 0x03;
274 else if (clk_rate > 80000000)
275 priv->clk_csr = 0x02;
276 else if (clk_rate > 40000000)
277 priv->clk_csr = 0x01;
278 else
279 priv->clk_csr = 0;
280 }
282 if (priv->plat->has_xgmac) {
283 if (clk_rate > 400000000)
284 priv->clk_csr = 0x5;
285 else if (clk_rate > 350000000)
286 priv->clk_csr = 0x4;
287 else if (clk_rate > 300000000)
288 priv->clk_csr = 0x3;
289 else if (clk_rate > 250000000)
290 priv->clk_csr = 0x2;
291 else if (clk_rate > 150000000)
292 priv->clk_csr = 0x1;
293 else
294 priv->clk_csr = 0x0;
295 }
296 }
298 static void print_pkt(unsigned char *buf, int len)
299 {
300 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
301 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
302 }
304 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
305 {
306 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
307 u32 avail;
309 if (tx_q->dirty_tx > tx_q->cur_tx)
310 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
311 else
312 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
314 return avail;
315 }
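/* Worked example (values assumed): with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5, the else branch yields avail = 512 - 10 + 5 - 1 = 506.
 * The "- 1" keeps one descriptor permanently unused, the usual ring-buffer
 * convention so that cur_tx == dirty_tx unambiguously means an empty ring.
 */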
317 /**
318 * stmmac_rx_dirty - Get RX queue dirty
319 * @priv: driver private structure
320 * @queue: RX queue index
321 */
322 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
323 {
324 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
325 u32 dirty;
327 if (rx_q->dirty_rx <= rx_q->cur_rx)
328 dirty = rx_q->cur_rx - rx_q->dirty_rx;
329 else
330 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
332 return dirty;
333 }
335 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
336 {
337 int tx_lpi_timer;
339 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
340 priv->eee_sw_timer_en = en ? 0 : 1;
341 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
342 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
343 }
345 /**
346 * stmmac_enable_eee_mode - check and enter in LPI mode
347 * @priv: driver private structure
348 * Description: this function verifies and enters the LPI mode in case of
349 * EEE.
350 */
351 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
352 {
353 u32 tx_cnt = priv->plat->tx_queues_to_use;
354 u32 queue;
356 /* check if all TX queues have the work finished */
357 for (queue = 0; queue < tx_cnt; queue++) {
358 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
360 if (tx_q->dirty_tx != tx_q->cur_tx)
361 return; /* still unfinished work */
362 }
364 /* Check and enter in LPI mode */
365 if (!priv->tx_path_in_lpi_mode)
366 stmmac_set_eee_mode(priv, priv->hw,
367 priv->plat->en_tx_lpi_clockgating);
368 }
370 /**
371 * stmmac_disable_eee_mode - disable and exit from LPI mode
372 * @priv: driver private structure
373 * Description: this function exits and disables EEE when the
374 * LPI state flag is true. This is called by the xmit.
375 */
376 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
377 {
378 if (!priv->eee_sw_timer_en) {
379 stmmac_lpi_entry_timer_config(priv, 0);
380 return;
381 }
383 stmmac_reset_eee_mode(priv, priv->hw);
384 del_timer_sync(&priv->eee_ctrl_timer);
385 priv->tx_path_in_lpi_mode = false;
386 }
388 /**
389 * stmmac_eee_ctrl_timer - EEE TX SW timer.
390 * @t: timer_list struct containing private info
391 * Description:
392 * if there is no data transfer and we are not in LPI state,
393 * then the MAC Transmitter can be moved to LPI state.
394 */
395 static void stmmac_eee_ctrl_timer(struct timer_list *t)
396 {
397 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
399 stmmac_enable_eee_mode(priv);
400 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
401 }
403 /**
404 * stmmac_eee_init - init EEE
405 * @priv: driver private structure
406 * Description:
407 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
408 * can also manage EEE, this function enables the LPI state and starts the
409 * related timers.
410 */
411 bool stmmac_eee_init(struct stmmac_priv *priv)
412 {
413 int eee_tw_timer = priv->eee_tw_timer;
415 /* Using PCS we cannot deal with the phy registers at this stage
416 * so we do not support extra features like EEE.
417 */
418 if (priv->hw->pcs == STMMAC_PCS_TBI ||
419 priv->hw->pcs == STMMAC_PCS_RTBI)
420 return false;
422 /* Check if MAC core supports the EEE feature. */
423 if (!priv->dma_cap.eee)
424 return false;
426 mutex_lock(&priv->lock);
428 /* Check if it needs to be deactivated */
429 if (!priv->eee_active) {
430 if (priv->eee_enabled) {
431 netdev_dbg(priv->dev, "disable EEE\n");
432 stmmac_lpi_entry_timer_config(priv, 0);
433 del_timer_sync(&priv->eee_ctrl_timer);
434 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
435 }
436 mutex_unlock(&priv->lock);
437 return false;
438 }
440 if (priv->eee_active && !priv->eee_enabled) {
441 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
442 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
443 eee_tw_timer);
444 }
446 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
447 del_timer_sync(&priv->eee_ctrl_timer);
448 priv->tx_path_in_lpi_mode = false;
449 stmmac_lpi_entry_timer_config(priv, 1);
450 } else {
451 stmmac_lpi_entry_timer_config(priv, 0);
452 mod_timer(&priv->eee_ctrl_timer,
453 STMMAC_LPI_T(priv->tx_lpi_timer));
454 }
456 mutex_unlock(&priv->lock);
457 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
458 return true;
459 }
461 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
462 * @priv: driver private structure
463 * @p : descriptor pointer
464 * @skb : the socket buffer
465 * Description:
466 * This function reads the timestamp from the descriptor, performs some
467 * sanity checks, and passes it to the stack.
468 */
469 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
470 struct dma_desc *p, struct sk_buff *skb)
471 {
472 struct skb_shared_hwtstamps shhwtstamp;
473 bool found = false;
474 s64 adjust = 0;
475 u64 ns = 0;
477 if (!priv->hwts_tx_en)
478 return;
480 /* exit if skb doesn't support hw tstamp */
481 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
482 return;
484 /* check tx tstamp status */
485 if (stmmac_get_tx_timestamp_status(priv, p)) {
486 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
487 found = true;
488 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
489 found = true;
490 }
492 if (found) {
493 /* Correct the clk domain crossing(CDC) error */
494 if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
495 adjust += -(2 * (NSEC_PER_SEC /
496 priv->plat->clk_ptp_rate));
497 ns += adjust;
498 }
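/* Illustrative (clock rate assumed): with clk_ptp_rate = 250 MHz, the CDC
 * compensation above subtracts 2 * (NSEC_PER_SEC / 250000000) = 8 ns, i.e.
 * two PTP clock periods, from the raw timestamp.
 */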
500 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
501 shhwtstamp.hwtstamp = ns_to_ktime(ns);
503 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
504 /* pass tstamp to stack */
505 skb_tstamp_tx(skb, &shhwtstamp);
506 }
507 }
509 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
510 * @priv: driver private structure
511 * @p : descriptor pointer
512 * @np : next descriptor pointer
513 * @skb : the socket buffer
514 * Description:
515 * This function reads the received packet's timestamp from the descriptor,
516 * performs some sanity checks, and passes it to the stack.
517 */
518 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
519 struct dma_desc *np, struct sk_buff *skb)
520 {
521 struct skb_shared_hwtstamps *shhwtstamp = NULL;
522 struct dma_desc *desc = p;
523 u64 adjust = 0;
524 u64 ns = 0;
526 if (!priv->hwts_rx_en)
527 return;
528 /* For GMAC4, the valid timestamp is from CTX next desc. */
529 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
530 desc = np;
532 /* Check if timestamp is available */
533 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
534 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
536 /* Correct the clk domain crossing(CDC) error */
537 if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
538 adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
539 ns += adjust;
540 }
542 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
543 shhwtstamp = skb_hwtstamps(skb);
544 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
545 shhwtstamp->hwtstamp = ns_to_ktime(ns);
546 } else {
547 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
548 }
549 }
551 /**
552 * stmmac_hwtstamp_set - control hardware timestamping.
553 * @dev: device pointer.
554 * @ifr: An IOCTL specific structure, that can contain a pointer to
555 * a proprietary structure used to pass information to the driver.
556 * Description:
557 * This function configures the MAC to enable/disable both outgoing (TX)
558 * and incoming (RX) packet timestamping based on user input.
559 * Return Value:
560 * 0 on success and an appropriate -ve integer on failure.
561 */
562 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
563 {
564 struct stmmac_priv *priv = netdev_priv(dev);
565 struct hwtstamp_config config;
566 struct timespec64 now;
567 u64 temp = 0;
568 u32 ptp_v2 = 0;
569 u32 tstamp_all = 0;
570 u32 ptp_over_ipv4_udp = 0;
571 u32 ptp_over_ipv6_udp = 0;
572 u32 ptp_over_ethernet = 0;
573 u32 snap_type_sel = 0;
574 u32 ts_master_en = 0;
575 u32 ts_event_en = 0;
576 u32 sec_inc = 0;
577 u32 value = 0;
578 bool xmac;
580 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
582 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
583 netdev_alert(priv->dev, "No support for HW time stamping\n");
584 priv->hwts_tx_en = 0;
585 priv->hwts_rx_en = 0;
587 return -EOPNOTSUPP;
588 }
590 if (copy_from_user(&config, ifr->ifr_data,
591 sizeof(config)))
592 return -EFAULT;
594 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
595 __func__, config.flags, config.tx_type, config.rx_filter);
597 /* reserved for future extensions */
598 if (config.flags)
599 return -EINVAL;
601 if (config.tx_type != HWTSTAMP_TX_OFF &&
602 config.tx_type != HWTSTAMP_TX_ON)
603 return -ERANGE;
605 if (priv->adv_ts) {
606 switch (config.rx_filter) {
607 case HWTSTAMP_FILTER_NONE:
608 /* time stamp no incoming packet at all */
609 config.rx_filter = HWTSTAMP_FILTER_NONE;
610 break;
612 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
613 /* PTP v1, UDP, any kind of event packet */
614 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
615 /* 'xmac' hardware can support Sync, Pdelay_Req and
616 * Pdelay_resp by setting bit14 and bits17/16 to 01
617 * This leaves Delay_Req timestamps out.
618 * Enable all events *and* general purpose message
619 * timestamping
620 */
621 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
622 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
623 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
624 break;
626 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
627 /* PTP v1, UDP, Sync packet */
628 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
629 /* take time stamp for SYNC messages only */
630 ts_event_en = PTP_TCR_TSEVNTENA;
632 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
633 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
634 break;
636 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
637 /* PTP v1, UDP, Delay_req packet */
638 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
639 /* take time stamp for Delay_Req messages only */
640 ts_master_en = PTP_TCR_TSMSTRENA;
641 ts_event_en = PTP_TCR_TSEVNTENA;
643 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
644 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
645 break;
647 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
648 /* PTP v2, UDP, any kind of event packet */
649 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
650 ptp_v2 = PTP_TCR_TSVER2ENA;
651 /* take time stamp for all event messages */
652 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
655 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
656 break;
658 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
659 /* PTP v2, UDP, Sync packet */
660 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
661 ptp_v2 = PTP_TCR_TSVER2ENA;
662 /* take time stamp for SYNC messages only */
663 ts_event_en = PTP_TCR_TSEVNTENA;
665 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
666 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
667 break;
669 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
670 /* PTP v2, UDP, Delay_req packet */
671 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
672 ptp_v2 = PTP_TCR_TSVER2ENA;
673 /* take time stamp for Delay_Req messages only */
674 ts_master_en = PTP_TCR_TSMSTRENA;
675 ts_event_en = PTP_TCR_TSEVNTENA;
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 break;
681 case HWTSTAMP_FILTER_PTP_V2_EVENT:
682 /* PTP v2/802.AS1 any layer, any kind of event packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
684 ptp_v2 = PTP_TCR_TSVER2ENA;
685 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
686 if (priv->synopsys_id != DWMAC_CORE_5_10)
687 ts_event_en = PTP_TCR_TSEVNTENA;
688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 ptp_over_ethernet = PTP_TCR_TSIPENA;
691 break;
693 case HWTSTAMP_FILTER_PTP_V2_SYNC:
694 /* PTP v2/802.AS1, any layer, Sync packet */
695 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
696 ptp_v2 = PTP_TCR_TSVER2ENA;
697 /* take time stamp for SYNC messages only */
698 ts_event_en = PTP_TCR_TSEVNTENA;
700 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
701 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
702 ptp_over_ethernet = PTP_TCR_TSIPENA;
703 break;
705 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
706 /* PTP v2/802.AS1, any layer, Delay_req packet */
707 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
708 ptp_v2 = PTP_TCR_TSVER2ENA;
709 /* take time stamp for Delay_Req messages only */
710 ts_master_en = PTP_TCR_TSMSTRENA;
711 ts_event_en = PTP_TCR_TSEVNTENA;
713 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
714 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
715 ptp_over_ethernet = PTP_TCR_TSIPENA;
716 break;
718 case HWTSTAMP_FILTER_NTP_ALL:
719 case HWTSTAMP_FILTER_ALL:
720 /* time stamp any incoming packet */
721 config.rx_filter = HWTSTAMP_FILTER_ALL;
722 tstamp_all = PTP_TCR_TSENALL;
723 break;
725 default:
726 return -ERANGE;
727 }
728 } else {
729 switch (config.rx_filter) {
730 case HWTSTAMP_FILTER_NONE:
731 config.rx_filter = HWTSTAMP_FILTER_NONE;
732 break;
733 default:
734 /* PTP v1, UDP, any kind of event packet */
735 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
736 break;
737 }
738 }
739 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
740 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
742 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
743 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
744 else {
745 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
746 tstamp_all | ptp_v2 | ptp_over_ethernet |
747 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
748 ts_master_en | snap_type_sel);
749 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
751 /* program Sub Second Increment reg */
752 stmmac_config_sub_second_increment(priv,
753 priv->ptpaddr, priv->plat->clk_ptp_rate,
754 xmac, &sec_inc);
755 temp = div_u64(1000000000ULL, sec_inc);
757 /* Store sub second increment and flags for later use */
758 priv->sub_second_inc = sec_inc;
759 priv->systime_flags = value;
761 /* calculate the default addend value:
762 * formula is:
763 * addend = (2^32)/freq_div_ratio;
764 * where, freq_div_ratio = 1e9ns/sec_inc
765 */
766 temp = (u64)(temp << 32);
767 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
768 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
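/* Worked example (clock rate assumed): with clk_ptp_rate = 50 MHz and the
 * fine-update method, sec_inc is programmed to two clock periods = 40 ns,
 * so temp = 1e9 / 40 = 25000000 and
 * default_addend = (25000000 << 32) / 50000000 = 0x80000000, i.e. the
 * accumulator overflows once every two ptp clock cycles.
 */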
770 /* initialize system time */
771 ktime_get_real_ts64(&now);
773 /* lower 32 bits of tv_sec are safe until y2106 */
774 stmmac_init_systime(priv, priv->ptpaddr,
775 (u32)now.tv_sec, now.tv_nsec);
776 }
778 memcpy(&priv->tstamp_config, &config, sizeof(config));
780 return copy_to_user(ifr->ifr_data, &config,
781 sizeof(config)) ? -EFAULT : 0;
782 }
784 /**
785 * stmmac_hwtstamp_get - read hardware timestamping.
786 * @dev: device pointer.
787 * @ifr: An IOCTL specific structure, that can contain a pointer to
788 * a proprietary structure used to pass information to the driver.
789 * Description:
790 * This function obtains the current hardware timestamping settings
791 * as requested.
792 */
793 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
794 {
795 struct stmmac_priv *priv = netdev_priv(dev);
796 struct hwtstamp_config *config = &priv->tstamp_config;
798 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
799 return -EOPNOTSUPP;
801 return copy_to_user(ifr->ifr_data, config,
802 sizeof(*config)) ? -EFAULT : 0;
803 }
805 /**
806 * stmmac_init_ptp - init PTP
807 * @priv: driver private structure
808 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
809 * This is done by looking at the HW cap. register.
810 * This function also registers the ptp driver.
811 */
812 static int stmmac_init_ptp(struct stmmac_priv *priv)
813 {
814 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
816 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
817 return -EOPNOTSUPP;
819 priv->adv_ts = 0;
820 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
821 if (xmac && priv->dma_cap.atime_stamp)
822 priv->adv_ts = 1;
823 /* Dwmac 3.x core with extend_desc can support adv_ts */
824 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
825 priv->adv_ts = 1;
827 if (priv->dma_cap.time_stamp)
828 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
830 if (priv->adv_ts)
831 netdev_info(priv->dev,
832 "IEEE 1588-2008 Advanced Timestamp supported\n");
834 priv->hwts_tx_en = 0;
835 priv->hwts_rx_en = 0;
837 stmmac_ptp_register(priv);
839 return 0;
840 }
842 static void stmmac_release_ptp(struct stmmac_priv *priv)
843 {
844 clk_disable_unprepare(priv->plat->clk_ptp_ref);
845 stmmac_ptp_unregister(priv);
846 }
848 /**
849 * stmmac_mac_flow_ctrl - Configure flow control in all queues
850 * @priv: driver private structure
851 * @duplex: duplex passed to the next function
852 * Description: It is used for configuring the flow control in all queues
853 */
854 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
855 {
856 u32 tx_cnt = priv->plat->tx_queues_to_use;
858 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
859 priv->pause, tx_cnt);
860 }
862 static void stmmac_validate(struct phylink_config *config,
863 unsigned long *supported,
864 struct phylink_link_state *state)
865 {
866 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
867 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
868 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
869 int tx_cnt = priv->plat->tx_queues_to_use;
870 int max_speed = priv->plat->max_speed;
872 phylink_set(mac_supported, 10baseT_Half);
873 phylink_set(mac_supported, 10baseT_Full);
874 phylink_set(mac_supported, 100baseT_Half);
875 phylink_set(mac_supported, 100baseT_Full);
876 phylink_set(mac_supported, 1000baseT_Half);
877 phylink_set(mac_supported, 1000baseT_Full);
878 phylink_set(mac_supported, 1000baseKX_Full);
880 phylink_set(mac_supported, Autoneg);
881 phylink_set(mac_supported, Pause);
882 phylink_set(mac_supported, Asym_Pause);
883 phylink_set_port_modes(mac_supported);
885 /* Cut down 1G if asked to */
886 if ((max_speed > 0) && (max_speed < 1000)) {
887 phylink_set(mask, 1000baseT_Full);
888 phylink_set(mask, 1000baseX_Full);
889 } else if (priv->plat->has_xgmac) {
890 if (!max_speed || (max_speed >= 2500)) {
891 phylink_set(mac_supported, 2500baseT_Full);
892 phylink_set(mac_supported, 2500baseX_Full);
893 }
894 if (!max_speed || (max_speed >= 5000)) {
895 phylink_set(mac_supported, 5000baseT_Full);
896 }
897 if (!max_speed || (max_speed >= 10000)) {
898 phylink_set(mac_supported, 10000baseSR_Full);
899 phylink_set(mac_supported, 10000baseLR_Full);
900 phylink_set(mac_supported, 10000baseER_Full);
901 phylink_set(mac_supported, 10000baseLRM_Full);
902 phylink_set(mac_supported, 10000baseT_Full);
903 phylink_set(mac_supported, 10000baseKX4_Full);
904 phylink_set(mac_supported, 10000baseKR_Full);
905 }
906 if (!max_speed || (max_speed >= 25000)) {
907 phylink_set(mac_supported, 25000baseCR_Full);
908 phylink_set(mac_supported, 25000baseKR_Full);
909 phylink_set(mac_supported, 25000baseSR_Full);
910 }
911 if (!max_speed || (max_speed >= 40000)) {
912 phylink_set(mac_supported, 40000baseKR4_Full);
913 phylink_set(mac_supported, 40000baseCR4_Full);
914 phylink_set(mac_supported, 40000baseSR4_Full);
915 phylink_set(mac_supported, 40000baseLR4_Full);
916 }
917 if (!max_speed || (max_speed >= 50000)) {
918 phylink_set(mac_supported, 50000baseCR2_Full);
919 phylink_set(mac_supported, 50000baseKR2_Full);
920 phylink_set(mac_supported, 50000baseSR2_Full);
921 phylink_set(mac_supported, 50000baseKR_Full);
922 phylink_set(mac_supported, 50000baseSR_Full);
923 phylink_set(mac_supported, 50000baseCR_Full);
924 phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
925 phylink_set(mac_supported, 50000baseDR_Full);
926 }
927 if (!max_speed || (max_speed >= 100000)) {
928 phylink_set(mac_supported, 100000baseKR4_Full);
929 phylink_set(mac_supported, 100000baseSR4_Full);
930 phylink_set(mac_supported, 100000baseCR4_Full);
931 phylink_set(mac_supported, 100000baseLR4_ER4_Full);
932 phylink_set(mac_supported, 100000baseKR2_Full);
933 phylink_set(mac_supported, 100000baseSR2_Full);
934 phylink_set(mac_supported, 100000baseCR2_Full);
935 phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
936 phylink_set(mac_supported, 100000baseDR2_Full);
937 }
938 }
940 /* Half-Duplex can only work with single queue */
941 if (tx_cnt > 1) {
942 phylink_set(mask, 10baseT_Half);
943 phylink_set(mask, 100baseT_Half);
944 phylink_set(mask, 1000baseT_Half);
945 }
947 linkmode_and(supported, supported, mac_supported);
948 linkmode_andnot(supported, supported, mask);
950 linkmode_and(state->advertising, state->advertising, mac_supported);
951 linkmode_andnot(state->advertising, state->advertising, mask);
953 /* If PCS is supported, check which modes it supports. */
954 stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
955 }
957 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
958 struct phylink_link_state *state)
959 {
960 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
962 state->link = 0;
963 stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
964 }
966 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
967 const struct phylink_link_state *state)
968 {
969 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971 stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
972 }
974 static void stmmac_mac_an_restart(struct phylink_config *config)
975 {
976 /* Not Supported */
977 }
979 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
980 {
981 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
982 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
983 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
984 bool *hs_enable = &fpe_cfg->hs_enable;
986 if (is_up && *hs_enable) {
987 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
988 } else {
989 *lo_state = FPE_EVENT_UNKNOWN;
990 *lp_state = FPE_EVENT_UNKNOWN;
991 }
992 }
994 static void stmmac_mac_link_down(struct phylink_config *config,
995 unsigned int mode, phy_interface_t interface)
996 {
997 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
999 stmmac_mac_set(priv, priv->ioaddr, false);
1000 priv->eee_active = false;
1001 priv->tx_lpi_enabled = false;
1002 stmmac_eee_init(priv);
1003 stmmac_set_eee_pls(priv, priv->hw, false);
1005 stmmac_fpe_link_state_handle(priv, false);
1006 }
1008 static void stmmac_mac_link_up(struct phylink_config *config,
1009 struct phy_device *phy,
1010 unsigned int mode, phy_interface_t interface,
1011 int speed, int duplex,
1012 bool tx_pause, bool rx_pause)
1013 {
1014 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1015 u32 ctrl;
1017 stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
1019 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1020 ctrl &= ~priv->hw->link.speed_mask;
1022 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1023 switch (speed) {
1024 case SPEED_10000:
1025 ctrl |= priv->hw->link.xgmii.speed10000;
1026 break;
1027 case SPEED_5000:
1028 ctrl |= priv->hw->link.xgmii.speed5000;
1029 break;
1030 case SPEED_2500:
1031 ctrl |= priv->hw->link.xgmii.speed2500;
1032 break;
1033 default:
1034 return;
1035 }
1036 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1037 switch (speed) {
1038 case SPEED_100000:
1039 ctrl |= priv->hw->link.xlgmii.speed100000;
1040 break;
1041 case SPEED_50000:
1042 ctrl |= priv->hw->link.xlgmii.speed50000;
1043 break;
1044 case SPEED_40000:
1045 ctrl |= priv->hw->link.xlgmii.speed40000;
1046 break;
1047 case SPEED_25000:
1048 ctrl |= priv->hw->link.xlgmii.speed25000;
1049 break;
1050 case SPEED_10000:
1051 ctrl |= priv->hw->link.xgmii.speed10000;
1052 break;
1053 case SPEED_2500:
1054 ctrl |= priv->hw->link.speed2500;
1055 break;
1056 case SPEED_1000:
1057 ctrl |= priv->hw->link.speed1000;
1058 break;
1059 default:
1060 return;
1061 }
1062 } else {
1063 switch (speed) {
1064 case SPEED_2500:
1065 ctrl |= priv->hw->link.speed2500;
1066 break;
1067 case SPEED_1000:
1068 ctrl |= priv->hw->link.speed1000;
1069 break;
1070 case SPEED_100:
1071 ctrl |= priv->hw->link.speed100;
1072 break;
1073 case SPEED_10:
1074 ctrl |= priv->hw->link.speed10;
1075 break;
1076 default:
1077 return;
1078 }
1079 }
1081 priv->speed = speed;
1083 if (priv->plat->fix_mac_speed)
1084 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1086 if (!duplex)
1087 ctrl &= ~priv->hw->link.duplex;
1088 else
1089 ctrl |= priv->hw->link.duplex;
1091 /* Flow Control operation */
1092 if (tx_pause && rx_pause)
1093 stmmac_mac_flow_ctrl(priv, duplex);
1095 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1097 stmmac_mac_set(priv, priv->ioaddr, true);
1098 if (phy && priv->dma_cap.eee) {
1099 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1100 priv->eee_enabled = stmmac_eee_init(priv);
1101 priv->tx_lpi_enabled = priv->eee_enabled;
1102 stmmac_set_eee_pls(priv, priv->hw, true);
1103 }
1105 stmmac_fpe_link_state_handle(priv, true);
1106 }
1108 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1109 .validate = stmmac_validate,
1110 .mac_pcs_get_state = stmmac_mac_pcs_get_state,
1111 .mac_config = stmmac_mac_config,
1112 .mac_an_restart = stmmac_mac_an_restart,
1113 .mac_link_down = stmmac_mac_link_down,
1114 .mac_link_up = stmmac_mac_link_up,
1115 };
1117 /**
1118 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1119 * @priv: driver private structure
1120 * Description: this is to verify if the HW supports the PCS.
1121 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1122 * configured for the TBI, RTBI, or SGMII PHY interface.
1123 */
1124 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1125 {
1126 int interface = priv->plat->interface;
1128 if (priv->dma_cap.pcs) {
1129 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1130 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1131 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1132 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1133 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1134 priv->hw->pcs = STMMAC_PCS_RGMII;
1135 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1136 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1137 priv->hw->pcs = STMMAC_PCS_SGMII;
1138 }
1139 }
1140 }
1142 /**
1143 * stmmac_init_phy - PHY initialization
1144 * @dev: net device structure
1145 * Description: it initializes the driver's PHY state, and attaches the PHY
1146 * to the mac driver.
1147 * Return value:
1148 * 0 on success
1149 */
1150 static int stmmac_init_phy(struct net_device *dev)
1151 {
1152 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1153 struct stmmac_priv *priv = netdev_priv(dev);
1154 struct device_node *node;
1155 int ret;
1157 node = priv->plat->phylink_node;
1159 if (node)
1160 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1162 /* Some DT bindings do not set-up the PHY handle. Let's try to
1163 * manually parse it
1164 */
1165 if (!node || ret) {
1166 int addr = priv->plat->phy_addr;
1167 struct phy_device *phydev;
1169 phydev = mdiobus_get_phy(priv->mii, addr);
1170 if (!phydev) {
1171 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1172 return -ENODEV;
1173 }
1175 ret = phylink_connect_phy(priv->phylink, phydev);
1176 }
1178 phylink_ethtool_get_wol(priv->phylink, &wol);
1179 device_set_wakeup_capable(priv->device, !!wol.supported);
1181 return ret;
1182 }
1184 static int stmmac_phy_setup(struct stmmac_priv *priv)
1185 {
1186 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1187 int mode = priv->plat->phy_interface;
1188 struct phylink *phylink;
1190 priv->phylink_config.dev = &priv->dev->dev;
1191 priv->phylink_config.type = PHYLINK_NETDEV;
1192 priv->phylink_config.pcs_poll = true;
1193 priv->phylink_config.ovr_an_inband =
1194 priv->plat->mdio_bus_data->xpcs_an_inband;
1196 if (!fwnode)
1197 fwnode = dev_fwnode(priv->device);
1199 phylink = phylink_create(&priv->phylink_config, fwnode,
1200 mode, &stmmac_phylink_mac_ops);
1201 if (IS_ERR(phylink))
1202 return PTR_ERR(phylink);
1204 priv->phylink = phylink;
1205 return 0;
1206 }
1208 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1209 {
1210 u32 rx_cnt = priv->plat->rx_queues_to_use;
1211 unsigned int desc_size;
1212 void *head_rx;
1213 u32 queue;
1215 /* Display RX rings */
1216 for (queue = 0; queue < rx_cnt; queue++) {
1217 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1219 pr_info("\tRX Queue %u rings\n", queue);
1221 if (priv->extend_desc) {
1222 head_rx = (void *)rx_q->dma_erx;
1223 desc_size = sizeof(struct dma_extended_desc);
1225 head_rx = (void *)rx_q->dma_rx;
1226 desc_size = sizeof(struct dma_desc);
1227 }
1229 /* Display RX ring */
1230 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1231 rx_q->dma_rx_phy, desc_size);
1232 }
1233 }
1235 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1236 {
1237 u32 tx_cnt = priv->plat->tx_queues_to_use;
1238 unsigned int desc_size;
1239 void *head_tx;
1240 u32 queue;
1242 /* Display TX rings */
1243 for (queue = 0; queue < tx_cnt; queue++) {
1244 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1246 pr_info("\tTX Queue %d rings\n", queue);
1248 if (priv->extend_desc) {
1249 head_tx = (void *)tx_q->dma_etx;
1250 desc_size = sizeof(struct dma_extended_desc);
1251 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1252 head_tx = (void *)tx_q->dma_entx;
1253 desc_size = sizeof(struct dma_edesc);
1255 head_tx = (void *)tx_q->dma_tx;
1256 desc_size = sizeof(struct dma_desc);
1257 }
1259 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1260 tx_q->dma_tx_phy, desc_size);
1261 }
1262 }
1264 static void stmmac_display_rings(struct stmmac_priv *priv)
1265 {
1266 /* Display RX ring */
1267 stmmac_display_rx_rings(priv);
1269 /* Display TX ring */
1270 stmmac_display_tx_rings(priv);
1271 }
1273 static int stmmac_set_bfsize(int mtu, int bufsize)
1274 {
1275 int ret = bufsize;
1277 if (mtu >= BUF_SIZE_8KiB)
1278 ret = BUF_SIZE_16KiB;
1279 else if (mtu >= BUF_SIZE_4KiB)
1280 ret = BUF_SIZE_8KiB;
1281 else if (mtu >= BUF_SIZE_2KiB)
1282 ret = BUF_SIZE_4KiB;
1283 else if (mtu > DEFAULT_BUFSIZE)
1284 ret = BUF_SIZE_2KiB;
1285 else
1286 ret = DEFAULT_BUFSIZE;
1288 return ret;
1289 }
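/* Illustrative mapping: the default 1500-byte MTU falls through to
 * DEFAULT_BUFSIZE (1536 bytes), a 5000-byte MTU selects BUF_SIZE_8KiB and
 * a 9000-byte jumbo MTU selects BUF_SIZE_16KiB.
 */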
1291 /**
1292 * stmmac_clear_rx_descriptors - clear RX descriptors
1293 * @priv: driver private structure
1294 * @queue: RX queue index
1295 * Description: this function is called to clear the RX descriptors
1296 * whether basic or extended descriptors are used.
1297 */
1298 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1299 {
1300 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1301 int i;
1303 /* Clear the RX descriptors */
1304 for (i = 0; i < priv->dma_rx_size; i++)
1305 if (priv->extend_desc)
1306 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1307 priv->use_riwt, priv->mode,
1308 (i == priv->dma_rx_size - 1),
1309 priv->dma_buf_sz);
1310 else
1311 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1312 priv->use_riwt, priv->mode,
1313 (i == priv->dma_rx_size - 1),
1314 priv->dma_buf_sz);
1315 }
1317 /**
1318 * stmmac_clear_tx_descriptors - clear tx descriptors
1319 * @priv: driver private structure
1320 * @queue: TX queue index.
1321 * Description: this function is called to clear the TX descriptors
1322 * whether basic or extended descriptors are used.
1323 */
1324 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1325 {
1326 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1327 int i;
1329 /* Clear the TX descriptors */
1330 for (i = 0; i < priv->dma_tx_size; i++) {
1331 int last = (i == (priv->dma_tx_size - 1));
1332 struct dma_desc *p;
1334 if (priv->extend_desc)
1335 p = &tx_q->dma_etx[i].basic;
1336 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1337 p = &tx_q->dma_entx[i].basic;
1339 p = &tx_q->dma_tx[i];
1341 stmmac_init_tx_desc(priv, p, priv->mode, last);
1342 }
1343 }
1345 /**
1346 * stmmac_clear_descriptors - clear descriptors
1347 * @priv: driver private structure
1348 * Description: this function is called to clear the TX and RX descriptors
1349 * whether basic or extended descriptors are used.
1350 */
1351 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1352 {
1353 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1354 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1355 u32 queue;
1357 /* Clear the RX descriptors */
1358 for (queue = 0; queue < rx_queue_cnt; queue++)
1359 stmmac_clear_rx_descriptors(priv, queue);
1361 /* Clear the TX descriptors */
1362 for (queue = 0; queue < tx_queue_cnt; queue++)
1363 stmmac_clear_tx_descriptors(priv, queue);
1364 }
1366 /**
1367 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1368 * @priv: driver private structure
1369 * @p: descriptor pointer
1370 * @i: descriptor index
1371 * @flags: gfp flag
1372 * @queue: RX queue index
1373 * Description: this function is called to allocate a receive buffer, perform
1374 * the DMA mapping and init the descriptor.
1375 */
1376 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1377 int i, gfp_t flags, u32 queue)
1378 {
1379 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1380 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1382 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1383 if (!buf->page)
1384 return -ENOMEM;
1386 if (priv->sph) {
1387 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1388 if (!buf->sec_page)
1389 return -ENOMEM;
1391 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1392 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1393 } else {
1394 buf->sec_page = NULL;
1395 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1396 }
1398 buf->addr = page_pool_get_dma_addr(buf->page);
1399 stmmac_set_desc_addr(priv, p, buf->addr);
1400 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1401 stmmac_init_desc3(priv, p);
1403 return 0;
1404 }
1406 /**
1407 * stmmac_free_rx_buffer - free RX dma buffers
1408 * @priv: private structure
1409 * @queue: RX queue index
1410 * @i: buffer index.
1411 */
1412 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1413 {
1414 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1415 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1417 if (buf->page)
1418 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1419 buf->page = NULL;
1421 if (buf->sec_page)
1422 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1423 buf->sec_page = NULL;
1424 }
1426 /**
1427 * stmmac_free_tx_buffer - free TX dma buffers
1428 * @priv: private structure
1429 * @queue: TX queue index
1430 * @i: buffer index.
1431 */
1432 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1433 {
1434 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1436 if (tx_q->tx_skbuff_dma[i].buf) {
1437 if (tx_q->tx_skbuff_dma[i].map_as_page)
1438 dma_unmap_page(priv->device,
1439 tx_q->tx_skbuff_dma[i].buf,
1440 tx_q->tx_skbuff_dma[i].len,
1441 DMA_TO_DEVICE);
1442 else
1443 dma_unmap_single(priv->device,
1444 tx_q->tx_skbuff_dma[i].buf,
1445 tx_q->tx_skbuff_dma[i].len,
1446 DMA_TO_DEVICE);
1447 }
1449 if (tx_q->tx_skbuff[i]) {
1450 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1451 tx_q->tx_skbuff[i] = NULL;
1452 tx_q->tx_skbuff_dma[i].buf = 0;
1453 tx_q->tx_skbuff_dma[i].map_as_page = false;
1454 }
1455 }
1457 /**
1458 * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
1459 * @priv: driver private structure
1460 * Description: this function is called to re-allocate a receive buffer, perform
1461 * the DMA mapping and init the descriptor.
1462 */
1463 static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
1465 u32 rx_count = priv->plat->rx_queues_to_use;
1469 for (queue = 0; queue < rx_count; queue++) {
1470 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1472 for (i = 0; i < priv->dma_rx_size; i++) {
1473 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1476 page_pool_recycle_direct(rx_q->page_pool, buf->page);
1480 if (priv->sph && buf->sec_page) {
1481 page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
1482 buf->sec_page = NULL;
1487 for (queue = 0; queue < rx_count; queue++) {
1488 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1490 for (i = 0; i < priv->dma_rx_size; i++) {
1491 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1494 if (priv->extend_desc)
1495 p = &((rx_q->dma_erx + i)->basic);
1497 p = rx_q->dma_rx + i;
1500 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1502 goto err_reinit_rx_buffers;
1504 buf->addr = page_pool_get_dma_addr(buf->page);
1507 if (priv->sph && !buf->sec_page) {
1508 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1510 goto err_reinit_rx_buffers;
1512 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1515 stmmac_set_desc_addr(priv, p, buf->addr);
1517 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1519 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1520 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1521 stmmac_init_desc3(priv, p);
1527 err_reinit_rx_buffers:
1530 stmmac_free_rx_buffer(priv, queue, i);
1535 i = priv->dma_rx_size;
1536 } while (queue-- > 0);
1540 * init_dma_rx_desc_rings - init the RX descriptor rings
1541 * @dev: net device structure
1542 * @flags: gfp flag.
1543 * Description: this function initializes the DMA RX descriptors
1544 * and allocates the socket buffers. It supports the chained and ring
1545 * modes.
1546 */
1547 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1549 struct stmmac_priv *priv = netdev_priv(dev);
1550 u32 rx_count = priv->plat->rx_queues_to_use;
1555 /* RX INITIALIZATION */
1556 netif_dbg(priv, probe, priv->dev,
1557 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1559 for (queue = 0; queue < rx_count; queue++) {
1560 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1562 netif_dbg(priv, probe, priv->dev,
1563 "(%s) dma_rx_phy=0x%08x\n", __func__,
1564 (u32)rx_q->dma_rx_phy);
1566 stmmac_clear_rx_descriptors(priv, queue);
1568 for (i = 0; i < priv->dma_rx_size; i++) {
1571 if (priv->extend_desc)
1572 p = &((rx_q->dma_erx + i)->basic);
1574 p = rx_q->dma_rx + i;
1576 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1579 goto err_init_rx_buffers;
1583 rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1585 /* Setup the chained descriptor addresses */
1586 if (priv->mode == STMMAC_CHAIN_MODE) {
1587 if (priv->extend_desc)
1588 stmmac_mode_init(priv, rx_q->dma_erx,
1590 priv->dma_rx_size, 1);
1592 stmmac_mode_init(priv, rx_q->dma_rx,
1594 priv->dma_rx_size, 0);
1600 err_init_rx_buffers:
1601 while (queue >= 0) {
1603 stmmac_free_rx_buffer(priv, queue, i);
1608 i = priv->dma_rx_size;
1616 * init_dma_tx_desc_rings - init the TX descriptor rings
1617 * @dev: net device structure.
1618 * Description: this function initializes the DMA TX descriptors
1619 * and allocates the socket buffers. It supports the chained and ring
1620 * modes.
1621 */
1622 static int init_dma_tx_desc_rings(struct net_device *dev)
1624 struct stmmac_priv *priv = netdev_priv(dev);
1625 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1629 for (queue = 0; queue < tx_queue_cnt; queue++) {
1630 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1632 netif_dbg(priv, probe, priv->dev,
1633 "(%s) dma_tx_phy=0x%08x\n", __func__,
1634 (u32)tx_q->dma_tx_phy);
1636 /* Setup the chained descriptor addresses */
1637 if (priv->mode == STMMAC_CHAIN_MODE) {
1638 if (priv->extend_desc)
1639 stmmac_mode_init(priv, tx_q->dma_etx,
1641 priv->dma_tx_size, 1);
1642 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1643 stmmac_mode_init(priv, tx_q->dma_tx,
1645 priv->dma_tx_size, 0);
1648 for (i = 0; i < priv->dma_tx_size; i++) {
1650 if (priv->extend_desc)
1651 p = &((tx_q->dma_etx + i)->basic);
1652 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1653 p = &((tx_q->dma_entx + i)->basic);
1655 p = tx_q->dma_tx + i;
1657 stmmac_clear_desc(priv, p);
1659 tx_q->tx_skbuff_dma[i].buf = 0;
1660 tx_q->tx_skbuff_dma[i].map_as_page = false;
1661 tx_q->tx_skbuff_dma[i].len = 0;
1662 tx_q->tx_skbuff_dma[i].last_segment = false;
1663 tx_q->tx_skbuff[i] = NULL;
1670 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1677 * init_dma_desc_rings - init the RX/TX descriptor rings
1678 * @dev: net device structure
1679 * @flags: gfp flag.
1680 * Description: this function initializes the DMA RX/TX descriptors
1681 * and allocates the socket buffers. It supports the chained and ring
1682 * modes.
1683 */
1684 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1686 struct stmmac_priv *priv = netdev_priv(dev);
1689 ret = init_dma_rx_desc_rings(dev, flags);
1693 ret = init_dma_tx_desc_rings(dev);
1695 stmmac_clear_descriptors(priv);
1697 if (netif_msg_hw(priv))
1698 stmmac_display_rings(priv);
1704 * dma_free_rx_skbufs - free RX dma buffers
1705 * @priv: private structure
1706 * @queue: RX queue index
1708 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1712 for (i = 0; i < priv->dma_rx_size; i++)
1713 stmmac_free_rx_buffer(priv, queue, i);
1717 * dma_free_tx_skbufs - free TX dma buffers
1718 * @priv: private structure
1719 * @queue: TX queue index
1721 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1725 for (i = 0; i < priv->dma_tx_size; i++)
1726 stmmac_free_tx_buffer(priv, queue, i);
1730 * stmmac_free_tx_skbufs - free TX skb buffers
1731 * @priv: private structure
1732 */
1733 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1735 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1738 for (queue = 0; queue < tx_queue_cnt; queue++)
1739 dma_free_tx_skbufs(priv, queue);
1743 * free_dma_rx_desc_resources - free RX dma desc resources
1744 * @priv: private structure
1746 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1748 u32 rx_count = priv->plat->rx_queues_to_use;
1751 /* Free RX queue resources */
1752 for (queue = 0; queue < rx_count; queue++) {
1753 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1755 /* Release the DMA RX socket buffers */
1756 dma_free_rx_skbufs(priv, queue);
1758 /* Free DMA regions of consistent memory previously allocated */
1759 if (!priv->extend_desc)
1760 dma_free_coherent(priv->device, priv->dma_rx_size *
1761 sizeof(struct dma_desc),
1762 rx_q->dma_rx, rx_q->dma_rx_phy);
1764 dma_free_coherent(priv->device, priv->dma_rx_size *
1765 sizeof(struct dma_extended_desc),
1766 rx_q->dma_erx, rx_q->dma_rx_phy);
1768 kfree(rx_q->buf_pool);
1769 if (rx_q->page_pool)
1770 page_pool_destroy(rx_q->page_pool);
1775 * free_dma_tx_desc_resources - free TX dma desc resources
1776 * @priv: private structure
1778 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1780 u32 tx_count = priv->plat->tx_queues_to_use;
1783 /* Free TX queue resources */
1784 for (queue = 0; queue < tx_count; queue++) {
1785 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1789 /* Release the DMA TX socket buffers */
1790 dma_free_tx_skbufs(priv, queue);
1792 if (priv->extend_desc) {
1793 size = sizeof(struct dma_extended_desc);
1794 addr = tx_q->dma_etx;
1795 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1796 size = sizeof(struct dma_edesc);
1797 addr = tx_q->dma_entx;
1799 size = sizeof(struct dma_desc);
1800 addr = tx_q->dma_tx;
1803 size *= priv->dma_tx_size;
1805 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1807 kfree(tx_q->tx_skbuff_dma);
1808 kfree(tx_q->tx_skbuff);
1813 * alloc_dma_rx_desc_resources - alloc RX resources.
1814 * @priv: private structure
1815 * Description: according to which descriptor can be used (extend or basic)
1816 * this function allocates the resources for TX and RX paths. In case of
1817 * reception, for example, it pre-allocates the RX socket buffer in order to
1818 * allow the zero-copy mechanism.
1819 */
1820 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1822 u32 rx_count = priv->plat->rx_queues_to_use;
1826 /* RX queues buffers and DMA */
1827 for (queue = 0; queue < rx_count; queue++) {
1828 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1829 struct page_pool_params pp_params = { 0 };
1830 unsigned int num_pages;
1832 rx_q->queue_index = queue;
1833 rx_q->priv_data = priv;
1835 pp_params.flags = PP_FLAG_DMA_MAP;
1836 pp_params.pool_size = priv->dma_rx_size;
1837 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1838 pp_params.order = ilog2(num_pages);
1839 pp_params.nid = dev_to_node(priv->device);
1840 pp_params.dev = priv->device;
1841 pp_params.dma_dir = DMA_FROM_DEVICE;
1843 rx_q->page_pool = page_pool_create(&pp_params);
1844 if (IS_ERR(rx_q->page_pool)) {
1845 ret = PTR_ERR(rx_q->page_pool);
1846 rx_q->page_pool = NULL;
1850 rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1851 sizeof(*rx_q->buf_pool),
1853 if (!rx_q->buf_pool)
1856 if (priv->extend_desc) {
1857 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1859 sizeof(struct dma_extended_desc),
1866 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1868 sizeof(struct dma_desc),
1879 free_dma_rx_desc_resources(priv);
1885 * alloc_dma_tx_desc_resources - alloc TX resources.
1886 * @priv: private structure
1887 * Description: according to which descriptor can be used (extend or basic)
1888 * this function allocates the resources for TX and RX paths. In case of
1889 * reception, for example, it pre-allocates the RX socket buffer in order to
1890 * allow the zero-copy mechanism.
1891 */
1892 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1894 u32 tx_count = priv->plat->tx_queues_to_use;
1898 /* TX queues buffers and DMA */
1899 for (queue = 0; queue < tx_count; queue++) {
1900 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1904 tx_q->queue_index = queue;
1905 tx_q->priv_data = priv;
1907 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1908 sizeof(*tx_q->tx_skbuff_dma),
1910 if (!tx_q->tx_skbuff_dma)
1913 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1914 sizeof(struct sk_buff *),
1916 if (!tx_q->tx_skbuff)
1919 if (priv->extend_desc)
1920 size = sizeof(struct dma_extended_desc);
1921 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1922 size = sizeof(struct dma_edesc);
1924 size = sizeof(struct dma_desc);
1926 size *= priv->dma_tx_size;
1928 addr = dma_alloc_coherent(priv->device, size,
1929 &tx_q->dma_tx_phy, GFP_KERNEL);
1933 if (priv->extend_desc)
1934 tx_q->dma_etx = addr;
1935 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1936 tx_q->dma_entx = addr;
1938 tx_q->dma_tx = addr;
1944 free_dma_tx_desc_resources(priv);
1949 * alloc_dma_desc_resources - alloc TX/RX resources.
1950 * @priv: private structure
1951 * Description: according to which descriptor can be used (extend or basic)
1952 * this function allocates the resources for TX and RX paths. In case of
1953 * reception, for example, it pre-allocates the RX socket buffer in order to
1954 * allow the zero-copy mechanism.
1955 */
1956 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1959 int ret = alloc_dma_rx_desc_resources(priv);
1964 ret = alloc_dma_tx_desc_resources(priv);
1970 * free_dma_desc_resources - free dma desc resources
1971 * @priv: private structure
1973 static void free_dma_desc_resources(struct stmmac_priv *priv)
1975 /* Release the DMA RX socket buffers */
1976 free_dma_rx_desc_resources(priv);
1978 /* Release the DMA TX socket buffers */
1979 free_dma_tx_desc_resources(priv);
1983 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1984 * @priv: driver private structure
1985 * Description: It is used for enabling the rx queues in the MAC
1986 */
1987 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1989 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1993 for (queue = 0; queue < rx_queues_count; queue++) {
1994 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1995 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2000 * stmmac_start_rx_dma - start RX DMA channel
2001 * @priv: driver private structure
2002 * @chan: RX channel index
2003 * Description:
2004 * This starts a RX DMA channel
2005 */
2006 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2008 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2009 stmmac_start_rx(priv, priv->ioaddr, chan);
2013 * stmmac_start_tx_dma - start TX DMA channel
2014 * @priv: driver private structure
2015 * @chan: TX channel index
2016 * Description:
2017 * This starts a TX DMA channel
2018 */
2019 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2021 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2022 stmmac_start_tx(priv, priv->ioaddr, chan);
2026 * stmmac_stop_rx_dma - stop RX DMA channel
2027 * @priv: driver private structure
2028 * @chan: RX channel index
2029 * Description:
2030 * This stops a RX DMA channel
2031 */
2032 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2034 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2035 stmmac_stop_rx(priv, priv->ioaddr, chan);
2039 * stmmac_stop_tx_dma - stop TX DMA channel
2040 * @priv: driver private structure
2041 * @chan: TX channel index
2042 * Description:
2043 * This stops a TX DMA channel
2044 */
2045 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2047 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2048 stmmac_stop_tx(priv, priv->ioaddr, chan);
2052 * stmmac_start_all_dma - start all RX and TX DMA channels
2053 * @priv: driver private structure
2054 * Description:
2055 * This starts all the RX and TX DMA channels
2056 */
2057 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2059 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2060 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2063 for (chan = 0; chan < rx_channels_count; chan++)
2064 stmmac_start_rx_dma(priv, chan);
2066 for (chan = 0; chan < tx_channels_count; chan++)
2067 stmmac_start_tx_dma(priv, chan);
2071 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2072 * @priv: driver private structure
2073 * Description:
2074 * This stops the RX and TX DMA channels
2075 */
2076 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2078 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2079 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2082 for (chan = 0; chan < rx_channels_count; chan++)
2083 stmmac_stop_rx_dma(priv, chan);
2085 for (chan = 0; chan < tx_channels_count; chan++)
2086 stmmac_stop_tx_dma(priv, chan);
2090 * stmmac_dma_operation_mode - HW DMA operation mode
2091 * @priv: driver private structure
2092 * Description: it is used for configuring the DMA operation mode register in
2093 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2094 */
2095 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2097 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2098 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2099 int rxfifosz = priv->plat->rx_fifo_size;
2100 int txfifosz = priv->plat->tx_fifo_size;
2107 rxfifosz = priv->dma_cap.rx_fifo_size;
2109 txfifosz = priv->dma_cap.tx_fifo_size;
2111 /* Adjust for real per queue fifo size */
2112 rxfifosz /= rx_channels_count;
2113 txfifosz /= tx_channels_count;
2115 if (priv->plat->force_thresh_dma_mode) {
2116 txmode = tc;
2117 rxmode = tc;
2118 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2119 /*
2120 * In case of GMAC, SF mode can be enabled
2121 * to perform the TX COE in HW. This depends on:
2122 * 1) TX COE if actually supported
2123 * 2) There is no bugged Jumbo frame support
2124 * that needs to not insert csum in the TDES.
2125 */
2126 txmode = SF_DMA_MODE;
2127 rxmode = SF_DMA_MODE;
2128 priv->xstats.threshold = SF_DMA_MODE;
2129 } else {
2130 txmode = tc;
2131 rxmode = SF_DMA_MODE;
2132 }
2134 /* configure all channels */
2135 for (chan = 0; chan < rx_channels_count; chan++) {
2136 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2138 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2140 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
2144 for (chan = 0; chan < tx_channels_count; chan++) {
2145 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2147 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2153 * stmmac_tx_clean - to manage the transmission completion
2154 * @priv: driver private structure
2155 * @budget: napi budget limiting this function's packet handling
2156 * @queue: TX queue index
2157 * Description: it reclaims the transmit resources after transmission completes.
2159 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2161 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2162 unsigned int bytes_compl = 0, pkts_compl = 0;
2163 unsigned int entry, count = 0;
2165 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2167 priv->xstats.tx_clean++;
2169 entry = tx_q->dirty_tx;
2170 while ((entry != tx_q->cur_tx) && (count < budget)) {
2171 struct sk_buff *skb = tx_q->tx_skbuff[entry];
2175 if (priv->extend_desc)
2176 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2177 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2178 p = &tx_q->dma_entx[entry].basic;
2180 p = tx_q->dma_tx + entry;
2182 status = stmmac_tx_status(priv, &priv->dev->stats,
2183 &priv->xstats, p, priv->ioaddr);
2184 /* Check if the descriptor is owned by the DMA */
2185 if (unlikely(status & tx_dma_own))
2190 /* Make sure descriptor fields are read after reading
2195 /* Just consider the last segment and ...*/
2196 if (likely(!(status & tx_not_ls))) {
2197 /* ... verify the status error condition */
2198 if (unlikely(status & tx_err)) {
2199 priv->dev->stats.tx_errors++;
2201 priv->dev->stats.tx_packets++;
2202 priv->xstats.tx_pkt_n++;
2204 stmmac_get_tx_hwtstamp(priv, p, skb);
2207 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2208 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2209 dma_unmap_page(priv->device,
2210 tx_q->tx_skbuff_dma[entry].buf,
2211 tx_q->tx_skbuff_dma[entry].len,
2214 dma_unmap_single(priv->device,
2215 tx_q->tx_skbuff_dma[entry].buf,
2216 tx_q->tx_skbuff_dma[entry].len,
2218 tx_q->tx_skbuff_dma[entry].buf = 0;
2219 tx_q->tx_skbuff_dma[entry].len = 0;
2220 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2223 stmmac_clean_desc3(priv, tx_q, p);
2225 tx_q->tx_skbuff_dma[entry].last_segment = false;
2226 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2228 if (likely(skb != NULL)) {
2230 bytes_compl += skb->len;
2231 dev_consume_skb_any(skb);
2232 tx_q->tx_skbuff[entry] = NULL;
2235 stmmac_release_tx_desc(priv, p, priv->mode);
2237 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
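/* STMMAC_GET_ENTRY() advances the index modulo the ring size; the
 * macro masks with (size - 1), which relies on the power-of-two ring
 * sizes used by this driver. E.g. with a 512-entry ring, entry 511
 * wraps back to 0.
 */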
2239 tx_q->dirty_tx = entry;
2241 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2242 pkts_compl, bytes_compl);
2244 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2246 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2248 netif_dbg(priv, tx_done, priv->dev,
2249 "%s: restart transmit\n", __func__);
2250 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2253 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2254 priv->eee_sw_timer_en) {
2255 stmmac_enable_eee_mode(priv);
2256 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2259 /* We still have pending packets, let's call for a new scheduling */
2260 if (tx_q->dirty_tx != tx_q->cur_tx)
2261 hrtimer_start(&tx_q->txtimer,
2262 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2265 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2271 * stmmac_tx_err - to manage the tx error
2272 * @priv: driver private structure
2273 * @chan: channel index
2274 * Description: it cleans the descriptors and restarts the transmission
2275 * in case of transmission errors.
2277 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2279 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2281 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2283 stmmac_stop_tx_dma(priv, chan);
2284 dma_free_tx_skbufs(priv, chan);
2285 stmmac_clear_tx_descriptors(priv, chan);
2289 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2290 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2291 tx_q->dma_tx_phy, chan);
2292 stmmac_start_tx_dma(priv, chan);
2294 priv->dev->stats.tx_errors++;
2295 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2299 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2300 * @priv: driver private structure
2301 * @txmode: TX operating mode
2302 * @rxmode: RX operating mode
2303 * @chan: channel index
2304 * Description: it is used for configuring the DMA operation mode at
2305 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2308 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2309 u32 rxmode, u32 chan)
2311 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2312 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2313 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2314 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2315 int rxfifosz = priv->plat->rx_fifo_size;
2316 int txfifosz = priv->plat->tx_fifo_size;
2319 rxfifosz = priv->dma_cap.rx_fifo_size;
2321 txfifosz = priv->dma_cap.tx_fifo_size;
2323 /* Adjust for real per queue fifo size */
2324 rxfifosz /= rx_channels_count;
2325 txfifosz /= tx_channels_count;
2327 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2328 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2331 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2335 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2336 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2337 if (ret && (ret != -EINVAL)) {
2338 stmmac_global_err(priv);
2345 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2347 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2348 &priv->xstats, chan, dir);
2349 struct stmmac_channel *ch = &priv->channel[chan];
2350 unsigned long flags;
2352 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2353 if (napi_schedule_prep(&ch->rx_napi)) {
2354 spin_lock_irqsave(&ch->lock, flags);
2355 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2356 spin_unlock_irqrestore(&ch->lock, flags);
2357 __napi_schedule(&ch->rx_napi);
2361 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2362 if (napi_schedule_prep(&ch->tx_napi)) {
2363 spin_lock_irqsave(&ch->lock, flags);
2364 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2365 spin_unlock_irqrestore(&ch->lock, flags);
2366 __napi_schedule(&ch->tx_napi);
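/* The schedule-prep/disable-irq/schedule sequence above is the usual
 * NAPI handshake: napi_schedule_prep() claims the poll, the per-channel
 * DMA interrupt for that direction is masked under ch->lock so the HW
 * stays quiet while polling, and the poll routine re-enables it via
 * stmmac_enable_dma_irq() once it completes under budget.
 */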
2374 * stmmac_dma_interrupt - DMA ISR
2375 * @priv: driver private structure
2376 * Description: this is the DMA ISR. It is called by the main ISR.
2377 * It calls the dwmac dma routine and schedules the poll method in case of some
2380 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2382 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2383 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2384 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2385 tx_channel_count : rx_channel_count;
2387 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2389 /* Make sure we never check beyond our status buffer. */
2390 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2391 channels_to_check = ARRAY_SIZE(status);
2393 for (chan = 0; chan < channels_to_check; chan++)
2394 status[chan] = stmmac_napi_check(priv, chan,
2397 for (chan = 0; chan < tx_channel_count; chan++) {
2398 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2399 /* Try to bump up the dma threshold on this failure */
2400 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2403 if (priv->plat->force_thresh_dma_mode)
2404 stmmac_set_dma_operation_mode(priv,
2409 stmmac_set_dma_operation_mode(priv,
2413 priv->xstats.threshold = tc;
2415 } else if (unlikely(status[chan] == tx_hard_error)) {
2416 stmmac_tx_err(priv, chan);
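/* tx_hard_error_bump_tc signals a TX underflow: the DMA drained the
 * FIFO faster than it was refilled. Raising the threshold (tc) makes
 * the MAC buffer more data before starting transmission, trading
 * latency for fewer underflows; the bump is skipped in SF_DMA_MODE,
 * since store-and-forward already waits for the full frame.
 */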
2422 * stmmac_mmc_setup: setup the MAC Management Counters (MMC)
2423 * @priv: driver private structure
2424 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2426 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2428 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2429 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2431 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2433 if (priv->dma_cap.rmon) {
2434 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2435 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2437 netdev_info(priv->dev, "No MAC Management Counters available\n");
2441 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2442 * @priv: driver private structure
2444 * New GMAC chip generations have a new register to indicate the
2445 * presence of the optional feature/functions.
2446 * This can also be used to override the value passed through the
2447 * platform, which is necessary for old MAC10/100 and GMAC chips.
2449 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2451 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2455 * stmmac_check_ether_addr - check if the MAC addr is valid
2456 * @priv: driver private structure
2458 * it verifies that the MAC address is valid; in case of failure it
2459 * generates a random MAC address
2461 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2463 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2464 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2465 if (!is_valid_ether_addr(priv->dev->dev_addr))
2466 eth_hw_addr_random(priv->dev);
2467 dev_info(priv->device, "device MAC address %pM\n",
2468 priv->dev->dev_addr);
2473 * stmmac_init_dma_engine - DMA init.
2474 * @priv: driver private structure
2476 * It inits the DMA invoking the specific MAC/GMAC callback.
2477 * Some DMA parameters can be passed from the platform;
2478 * if they are not passed, a default is used for the MAC or GMAC.
2480 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2482 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2483 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2484 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2485 struct stmmac_rx_queue *rx_q;
2486 struct stmmac_tx_queue *tx_q;
2491 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2492 dev_err(priv->device, "Invalid DMA configuration\n");
2496 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2499 ret = stmmac_reset(priv, priv->ioaddr);
2501 dev_err(priv->device, "Failed to reset the dma\n");
2505 /* DMA Configuration */
2506 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2508 if (priv->plat->axi)
2509 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2511 /* DMA CSR Channel configuration */
2512 for (chan = 0; chan < dma_csr_ch; chan++)
2513 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2515 /* DMA RX Channel Configuration */
2516 for (chan = 0; chan < rx_channels_count; chan++) {
2517 rx_q = &priv->rx_queue[chan];
2519 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2520 rx_q->dma_rx_phy, chan);
2522 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2523 (priv->dma_rx_size *
2524 sizeof(struct dma_desc));
2525 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2526 rx_q->rx_tail_addr, chan);
2529 /* DMA TX Channel Configuration */
2530 for (chan = 0; chan < tx_channels_count; chan++) {
2531 tx_q = &priv->tx_queue[chan];
2533 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2534 tx_q->dma_tx_phy, chan);
2536 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2537 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2538 tx_q->tx_tail_addr, chan);
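/* Note the asymmetric tail pointers: the RX tail is programmed to the
 * end of the ring so every descriptor is immediately usable by the
 * DMA, while the TX tail starts at the ring base because no frame has
 * been queued yet; the xmit path bumps it as descriptors are filled.
 */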
2544 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2546 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2548 hrtimer_start(&tx_q->txtimer,
2549 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2554 * stmmac_tx_timer - mitigation sw timer for tx.
2557 * This is the timer handler to directly invoke the stmmac_tx_clean.
2559 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2561 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2562 struct stmmac_priv *priv = tx_q->priv_data;
2563 struct stmmac_channel *ch;
2565 ch = &priv->channel[tx_q->queue_index];
2567 if (likely(napi_schedule_prep(&ch->tx_napi))) {
2568 unsigned long flags;
2570 spin_lock_irqsave(&ch->lock, flags);
2571 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2572 spin_unlock_irqrestore(&ch->lock, flags);
2573 __napi_schedule(&ch->tx_napi);
2576 return HRTIMER_NORESTART;
2580 * stmmac_init_coalesce - init mitigation options.
2581 * @priv: driver private structure
2583 * This inits the coalesce parameters: timer rate,
2584 * timer handler and the default threshold used for enabling the
2585 * interrupt-on-completion bit.
2587 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2589 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2590 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2593 for (chan = 0; chan < tx_channel_count; chan++) {
2594 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2596 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2597 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2599 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2600 tx_q->txtimer.function = stmmac_tx_timer;
2603 for (chan = 0; chan < rx_channel_count; chan++)
2604 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2607 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2609 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2610 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2613 /* set TX ring length */
2614 for (chan = 0; chan < tx_channels_count; chan++)
2615 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2616 (priv->dma_tx_size - 1), chan);
2618 /* set RX ring length */
2619 for (chan = 0; chan < rx_channels_count; chan++)
2620 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2621 (priv->dma_rx_size - 1), chan);
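/* The ring-length registers take the index of the last descriptor,
 * not the descriptor count, hence the (size - 1) above: a 512-entry
 * ring is programmed as 511.
 */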
2625 * stmmac_set_tx_queue_weight - Set TX queue weight
2626 * @priv: driver private structure
2627 * Description: It is used for setting the TX queue weights
2629 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2631 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2635 for (queue = 0; queue < tx_queues_count; queue++) {
2636 weight = priv->plat->tx_queues_cfg[queue].weight;
2637 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2642 * stmmac_configure_cbs - Configure CBS in TX queue
2643 * @priv: driver private structure
2644 * Description: It is used for configuring CBS in AVB TX queues
2646 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2648 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2652 /* queue 0 is reserved for legacy traffic */
2653 for (queue = 1; queue < tx_queues_count; queue++) {
2654 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2655 if (mode_to_use == MTL_QUEUE_DCB)
2658 stmmac_config_cbs(priv, priv->hw,
2659 priv->plat->tx_queues_cfg[queue].send_slope,
2660 priv->plat->tx_queues_cfg[queue].idle_slope,
2661 priv->plat->tx_queues_cfg[queue].high_credit,
2662 priv->plat->tx_queues_cfg[queue].low_credit,
2668 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2669 * @priv: driver private structure
2670 * Description: It is used for mapping RX queues to RX dma channels
2672 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2674 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2678 for (queue = 0; queue < rx_queues_count; queue++) {
2679 chan = priv->plat->rx_queues_cfg[queue].chan;
2680 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2685 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2686 * @priv: driver private structure
2687 * Description: It is used for configuring the RX Queue Priority
2689 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2691 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2695 for (queue = 0; queue < rx_queues_count; queue++) {
2696 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2699 prio = priv->plat->rx_queues_cfg[queue].prio;
2700 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2705 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2706 * @priv: driver private structure
2707 * Description: It is used for configuring the TX Queue Priority
2709 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2711 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2715 for (queue = 0; queue < tx_queues_count; queue++) {
2716 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2719 prio = priv->plat->tx_queues_cfg[queue].prio;
2720 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2725 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2726 * @priv: driver private structure
2727 * Description: It is used for configuring the RX queue routing
2729 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2731 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2735 for (queue = 0; queue < rx_queues_count; queue++) {
2736 /* no specific packet type routing specified for the queue */
2737 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2740 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2741 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2745 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2747 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2748 priv->rss.enable = false;
2752 if (priv->dev->features & NETIF_F_RXHASH)
2753 priv->rss.enable = true;
2755 priv->rss.enable = false;
2757 stmmac_rss_configure(priv, priv->hw, &priv->rss,
2758 priv->plat->rx_queues_to_use);
2762 * stmmac_mtl_configuration - Configure MTL
2763 * @priv: driver private structure
2764 * Description: It is used for configuring MTL
2766 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2768 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2769 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2771 if (tx_queues_count > 1)
2772 stmmac_set_tx_queue_weight(priv);
2774 /* Configure MTL RX algorithms */
2775 if (rx_queues_count > 1)
2776 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2777 priv->plat->rx_sched_algorithm);
2779 /* Configure MTL TX algorithms */
2780 if (tx_queues_count > 1)
2781 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2782 priv->plat->tx_sched_algorithm);
2784 /* Configure CBS in AVB TX queues */
2785 if (tx_queues_count > 1)
2786 stmmac_configure_cbs(priv);
2788 /* Map RX MTL to DMA channels */
2789 stmmac_rx_queue_dma_chan_map(priv);
2791 /* Enable MAC RX Queues */
2792 stmmac_mac_enable_rx_queues(priv);
2794 /* Set RX priorities */
2795 if (rx_queues_count > 1)
2796 stmmac_mac_config_rx_queues_prio(priv);
2798 /* Set TX priorities */
2799 if (tx_queues_count > 1)
2800 stmmac_mac_config_tx_queues_prio(priv);
2802 /* Set RX routing */
2803 if (rx_queues_count > 1)
2804 stmmac_mac_config_rx_queues_routing(priv);
2806 /* Receive Side Scaling */
2807 if (rx_queues_count > 1)
2808 stmmac_mac_config_rss(priv);
2811 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2813 if (priv->dma_cap.asp) {
2814 netdev_info(priv->dev, "Enabling Safety Features\n");
2815 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2817 netdev_info(priv->dev, "No Safety Features support found\n");
2821 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
2825 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
2827 name = priv->wq_name;
2828 sprintf(name, "%s-fpe", priv->dev->name);
2830 priv->fpe_wq = create_singlethread_workqueue(name);
2831 if (!priv->fpe_wq) {
2832 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
2836 netdev_info(priv->dev, "FPE workqueue start");
2842 * stmmac_hw_setup - setup mac in a usable state.
2843 * @dev : pointer to the device structure.
2844 * @init_ptp: initialize PTP if set
2846 * this is the main function to set up the HW in a usable state: the
2847 * dma engine is reset, the core registers are configured (e.g. AXI,
2848 * Checksum features, timers). The DMA is ready to start receiving and
2851 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2854 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2856 struct stmmac_priv *priv = netdev_priv(dev);
2857 u32 rx_cnt = priv->plat->rx_queues_to_use;
2858 u32 tx_cnt = priv->plat->tx_queues_to_use;
2862 /* DMA initialization and SW reset */
2863 ret = stmmac_init_dma_engine(priv);
2865 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2870 /* Copy the MAC addr into the HW */
2871 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2873 /* PS and related bits will be programmed according to the speed */
2874 if (priv->hw->pcs) {
2875 int speed = priv->plat->mac_port_sel_speed;
2877 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2878 (speed == SPEED_1000)) {
2879 priv->hw->ps = speed;
2881 dev_warn(priv->device, "invalid port speed\n");
2886 /* Initialize the MAC Core */
2887 stmmac_core_init(priv, priv->hw, dev);
2890 stmmac_mtl_configuration(priv);
2892 /* Initialize Safety Features */
2893 stmmac_safety_feat_configuration(priv);
2895 ret = stmmac_rx_ipc(priv, priv->hw);
2897 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2898 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2899 priv->hw->rx_csum = 0;
2902 /* Enable the MAC Rx/Tx */
2903 stmmac_mac_set(priv, priv->ioaddr, true);
2905 /* Set the HW DMA mode and the COE */
2906 stmmac_dma_operation_mode(priv);
2908 stmmac_mmc_setup(priv);
2911 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2913 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2915 ret = stmmac_init_ptp(priv);
2916 if (ret == -EOPNOTSUPP)
2917 netdev_warn(priv->dev, "PTP not supported by HW\n");
2919 netdev_warn(priv->dev, "PTP init failed\n");
2922 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2924 /* Convert the timer from msec to usec */
2925 if (!priv->tx_lpi_timer)
2926 priv->tx_lpi_timer = eee_timer * 1000;
2928 if (priv->use_riwt) {
2931 for (queue = 0; queue < rx_cnt; queue++) {
2932 if (!priv->rx_riwt[queue])
2933 priv->rx_riwt[queue] = DEF_DMA_RIWT;
2935 stmmac_rx_watchdog(priv, priv->ioaddr,
2936 priv->rx_riwt[queue], queue);
2941 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2943 /* set TX and RX rings length */
2944 stmmac_set_rings_length(priv);
2948 for (chan = 0; chan < tx_cnt; chan++)
2949 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2952 /* Enable Split Header */
2953 if (priv->sph && priv->hw->rx_csum) {
2954 for (chan = 0; chan < rx_cnt; chan++)
2955 stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2958 /* VLAN Tag Insertion */
2959 if (priv->dma_cap.vlins)
2960 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2963 for (chan = 0; chan < tx_cnt; chan++) {
2964 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2965 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2967 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2970 /* Configure real RX and TX queues */
2971 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2972 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2974 /* Start the ball rolling... */
2975 stmmac_start_all_dma(priv);
2977 if (priv->dma_cap.fpesel) {
2978 stmmac_fpe_start_wq(priv);
2980 if (priv->plat->fpe_cfg->enable)
2981 stmmac_fpe_handshake(priv, true);
2987 static void stmmac_hw_teardown(struct net_device *dev)
2989 struct stmmac_priv *priv = netdev_priv(dev);
2991 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2994 static void stmmac_free_irq(struct net_device *dev,
2995 enum request_irq_err irq_err, int irq_idx)
2997 struct stmmac_priv *priv = netdev_priv(dev);
3001 case REQ_IRQ_ERR_ALL:
3002 irq_idx = priv->plat->tx_queues_to_use;
3004 case REQ_IRQ_ERR_TX:
3005 for (j = irq_idx - 1; j >= 0; j--) {
3006 if (priv->tx_irq[j] > 0)
3007 free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3009 irq_idx = priv->plat->rx_queues_to_use;
3011 case REQ_IRQ_ERR_RX:
3012 for (j = irq_idx - 1; j >= 0; j--) {
3013 if (priv->rx_irq[j] > 0)
3014 free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3017 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3018 free_irq(priv->sfty_ue_irq, dev);
3020 case REQ_IRQ_ERR_SFTY_UE:
3021 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3022 free_irq(priv->sfty_ce_irq, dev);
3024 case REQ_IRQ_ERR_SFTY_CE:
3025 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3026 free_irq(priv->lpi_irq, dev);
3028 case REQ_IRQ_ERR_LPI:
3029 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3030 free_irq(priv->wol_irq, dev);
3032 case REQ_IRQ_ERR_WOL:
3033 free_irq(dev->irq, dev);
3035 case REQ_IRQ_ERR_MAC:
3036 case REQ_IRQ_ERR_NO:
3037 /* If MAC IRQ request error, no more IRQ to free */
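/* The switch above deliberately falls through: entering at the stage
 * that failed releases every IRQ requested before it, and
 * REQ_IRQ_ERR_ALL enters at the top so the release path can free the
 * complete set with a single call.
 */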
3042 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3044 enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
3045 struct stmmac_priv *priv = netdev_priv(dev);
3051 /* For common interrupt */
3052 int_name = priv->int_name_mac;
3053 sprintf(int_name, "%s:%s", dev->name, "mac");
3054 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3056 if (unlikely(ret < 0)) {
3057 netdev_err(priv->dev,
3058 "%s: alloc mac MSI %d (error: %d)\n",
3059 __func__, dev->irq, ret);
3060 irq_err = REQ_IRQ_ERR_MAC;
3064 /* Request the Wake IRQ in case another line
3067 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3068 int_name = priv->int_name_wol;
3069 sprintf(int_name, "%s:%s", dev->name, "wol");
3070 ret = request_irq(priv->wol_irq,
3071 stmmac_mac_interrupt,
3073 if (unlikely(ret < 0)) {
3074 netdev_err(priv->dev,
3075 "%s: alloc wol MSI %d (error: %d)\n",
3076 __func__, priv->wol_irq, ret);
3077 irq_err = REQ_IRQ_ERR_WOL;
3082 /* Request the LPI IRQ in case another line
3085 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3086 int_name = priv->int_name_lpi;
3087 sprintf(int_name, "%s:%s", dev->name, "lpi");
3088 ret = request_irq(priv->lpi_irq,
3089 stmmac_mac_interrupt,
3091 if (unlikely(ret < 0)) {
3092 netdev_err(priv->dev,
3093 "%s: alloc lpi MSI %d (error: %d)\n",
3094 __func__, priv->lpi_irq, ret);
3095 irq_err = REQ_IRQ_ERR_LPI;
3100 /* Request the Safety Feature Correctable Error line in
3101 * case another line is used
3103 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3104 int_name = priv->int_name_sfty_ce;
3105 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3106 ret = request_irq(priv->sfty_ce_irq,
3107 stmmac_safety_interrupt,
3109 if (unlikely(ret < 0)) {
3110 netdev_err(priv->dev,
3111 "%s: alloc sfty ce MSI %d (error: %d)\n",
3112 __func__, priv->sfty_ce_irq, ret);
3113 irq_err = REQ_IRQ_ERR_SFTY_CE;
3118 /* Request the Safety Feature Uncorrectable Error line in
3119 * case another line is used
3121 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3122 int_name = priv->int_name_sfty_ue;
3123 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3124 ret = request_irq(priv->sfty_ue_irq,
3125 stmmac_safety_interrupt,
3127 if (unlikely(ret < 0)) {
3128 netdev_err(priv->dev,
3129 "%s: alloc sfty ue MSI %d (error: %d)\n",
3130 __func__, priv->sfty_ue_irq, ret);
3131 irq_err = REQ_IRQ_ERR_SFTY_UE;
3136 /* Request Rx MSI irq */
3137 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3138 if (priv->rx_irq[i] == 0)
3141 int_name = priv->int_name_rx_irq[i];
3142 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3143 ret = request_irq(priv->rx_irq[i],
3145 0, int_name, &priv->rx_queue[i]);
3146 if (unlikely(ret < 0)) {
3147 netdev_err(priv->dev,
3148 "%s: alloc rx-%d MSI %d (error: %d)\n",
3149 __func__, i, priv->rx_irq[i], ret);
3150 irq_err = REQ_IRQ_ERR_RX;
3156 /* Request Tx MSI irq */
3157 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3158 if (priv->tx_irq[i] == 0)
3161 int_name = priv->int_name_tx_irq[i];
3162 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3163 ret = request_irq(priv->tx_irq[i],
3165 0, int_name, &priv->tx_queue[i]);
3166 if (unlikely(ret < 0)) {
3167 netdev_err(priv->dev,
3168 "%s: alloc tx-%d MSI %d (error: %d)\n",
3169 __func__, i, priv->tx_irq[i], ret);
3170 irq_err = REQ_IRQ_ERR_TX;
3179 stmmac_free_irq(dev, irq_err, irq_idx);
3183 static int stmmac_request_irq_single(struct net_device *dev)
3185 enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
3186 struct stmmac_priv *priv = netdev_priv(dev);
3189 ret = request_irq(dev->irq, stmmac_interrupt,
3190 IRQF_SHARED, dev->name, dev);
3191 if (unlikely(ret < 0)) {
3192 netdev_err(priv->dev,
3193 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3194 __func__, dev->irq, ret);
3195 irq_err = REQ_IRQ_ERR_MAC;
3199 /* Request the Wake IRQ in case another line
3202 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3203 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3204 IRQF_SHARED, dev->name, dev);
3205 if (unlikely(ret < 0)) {
3206 netdev_err(priv->dev,
3207 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3208 __func__, priv->wol_irq, ret);
3209 irq_err = REQ_IRQ_ERR_WOL;
3214 /* Request the IRQ lines */
3215 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3216 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3217 IRQF_SHARED, dev->name, dev);
3218 if (unlikely(ret < 0)) {
3219 netdev_err(priv->dev,
3220 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3221 __func__, priv->lpi_irq, ret);
3222 irq_err = REQ_IRQ_ERR_LPI;
3230 stmmac_free_irq(dev, irq_err, 0);
3234 static int stmmac_request_irq(struct net_device *dev)
3236 struct stmmac_priv *priv = netdev_priv(dev);
3239 /* Request the IRQ lines */
3240 if (priv->plat->multi_msi_en)
3241 ret = stmmac_request_irq_multi_msi(dev);
3243 ret = stmmac_request_irq_single(dev);
3249 * stmmac_open - open entry point of the driver
3250 * @dev : pointer to the device structure.
3252 * This function is the open entry point of the driver.
3254 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3257 static int stmmac_open(struct net_device *dev)
3259 struct stmmac_priv *priv = netdev_priv(dev);
3264 ret = pm_runtime_get_sync(priv->device);
3266 pm_runtime_put_noidle(priv->device);
3270 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3271 priv->hw->pcs != STMMAC_PCS_RTBI &&
3272 priv->hw->xpcs_args.an_mode != DW_AN_C73) {
3273 ret = stmmac_init_phy(dev);
3275 netdev_err(priv->dev,
3276 "%s: Cannot attach to PHY (error: %d)\n",
3278 goto init_phy_error;
3282 /* Extra statistics */
3283 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3284 priv->xstats.threshold = tc;
3286 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3290 if (bfsize < BUF_SIZE_16KiB)
3291 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3293 priv->dma_buf_sz = bfsize;
3296 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3298 if (!priv->dma_tx_size)
3299 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3300 if (!priv->dma_rx_size)
3301 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3303 /* Earlier check for TBS */
3304 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3305 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3306 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3308 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3309 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
3310 tx_q->tbs &= ~STMMAC_TBS_AVAIL;
3313 ret = alloc_dma_desc_resources(priv);
3315 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3317 goto dma_desc_error;
3320 ret = init_dma_desc_rings(dev, GFP_KERNEL);
3322 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3327 ret = stmmac_hw_setup(dev, true);
3329 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3333 stmmac_init_coalesce(priv);
3335 phylink_start(priv->phylink);
3336 /* We may have called phylink_speed_down before */
3337 phylink_speed_up(priv->phylink);
3339 ret = stmmac_request_irq(dev);
3343 stmmac_enable_all_queues(priv);
3344 netif_tx_start_all_queues(priv->dev);
3349 phylink_stop(priv->phylink);
3351 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3352 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3354 stmmac_hw_teardown(dev);
3356 free_dma_desc_resources(priv);
3358 phylink_disconnect_phy(priv->phylink);
3360 pm_runtime_put(priv->device);
3364 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3366 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3369 destroy_workqueue(priv->fpe_wq);
3371 netdev_info(priv->dev, "FPE workqueue stop");
3375 * stmmac_release - close entry point of the driver
3376 * @dev : device pointer.
3378 * This is the stop entry point of the driver.
3380 static int stmmac_release(struct net_device *dev)
3382 struct stmmac_priv *priv = netdev_priv(dev);
3385 if (device_may_wakeup(priv->device))
3386 phylink_speed_down(priv->phylink, false);
3387 /* Stop and disconnect the PHY */
3388 phylink_stop(priv->phylink);
3389 phylink_disconnect_phy(priv->phylink);
3391 stmmac_disable_all_queues(priv);
3393 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3394 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3396 /* Free the IRQ lines */
3397 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3399 if (priv->eee_enabled) {
3400 priv->tx_path_in_lpi_mode = false;
3401 del_timer_sync(&priv->eee_ctrl_timer);
3404 /* Stop TX/RX DMA and clear the descriptors */
3405 stmmac_stop_all_dma(priv);
3407 /* Release and free the Rx/Tx resources */
3408 free_dma_desc_resources(priv);
3410 /* Disable the MAC Rx/Tx */
3411 stmmac_mac_set(priv, priv->ioaddr, false);
3413 netif_carrier_off(dev);
3415 stmmac_release_ptp(priv);
3417 pm_runtime_put(priv->device);
3419 if (priv->dma_cap.fpesel)
3420 stmmac_fpe_stop_wq(priv);
3425 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3426 struct stmmac_tx_queue *tx_q)
3428 u16 tag = 0x0, inner_tag = 0x0;
3429 u32 inner_type = 0x0;
3432 if (!priv->dma_cap.vlins)
3434 if (!skb_vlan_tag_present(skb))
3436 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3437 inner_tag = skb_vlan_tag_get(skb);
3438 inner_type = STMMAC_VLAN_INSERT;
3441 tag = skb_vlan_tag_get(skb);
3443 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3444 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3446 p = &tx_q->dma_tx[tx_q->cur_tx];
3448 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3451 stmmac_set_tx_owner(priv, p);
3452 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3457 * stmmac_tso_allocator - fill TX descriptors for a TSO payload
3458 * @priv: driver private structure
3459 * @des: buffer start address
3460 * @total_len: total length to fill in descriptors
3461 * @last_segment: condition for the last descriptor
3462 * @queue: TX queue index
3464 * This function fills descriptors and requests new descriptors according to
3465 * the buffer length to fill
3467 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3468 int total_len, bool last_segment, u32 queue)
3470 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3471 struct dma_desc *desc;
3475 tmp_len = total_len;
3477 while (tmp_len > 0) {
3478 dma_addr_t curr_addr;
3480 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3482 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3484 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3485 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3487 desc = &tx_q->dma_tx[tx_q->cur_tx];
3489 curr_addr = des + (total_len - tmp_len);
3490 if (priv->dma_cap.addr64 <= 32)
3491 desc->des0 = cpu_to_le32(curr_addr);
3493 stmmac_set_desc_addr(priv, desc, curr_addr);
3495 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3496 TSO_MAX_BUFF_SIZE : tmp_len;
3498 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3500 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3503 tmp_len -= TSO_MAX_BUFF_SIZE;
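/* Each descriptor can carry at most TSO_MAX_BUFF_SIZE (16 KiB - 1)
 * bytes, so the payload is sliced into as many descriptors as needed.
 * For example, a 32000-byte payload takes two descriptors: 16383
 * bytes, then the remaining 15617. Only the final slice may carry the
 * last_segment flag.
 */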
3508 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3509 * @skb : the socket buffer
3510 * @dev : device pointer
3511 * Description: this is the transmit function that is called on TSO frames
3512 * (support available on GMAC4 and newer chips).
3513 * The diagram below shows the ring programming in case of TSO frames:
3517 * | DES0 |---> buffer1 = L2/L3/L4 header
3518 * | DES1 |---> TCP Payload (can continue on next descr...)
3519 * | DES2 |---> buffer 1 and 2 len
3520 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3526 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
3528 * | DES2 | --> buffer 1 and 2 len
3532 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
3534 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3536 struct dma_desc *desc, *first, *mss_desc = NULL;
3537 struct stmmac_priv *priv = netdev_priv(dev);
3538 int desc_size, tmp_pay_len = 0, first_tx;
3539 int nfrags = skb_shinfo(skb)->nr_frags;
3540 u32 queue = skb_get_queue_mapping(skb);
3541 unsigned int first_entry, tx_packets;
3542 struct stmmac_tx_queue *tx_q;
3543 bool has_vlan, set_ic;
3544 u8 proto_hdr_len, hdr;
3549 tx_q = &priv->tx_queue[queue];
3550 first_tx = tx_q->cur_tx;
3552 /* Compute header lengths */
3553 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3554 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3555 hdr = sizeof(struct udphdr);
3557 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3558 hdr = tcp_hdrlen(skb);
3561 /* Desc availability based on threshold should be enough safe */
3562 if (unlikely(stmmac_tx_avail(priv, queue) <
3563 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3564 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3565 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3567 /* This is a hard error, log it. */
3568 netdev_err(priv->dev,
3569 "%s: Tx Ring full when queue awake\n",
3572 return NETDEV_TX_BUSY;
3575 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3577 mss = skb_shinfo(skb)->gso_size;
3579 /* set new MSS value if needed */
3580 if (mss != tx_q->mss) {
3581 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3582 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3584 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3586 stmmac_set_mss(priv, mss_desc, mss);
3588 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3590 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3593 if (netif_msg_tx_queued(priv)) {
3594 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3595 __func__, hdr, proto_hdr_len, pay_len, mss);
3596 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3600 /* Check if VLAN can be inserted by HW */
3601 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3603 first_entry = tx_q->cur_tx;
3604 WARN_ON(tx_q->tx_skbuff[first_entry]);
3606 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3607 desc = &tx_q->dma_entx[first_entry].basic;
3609 desc = &tx_q->dma_tx[first_entry];
3613 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3615 /* first descriptor: fill Headers on Buf1 */
3616 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3618 if (dma_mapping_error(priv->device, des))
3621 tx_q->tx_skbuff_dma[first_entry].buf = des;
3622 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3624 if (priv->dma_cap.addr64 <= 32) {
3625 first->des0 = cpu_to_le32(des);
3627 /* Fill start of payload in buff2 of first descriptor */
3629 first->des1 = cpu_to_le32(des + proto_hdr_len);
3631 /* If needed take extra descriptors to fill the remaining payload */
3632 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3634 stmmac_set_desc_addr(priv, first, des);
3635 tmp_pay_len = pay_len;
3636 des += proto_hdr_len;
3640 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3642 /* Prepare fragments */
3643 for (i = 0; i < nfrags; i++) {
3644 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3646 des = skb_frag_dma_map(priv->device, frag, 0,
3647 skb_frag_size(frag),
3649 if (dma_mapping_error(priv->device, des))
3652 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3653 (i == nfrags - 1), queue);
3655 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3656 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3657 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3660 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3662 /* Only the last descriptor gets to point to the skb. */
3663 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3665 /* Manage tx mitigation */
3666 tx_packets = (tx_q->cur_tx + 1) - first_tx;
3667 tx_q->tx_count_frames += tx_packets;
3669 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3671 else if (!priv->tx_coal_frames[queue])
3673 else if (tx_packets > priv->tx_coal_frames[queue])
3675 else if ((tx_q->tx_count_frames %
3676 priv->tx_coal_frames[queue]) < tx_packets)
3682 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3683 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3685 desc = &tx_q->dma_tx[tx_q->cur_tx];
3687 tx_q->tx_count_frames = 0;
3688 stmmac_set_tx_ic(priv, desc);
3689 priv->xstats.tx_set_ic_bit++;
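/* The IC (interrupt on completion) bit is set only on selected
 * descriptors: always when the frame is being timestamped, otherwise
 * roughly once every tx_coal_frames packets (the modulo test above
 * detects crossing a multiple of the threshold). Descriptors without
 * IC complete silently and are reaped later by the TX timer or NAPI.
 */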
3692 /* We've used all descriptors we need for this skb, however,
3693 * advance cur_tx so that it references a fresh descriptor.
3694 * ndo_start_xmit will fill this descriptor the next time it's
3695 * called and stmmac_tx_clean may clean up to this descriptor.
3697 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3699 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3700 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3702 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3705 dev->stats.tx_bytes += skb->len;
3706 priv->xstats.tx_tso_frames++;
3707 priv->xstats.tx_tso_nfrags += nfrags;
3709 if (priv->sarc_type)
3710 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3712 skb_tx_timestamp(skb);
3714 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3715 priv->hwts_tx_en)) {
3716 /* declare that device is doing timestamping */
3717 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3718 stmmac_enable_tx_timestamp(priv, first);
3721 /* Complete the first descriptor before granting the DMA */
3722 stmmac_prepare_tso_tx_desc(priv, first, 1,
3725 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3726 hdr / 4, (skb->len - proto_hdr_len));
3728 /* If context desc is used to change MSS */
3730 /* Make sure that first descriptor has been completely
3731 * written, including its own bit. This is because MSS is
3732 * actually before first descriptor, so we need to make
3733 * sure that MSS's own bit is the last thing written.
3736 stmmac_set_tx_owner(priv, mss_desc);
3739 /* The own bit must be the latest setting done when prepare the
3740 * descriptor and then barrier is needed to make sure that
3741 * all is coherent before granting the DMA engine.
3745 if (netif_msg_pktdata(priv)) {
3746 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3747 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3748 tx_q->cur_tx, first, nfrags);
3749 pr_info(">>> frame to be transmitted: ");
3750 print_pkt(skb->data, skb_headlen(skb));
3753 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3755 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3756 desc_size = sizeof(struct dma_edesc);
3758 desc_size = sizeof(struct dma_desc);
3760 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3761 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3762 stmmac_tx_timer_arm(priv, queue);
3764 return NETDEV_TX_OK;
3767 dev_err(priv->device, "Tx dma map failed\n");
3769 priv->dev->stats.tx_dropped++;
3770 return NETDEV_TX_OK;
3774 * stmmac_xmit - Tx entry point of the driver
3775 * @skb : the socket buffer
3776 * @dev : device pointer
3777 * Description : this is the tx entry point of the driver.
3778 * It programs the chain or the ring and supports oversized frames
3781 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3783 unsigned int first_entry, tx_packets, enh_desc;
3784 struct stmmac_priv *priv = netdev_priv(dev);
3785 unsigned int nopaged_len = skb_headlen(skb);
3786 int i, csum_insertion = 0, is_jumbo = 0;
3787 u32 queue = skb_get_queue_mapping(skb);
3788 int nfrags = skb_shinfo(skb)->nr_frags;
3789 int gso = skb_shinfo(skb)->gso_type;
3790 struct dma_edesc *tbs_desc = NULL;
3791 int entry, desc_size, first_tx;
3792 struct dma_desc *desc, *first;
3793 struct stmmac_tx_queue *tx_q;
3794 bool has_vlan, set_ic;
3797 tx_q = &priv->tx_queue[queue];
3798 first_tx = tx_q->cur_tx;
3800 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
3801 stmmac_disable_eee_mode(priv);
3803 /* Manage oversized TCP frames for GMAC4 device */
3804 if (skb_is_gso(skb) && priv->tso) {
3805 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3806 return stmmac_tso_xmit(skb, dev);
3807 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3808 return stmmac_tso_xmit(skb, dev);
3811 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3812 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3813 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3815 /* This is a hard error, log it. */
3816 netdev_err(priv->dev,
3817 "%s: Tx Ring full when queue awake\n",
3820 return NETDEV_TX_BUSY;
3823 /* Check if VLAN can be inserted by HW */
3824 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3826 entry = tx_q->cur_tx;
3827 first_entry = entry;
3828 WARN_ON(tx_q->tx_skbuff[first_entry]);
3830 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3832 if (likely(priv->extend_desc))
3833 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3834 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3835 desc = &tx_q->dma_entx[entry].basic;
3837 desc = tx_q->dma_tx + entry;
3842 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3844 enh_desc = priv->plat->enh_desc;
3845 /* To program the descriptors according to the size of the frame */
3847 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3849 if (unlikely(is_jumbo)) {
3850 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3851 if (unlikely(entry < 0) && (entry != -EINVAL))
3855 for (i = 0; i < nfrags; i++) {
3856 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3857 int len = skb_frag_size(frag);
3858 bool last_segment = (i == (nfrags - 1));
3860 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3861 WARN_ON(tx_q->tx_skbuff[entry]);
3863 if (likely(priv->extend_desc))
3864 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3865 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3866 desc = &tx_q->dma_entx[entry].basic;
3868 desc = tx_q->dma_tx + entry;
3870 des = skb_frag_dma_map(priv->device, frag, 0, len,
3872 if (dma_mapping_error(priv->device, des))
3873 goto dma_map_err; /* should reuse desc w/o issues */
3875 tx_q->tx_skbuff_dma[entry].buf = des;
3877 stmmac_set_desc_addr(priv, desc, des);
3879 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3880 tx_q->tx_skbuff_dma[entry].len = len;
3881 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3883 /* Prepare the descriptor and set the own bit too */
3884 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3885 priv->mode, 1, last_segment, skb->len);
3888 /* Only the last descriptor gets to point to the skb. */
3889 tx_q->tx_skbuff[entry] = skb;
3891 /* According to the coalesce parameter the IC bit for the latest
3892 * segment is reset and the timer re-started to clean the tx status.
3893 * This approach takes care about the fragments: desc is the first
3894 * element in case of no SG.
3896 tx_packets = (entry + 1) - first_tx;
3897 tx_q->tx_count_frames += tx_packets;
3899 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3901 else if (!priv->tx_coal_frames[queue])
3903 else if (tx_packets > priv->tx_coal_frames[queue])
3905 else if ((tx_q->tx_count_frames %
3906 priv->tx_coal_frames[queue]) < tx_packets)
3912 if (likely(priv->extend_desc))
3913 desc = &tx_q->dma_etx[entry].basic;
3914 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3915 desc = &tx_q->dma_entx[entry].basic;
3917 desc = &tx_q->dma_tx[entry];
3919 tx_q->tx_count_frames = 0;
3920 stmmac_set_tx_ic(priv, desc);
3921 priv->xstats.tx_set_ic_bit++;
3924 /* We've used all descriptors we need for this skb, however,
3925 * advance cur_tx so that it references a fresh descriptor.
3926 * ndo_start_xmit will fill this descriptor the next time it's
3927 * called and stmmac_tx_clean may clean up to this descriptor.
3929 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3930 tx_q->cur_tx = entry;
3932 if (netif_msg_pktdata(priv)) {
3933 netdev_dbg(priv->dev,
3934 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3935 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3936 entry, first, nfrags);
3938 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3939 print_pkt(skb->data, skb->len);
3942 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3943 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3945 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3948 dev->stats.tx_bytes += skb->len;
3950 if (priv->sarc_type)
3951 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3953 skb_tx_timestamp(skb);
3955 /* Ready to fill the first descriptor and set the OWN bit w/o any
3956 * problems because all the descriptors are actually ready to be
3957 * passed to the DMA engine.
3959 if (likely(!is_jumbo)) {
3960 bool last_segment = (nfrags == 0);
3962 des = dma_map_single(priv->device, skb->data,
3963 nopaged_len, DMA_TO_DEVICE);
3964 if (dma_mapping_error(priv->device, des))
3967 tx_q->tx_skbuff_dma[first_entry].buf = des;
3969 stmmac_set_desc_addr(priv, first, des);
3971 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3972 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3974 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3975 priv->hwts_tx_en)) {
3976 /* declare that device is doing timestamping */
3977 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3978 stmmac_enable_tx_timestamp(priv, first);
3981 /* Prepare the first descriptor setting the OWN bit too */
3982 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3983 csum_insertion, priv->mode, 0, last_segment,
3987 if (tx_q->tbs & STMMAC_TBS_EN) {
3988 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3990 tbs_desc = &tx_q->dma_entx[first_entry];
3991 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3994 stmmac_set_tx_owner(priv, first);
3996 /* The own bit must be the latest setting done when prepare the
3997 * descriptor and then barrier is needed to make sure that
3998 * all is coherent before granting the DMA engine.
4002 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4004 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4006 if (likely(priv->extend_desc))
4007 desc_size = sizeof(struct dma_extended_desc);
4008 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4009 desc_size = sizeof(struct dma_edesc);
4011 desc_size = sizeof(struct dma_desc);
4013 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4014 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4015 stmmac_tx_timer_arm(priv, queue);
4017 return NETDEV_TX_OK;
4020 netdev_err(priv->dev, "Tx DMA map failed\n");
4022 priv->dev->stats.tx_dropped++;
4023 return NETDEV_TX_OK;
4026 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4028 struct vlan_ethhdr *veth;
4032 veth = (struct vlan_ethhdr *)skb->data;
4033 vlan_proto = veth->h_vlan_proto;
4035 if ((vlan_proto == htons(ETH_P_8021Q) &&
4036 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4037 (vlan_proto == htons(ETH_P_8021AD) &&
4038 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4039 /* pop the vlan tag */
4040 vlanid = ntohs(veth->h_vlan_TCI);
4041 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4042 skb_pull(skb, VLAN_HLEN);
4043 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4048 * stmmac_rx_refill - refill used skb preallocated buffers
4049 * @priv: driver private structure
4050 * @queue: RX queue index
4051 * Description : this reallocates the RX buffers for the reception process,
4052 * which is based on zero-copy.
4054 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4056 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4057 int len, dirty = stmmac_rx_dirty(priv, queue);
4058 unsigned int entry = rx_q->dirty_rx;
4060 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
4062 while (dirty-- > 0) {
4063 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4067 if (priv->extend_desc)
4068 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4070 p = rx_q->dma_rx + entry;
4073 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4078 if (priv->sph && !buf->sec_page) {
4079 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4083 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4085 dma_sync_single_for_device(priv->device, buf->sec_addr,
4086 len, DMA_FROM_DEVICE);
4089 buf->addr = page_pool_get_dma_addr(buf->page);
4091 /* Sync whole allocation to device. This will invalidate old
4094 dma_sync_single_for_device(priv->device, buf->addr, len,
4097 stmmac_set_desc_addr(priv, p, buf->addr);
4099 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4101 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4102 stmmac_refill_desc3(priv, rx_q, p);
4104 rx_q->rx_count_frames++;
4105 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4106 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4107 rx_q->rx_count_frames = 0;
4109 use_rx_wd = !priv->rx_coal_frames[queue];
4110 use_rx_wd |= rx_q->rx_count_frames > 0;
4111 if (!priv->use_riwt)
4115 stmmac_set_rx_owner(priv, p, use_rx_wd);
4117 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4119 rx_q->dirty_rx = entry;
4120 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4121 (rx_q->dirty_rx * sizeof(struct dma_desc));
4122 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
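/* Ownership is handed back last: the buffer addresses are written into
 * the descriptor before stmmac_set_rx_owner() flips the OWN bit, and
 * only then is the tail pointer advanced to dirty_rx, so the DMA never
 * sees a partially initialized descriptor.
 */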
4125 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4127 int status, unsigned int len)
4129 unsigned int plen = 0, hlen = 0;
4130 int coe = priv->hw->rx_csum;
4132 /* Not first descriptor, buffer is always zero */
4133 if (priv->sph && len)
4136 /* First descriptor, get split header length */
4137 stmmac_get_rx_header_len(priv, p, &hlen);
4138 if (priv->sph && hlen) {
4139 priv->xstats.rx_split_hdr_pkt_n++;
4143 /* First descriptor, not last descriptor and not split header */
4144 if (status & rx_not_ls)
4145 return priv->dma_buf_sz;
4147 plen = stmmac_get_rx_frame_len(priv, p, coe);
4149 /* First descriptor and last descriptor and not split header */
4150 return min_t(unsigned int, priv->dma_buf_sz, plen);
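/* To summarize the decision tree above: with split header enabled,
 * buffer 1 only ever holds the header, so its length is 0 for
 * non-first descriptors and hlen for the first one; without it, a
 * non-last descriptor is always a full dma_buf_sz and only the last
 * one is trimmed to the real frame length.
 */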
4153 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4155 int status, unsigned int len)
4157 int coe = priv->hw->rx_csum;
4158 unsigned int plen = 0;
4160 /* Not split header, buffer is not available */
4164 /* Not last descriptor */
4165 if (status & rx_not_ls)
4166 return priv->dma_buf_sz;
4168 plen = stmmac_get_rx_frame_len(priv, p, coe);
4170 /* Last descriptor */
4175 * stmmac_rx - manage the receive process
4176 * @priv: driver private structure
4177 * @limit: napi budget
4178 * @queue: RX queue index.
4179 * Description : this is the function called by the napi poll method.
4180 * It gets all the frames inside the ring.
4182 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
4184 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4185 struct stmmac_channel *ch = &priv->channel[queue];
4186 unsigned int count = 0, error = 0, len = 0;
4187 int status = 0, coe = priv->hw->rx_csum;
4188 unsigned int next_entry = rx_q->cur_rx;
4189 unsigned int desc_size;
4190 struct sk_buff *skb = NULL;
4192 if (netif_msg_rx_status(priv)) {
4195 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4196 if (priv->extend_desc) {
4197 rx_head = (void *)rx_q->dma_erx;
4198 desc_size = sizeof(struct dma_extended_desc);
4200 rx_head = (void *)rx_q->dma_rx;
4201 desc_size = sizeof(struct dma_desc);
4204 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4205 rx_q->dma_rx_phy, desc_size);
4207 while (count < limit) {
4208 unsigned int buf1_len = 0, buf2_len = 0;
4209 enum pkt_hash_types hash_type;
4210 struct stmmac_rx_buffer *buf;
4211 struct dma_desc *np, *p;
4215 if (!count && rx_q->state_saved) {
4216 skb = rx_q->state.skb;
4217 error = rx_q->state.error;
4218 len = rx_q->state.len;
4220 rx_q->state_saved = false;
4233 buf = &rx_q->buf_pool[entry];
4235 if (priv->extend_desc)
4236 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4238 p = rx_q->dma_rx + entry;
4240 /* read the status of the incoming frame */
4241 status = stmmac_rx_status(priv, &priv->dev->stats,
4243 /* check if managed by the DMA otherwise go ahead */
4244 if (unlikely(status & dma_own))
4247 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4249 next_entry = rx_q->cur_rx;
4251 if (priv->extend_desc)
4252 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4254 np = rx_q->dma_rx + next_entry;
4258 if (priv->extend_desc)
4259 stmmac_rx_extended_status(priv, &priv->dev->stats,
4260 &priv->xstats, rx_q->dma_erx + entry);
4261 if (unlikely(status == discard_frame)) {
4262 page_pool_recycle_direct(rx_q->page_pool, buf->page);
4265 if (!priv->hwts_rx_en)
4266 priv->dev->stats.rx_errors++;
4269 if (unlikely(error && (status & rx_not_ls)))
4271 if (unlikely(error)) {
4278 /* Buffer is good. Go on. */
4280 prefetch(page_address(buf->page));
4282 prefetch(page_address(buf->sec_page));
4284 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4286 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
4289 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4290 * Type frames (LLC/LLC-SNAP)
4292 * llc_snap is never checked in GMAC >= 4, so this ACS
4293 * feature is always disabled and packets need to be
4294 * stripped manually.
4296 if (likely(!(status & rx_not_ls)) &&
4297 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4298 unlikely(status != llc_snap))) {
4300 buf2_len -= ETH_FCS_LEN;
4302 buf1_len -= ETH_FCS_LEN;
4308 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
4310 priv->dev->stats.rx_dropped++;
4315 dma_sync_single_for_cpu(priv->device, buf->addr,
4316 buf1_len, DMA_FROM_DEVICE);
4317 skb_copy_to_linear_data(skb, page_address(buf->page),
4319 skb_put(skb, buf1_len);
4321 /* Data payload copied into SKB, page ready for recycle */
4322 page_pool_recycle_direct(rx_q->page_pool, buf->page);
4324 } else if (buf1_len) {
4325 dma_sync_single_for_cpu(priv->device, buf->addr,
4326 buf1_len, DMA_FROM_DEVICE);
4327 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4328 buf->page, 0, buf1_len,
4331 /* Data payload appended into SKB */
4332 page_pool_release_page(rx_q->page_pool, buf->page);
4337 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
4338 buf2_len, DMA_FROM_DEVICE);
4339 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4340 buf->sec_page, 0, buf2_len,
4343 /* Data payload appended into SKB */
4344 page_pool_release_page(rx_q->page_pool, buf->sec_page);
4345 buf->sec_page = NULL;
4349 if (likely(status & rx_not_ls))
4354 /* Got entire packet into SKB. Finish it. */
4356 stmmac_get_rx_hwtstamp(priv, p, np, skb);
4357 stmmac_rx_vlan(priv->dev, skb);
4358 skb->protocol = eth_type_trans(skb, priv->dev);
4361 skb_checksum_none_assert(skb);
4363 skb->ip_summed = CHECKSUM_UNNECESSARY;
4365 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4366 skb_set_hash(skb, hash, hash_type);
4368 skb_record_rx_queue(skb, queue);
4369 napi_gro_receive(&ch->rx_napi, skb);
4372 priv->dev->stats.rx_packets++;
4373 priv->dev->stats.rx_bytes += len;
4377 if (status & rx_not_ls || skb) {
4378 rx_q->state_saved = true;
4379 rx_q->state.skb = skb;
4380 rx_q->state.error = error;
4381 rx_q->state.len = len;
4384 stmmac_rx_refill(priv, queue);
4386 priv->xstats.rx_pkt_n += count;
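/* Note on the state save/restore above: if the NAPI budget is exhausted in
 * the middle of a frame (rx_not_ls still set) or before the current skb is
 * finished, the partial skb, error flag and accumulated length are stashed
 * in rx_q->state and picked up at the top of the next poll, so frames that
 * span several descriptors survive budget exhaustion intact.
 */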
4391 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
4393 struct stmmac_channel *ch =
4394 container_of(napi, struct stmmac_channel, rx_napi);
4395 struct stmmac_priv *priv = ch->priv_data;
4396 u32 chan = ch->index;
4399 priv->xstats.napi_poll++;
4401 work_done = stmmac_rx(priv, budget, chan);
4402 if (work_done < budget && napi_complete_done(napi, work_done)) {
4403 unsigned long flags;
4405 spin_lock_irqsave(&ch->lock, flags);
4406 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4407 spin_unlock_irqrestore(&ch->lock, flags);
4413 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4415 struct stmmac_channel *ch =
4416 container_of(napi, struct stmmac_channel, tx_napi);
4417 struct stmmac_priv *priv = ch->priv_data;
4418 u32 chan = ch->index;
4421 priv->xstats.napi_poll++;
4423 work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
4424 work_done = min(work_done, budget);
4426 if (work_done < budget && napi_complete_done(napi, work_done)) {
4427 unsigned long flags;
4429 spin_lock_irqsave(&ch->lock, flags);
4430 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4431 spin_unlock_irqrestore(&ch->lock, flags);
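/* In both poll routines above, the channel's DMA interrupt stays masked while
 * NAPI is scheduled and is re-armed under ch->lock only after
 * napi_complete_done() reports that no further polling is pending; this
 * avoids racing with the ISR re-scheduling the same channel.
 */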
4439 * @dev : Pointer to net device structure
4440 * @txqueue: the index of the hanging transmit queue
4441 * Description: this function is called when a packet transmission fails to
4442 * complete within a reasonable time. The driver will mark the error in the
4443 * netdev structure and arrange for the device to be reset to a sane state
4444 * in order to transmit a new packet.
4446 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
4448 struct stmmac_priv *priv = netdev_priv(dev);
4450 stmmac_global_err(priv);
4454 * stmmac_set_rx_mode - entry point for multicast addressing
4455 * @dev : pointer to the device structure
4457 * This function is a driver entry point which gets called by the kernel
4458 * whenever multicast addresses must be enabled/disabled.
4462 static void stmmac_set_rx_mode(struct net_device *dev)
4464 struct stmmac_priv *priv = netdev_priv(dev);
4466 stmmac_set_filter(priv, priv->hw, dev);
4470 * stmmac_change_mtu - entry point to change MTU size for the device.
4471 * @dev : device pointer.
4472 * @new_mtu : the new MTU size for the device.
4473 * Description: the Maximum Transmission Unit (MTU) is used by the network
4474 * layer to drive packet transmission. Ethernet has an MTU of 1500 octets
4475 * (ETH_DATA_LEN). This value can be changed with ifconfig or ip link.
4477 * 0 on success or a negative errno value on failure.
4480 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
4482 struct stmmac_priv *priv = netdev_priv(dev);
4483 int txfifosz = priv->plat->tx_fifo_size;
4484 const int mtu = new_mtu;
4487 txfifosz = priv->dma_cap.tx_fifo_size;
4489 txfifosz /= priv->plat->tx_queues_to_use;
4491 if (netif_running(dev)) {
4492 netdev_err(priv->dev, "must be stopped to change its MTU\n");
4496 new_mtu = STMMAC_ALIGN(new_mtu);
4498 /* If this condition is true, the FIFO is too small or the MTU is too large */
4499 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4504 netdev_update_features(dev);
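/* Illustrative usage (interface name is an example): since the check above
 * rejects MTU changes while the device is running, a typical sequence is:
 *
 *   ip link set dev eth0 down
 *   ip link set dev eth0 mtu 7000
 *   ip link set dev eth0 up
 */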
4509 static netdev_features_t stmmac_fix_features(struct net_device *dev,
4510 netdev_features_t features)
4512 struct stmmac_priv *priv = netdev_priv(dev);
4514 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4515 features &= ~NETIF_F_RXCSUM;
4517 if (!priv->plat->tx_coe)
4518 features &= ~NETIF_F_CSUM_MASK;
4520 /* Some GMAC devices have buggy Jumbo frame support that
4521 * requires the Tx COE to be disabled for oversized frames
4522 * (due to limited buffer sizes). In this case we disable
4523 * the TX csum insertion in the TDES and do not use SF.
4525 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4526 features &= ~NETIF_F_CSUM_MASK;
4528 /* Disable TSO if requested via ethtool */
4529 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4530 if (features & NETIF_F_TSO)
4539 static int stmmac_set_features(struct net_device *netdev,
4540 netdev_features_t features)
4542 struct stmmac_priv *priv = netdev_priv(netdev);
4546 /* Keep the COE Type if checksum offload is supported */
4547 if (features & NETIF_F_RXCSUM)
4548 priv->hw->rx_csum = priv->plat->rx_coe;
4550 priv->hw->rx_csum = 0;
4551 /* No check needed because rx_coe has been set before and will be
4552 * fixed in case of issues.
4554 stmmac_rx_ipc(priv, priv->hw);
4556 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4557 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4558 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
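/* Note: Split Header depends on RX checksum offload being active; sph_en
 * above is only true while rx_csum is non-zero, so clearing NETIF_F_RXCSUM
 * via ethtool implicitly disables SPH on every RX channel as well.
 */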
4563 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
4565 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
4566 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
4567 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
4568 bool *hs_enable = &fpe_cfg->hs_enable;
4570 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
4573 /* If LP has sent verify mPacket, LP is FPE capable */
4574 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
4575 if (*lp_state < FPE_STATE_CAPABLE)
4576 *lp_state = FPE_STATE_CAPABLE;
4578 /* If the user has requested FPE enable, respond quickly */
4580 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
4584 /* If Local has sent verify mPacket, Local is FPE capable */
4585 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
4586 if (*lo_state < FPE_STATE_CAPABLE)
4587 *lo_state = FPE_STATE_CAPABLE;
4590 /* If LP has sent response mPacket, LP is entering FPE ON */
4591 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
4592 *lp_state = FPE_STATE_ENTERING_ON;
4594 /* If Local has sent response mPacket, Local is entering FPE ON */
4595 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
4596 *lo_state = FPE_STATE_ENTERING_ON;
4598 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
4599 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
4601 queue_work(priv->fpe_wq, &priv->fpe_task);
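/* Sketch of the handshake driven above (inferred only from the transitions
 * in this function and in stmmac_fpe_lp_task below):
 *
 *   OFF --verify mPacket sent/received--> CAPABLE
 *   CAPABLE --response mPacket sent/received--> ENTERING_ON
 *   ENTERING_ON on both sides --stmmac_fpe_configure()--> ON
 */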
4605 static void stmmac_common_interrupt(struct stmmac_priv *priv)
4607 u32 rx_cnt = priv->plat->rx_queues_to_use;
4608 u32 tx_cnt = priv->plat->tx_queues_to_use;
4613 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4614 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4617 pm_wakeup_event(priv->device, 0);
4619 if (priv->dma_cap.estsel)
4620 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
4621 &priv->xstats, tx_cnt);
4623 if (priv->dma_cap.fpesel) {
4624 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
4627 stmmac_fpe_event_status(priv, status);
4630 /* To handle GMAC own interrupts */
4631 if ((priv->plat->has_gmac) || xmac) {
4632 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4635 if (unlikely(status)) {
4636 /* For LPI we need to save the tx status */
4637 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4638 priv->tx_path_in_lpi_mode = true;
4639 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4640 priv->tx_path_in_lpi_mode = false;
4643 for (queue = 0; queue < queues_count; queue++) {
4644 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4646 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
4648 if (mtl_status != -EINVAL)
4649 status |= mtl_status;
4651 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
4652 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
4657 /* PCS link status */
4658 if (priv->hw->pcs) {
4659 if (priv->xstats.pcs_link)
4660 netif_carrier_on(priv->dev);
4662 netif_carrier_off(priv->dev);
4668 * stmmac_interrupt - main ISR
4669 * @irq: interrupt number.
4670 * @dev_id: to pass the net device pointer.
4671 * Description: this is the main driver interrupt service routine.
4673 * o DMA service routine (to manage incoming frame reception and transmission
4675 * o Core interrupts to manage: remote wake-up, management counter, LPI
4678 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4680 struct net_device *dev = (struct net_device *)dev_id;
4681 struct stmmac_priv *priv = netdev_priv(dev);
4683 /* Check if adapter is up */
4684 if (test_bit(STMMAC_DOWN, &priv->state))
4687 /* Check if a fatal error happened */
4688 if (stmmac_safety_feat_interrupt(priv))
4691 /* To handle Common interrupts */
4692 stmmac_common_interrupt(priv);
4694 /* To handle DMA interrupts */
4695 stmmac_dma_interrupt(priv);
4700 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
4702 struct net_device *dev = (struct net_device *)dev_id;
4703 struct stmmac_priv *priv = netdev_priv(dev);
4705 if (unlikely(!dev)) {
4706 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
4710 /* Check if adapter is up */
4711 if (test_bit(STMMAC_DOWN, &priv->state))
4714 /* To handle Common interrupts */
4715 stmmac_common_interrupt(priv);
4720 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
4722 struct net_device *dev = (struct net_device *)dev_id;
4723 struct stmmac_priv *priv = netdev_priv(dev);
4725 if (unlikely(!dev)) {
4726 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
4730 /* Check if adapter is up */
4731 if (test_bit(STMMAC_DOWN, &priv->state))
4734 /* Check if a fatal error happened */
4735 stmmac_safety_feat_interrupt(priv);
4740 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
4742 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
4743 int chan = tx_q->queue_index;
4744 struct stmmac_priv *priv;
4747 priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
4749 if (unlikely(!data)) {
4750 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
4754 /* Check if adapter is up */
4755 if (test_bit(STMMAC_DOWN, &priv->state))
4758 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
4760 if (unlikely(status & tx_hard_error_bump_tc)) {
4761 /* Try to bump up the dma threshold on this failure */
4762 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4765 if (priv->plat->force_thresh_dma_mode)
4766 stmmac_set_dma_operation_mode(priv,
4771 stmmac_set_dma_operation_mode(priv,
4775 priv->xstats.threshold = tc;
4777 } else if (unlikely(status == tx_hard_error)) {
4778 stmmac_tx_err(priv, chan);
4784 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
4786 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
4787 int chan = rx_q->queue_index;
4788 struct stmmac_priv *priv;
4790 priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
4792 if (unlikely(!data)) {
4793 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
4797 /* Check if adapter is up */
4798 if (test_bit(STMMAC_DOWN, &priv->state))
4801 stmmac_napi_check(priv, chan, DMA_DIR_RX);
4806 #ifdef CONFIG_NET_POLL_CONTROLLER
4807 /* Polling receive - used by NETCONSOLE and other diagnostic tools
4808 * to allow network I/O with interrupts disabled.
4810 static void stmmac_poll_controller(struct net_device *dev)
4812 struct stmmac_priv *priv = netdev_priv(dev);
4815 /* If adapter is down, do nothing */
4816 if (test_bit(STMMAC_DOWN, &priv->state))
4819 if (priv->plat->multi_msi_en) {
4820 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
4821 stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
4823 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
4824 stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
4826 disable_irq(dev->irq);
4827 stmmac_interrupt(dev->irq, dev);
4828 enable_irq(dev->irq);
4834 * stmmac_ioctl - Entry point for the Ioctl
4835 * @dev: Device pointer.
4836 * @rq: an IOCTL-specific structure that can contain a pointer to
4837 * a proprietary structure used to pass information to the driver.
4838 * @cmd: IOCTL command
4840 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4842 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4844 struct stmmac_priv *priv = netdev_priv(dev);
4845 int ret = -EOPNOTSUPP;
4847 if (!netif_running(dev))
4854 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4857 ret = stmmac_hwtstamp_set(dev, rq);
4860 ret = stmmac_hwtstamp_get(dev, rq);
4869 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4872 struct stmmac_priv *priv = cb_priv;
4873 int ret = -EOPNOTSUPP;
4875 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4878 stmmac_disable_all_queues(priv);
4881 case TC_SETUP_CLSU32:
4882 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4884 case TC_SETUP_CLSFLOWER:
4885 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4891 stmmac_enable_all_queues(priv);
4895 static LIST_HEAD(stmmac_block_cb_list);
4897 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4900 struct stmmac_priv *priv = netdev_priv(ndev);
4903 case TC_SETUP_BLOCK:
4904 return flow_block_cb_setup_simple(type_data,
4905 &stmmac_block_cb_list,
4906 stmmac_setup_tc_block_cb,
4908 case TC_SETUP_QDISC_CBS:
4909 return stmmac_tc_setup_cbs(priv, priv, type_data);
4910 case TC_SETUP_QDISC_TAPRIO:
4911 return stmmac_tc_setup_taprio(priv, priv, type_data);
4912 case TC_SETUP_QDISC_ETF:
4913 return stmmac_tc_setup_etf(priv, priv, type_data);
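/* Illustrative userspace usage (interface and handle values are examples):
 * a CBS shaper can be offloaded through this hook with something like:
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs \
 *      idleslope 20000 sendslope -980000 hicredit 30 locredit -1470 \
 *      offload 1
 */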
4919 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4920 struct net_device *sb_dev)
4922 int gso = skb_shinfo(skb)->gso_type;
4924 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4926 * There is no way to determine the number of TSO/USO
4927 * capable queues. Let's always use Queue 0,
4928 * because if TSO/USO is supported then at least this
4929 * one will be capable.
4934 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4937 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4939 struct stmmac_priv *priv = netdev_priv(ndev);
4942 ret = eth_mac_addr(ndev, addr);
4946 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4951 #ifdef CONFIG_DEBUG_FS
4952 static struct dentry *stmmac_fs_dir;
4954 static void sysfs_display_ring(void *head, int size, int extend_desc,
4955 struct seq_file *seq, dma_addr_t dma_phy_addr)
4958 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4959 struct dma_desc *p = (struct dma_desc *)head;
4960 dma_addr_t dma_addr;
4962 for (i = 0; i < size; i++) {
4964 dma_addr = dma_phy_addr + i * sizeof(*ep);
4965 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4967 le32_to_cpu(ep->basic.des0),
4968 le32_to_cpu(ep->basic.des1),
4969 le32_to_cpu(ep->basic.des2),
4970 le32_to_cpu(ep->basic.des3));
4973 dma_addr = dma_phy_addr + i * sizeof(*p);
4974 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4976 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4977 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4980 seq_putc(seq, '\n');
4984 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4986 struct net_device *dev = seq->private;
4987 struct stmmac_priv *priv = netdev_priv(dev);
4988 u32 rx_count = priv->plat->rx_queues_to_use;
4989 u32 tx_count = priv->plat->tx_queues_to_use;
4992 if ((dev->flags & IFF_UP) == 0)
4995 for (queue = 0; queue < rx_count; queue++) {
4996 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4998 seq_printf(seq, "RX Queue %d:\n", queue);
5000 if (priv->extend_desc) {
5001 seq_printf(seq, "Extended descriptor ring:\n");
5002 sysfs_display_ring((void *)rx_q->dma_erx,
5003 priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5005 seq_printf(seq, "Descriptor ring:\n");
5006 sysfs_display_ring((void *)rx_q->dma_rx,
5007 priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
5011 for (queue = 0; queue < tx_count; queue++) {
5012 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5014 seq_printf(seq, "TX Queue %d:\n", queue);
5016 if (priv->extend_desc) {
5017 seq_printf(seq, "Extended descriptor ring:\n");
5018 sysfs_display_ring((void *)tx_q->dma_etx,
5019 priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
5020 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
5021 seq_printf(seq, "Descriptor ring:\n");
5022 sysfs_display_ring((void *)tx_q->dma_tx,
5023 priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
5029 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
5031 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
5033 struct net_device *dev = seq->private;
5034 struct stmmac_priv *priv = netdev_priv(dev);
5036 if (!priv->hw_cap_support) {
5037 seq_printf(seq, "DMA HW features not supported\n");
5041 seq_printf(seq, "==============================\n");
5042 seq_printf(seq, "\tDMA HW features\n");
5043 seq_printf(seq, "==============================\n");
5045 seq_printf(seq, "\t10/100 Mbps: %s\n",
5046 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
5047 seq_printf(seq, "\t1000 Mbps: %s\n",
5048 (priv->dma_cap.mbps_1000) ? "Y" : "N");
5049 seq_printf(seq, "\tHalf duplex: %s\n",
5050 (priv->dma_cap.half_duplex) ? "Y" : "N");
5051 seq_printf(seq, "\tHash Filter: %s\n",
5052 (priv->dma_cap.hash_filter) ? "Y" : "N");
5053 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
5054 (priv->dma_cap.multi_addr) ? "Y" : "N");
5055 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
5056 (priv->dma_cap.pcs) ? "Y" : "N");
5057 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
5058 (priv->dma_cap.sma_mdio) ? "Y" : "N");
5059 seq_printf(seq, "\tPMT Remote wake up: %s\n",
5060 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
5061 seq_printf(seq, "\tPMT Magic Frame: %s\n",
5062 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
5063 seq_printf(seq, "\tRMON module: %s\n",
5064 (priv->dma_cap.rmon) ? "Y" : "N");
5065 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
5066 (priv->dma_cap.time_stamp) ? "Y" : "N");
5067 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
5068 (priv->dma_cap.atime_stamp) ? "Y" : "N");
5069 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
5070 (priv->dma_cap.eee) ? "Y" : "N");
5071 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
5072 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
5073 (priv->dma_cap.tx_coe) ? "Y" : "N");
5074 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
5075 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
5076 (priv->dma_cap.rx_coe) ? "Y" : "N");
5078 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
5079 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
5080 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
5081 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
5083 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
5084 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
5085 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
5086 priv->dma_cap.number_rx_channel);
5087 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
5088 priv->dma_cap.number_tx_channel);
5089 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
5090 priv->dma_cap.number_rx_queues);
5091 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
5092 priv->dma_cap.number_tx_queues);
5093 seq_printf(seq, "\tEnhanced descriptors: %s\n",
5094 (priv->dma_cap.enh_desc) ? "Y" : "N");
5095 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
5096 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
5097 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
5098 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
5099 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
5100 priv->dma_cap.pps_out_num);
5101 seq_printf(seq, "\tSafety Features: %s\n",
5102 priv->dma_cap.asp ? "Y" : "N");
5103 seq_printf(seq, "\tFlexible RX Parser: %s\n",
5104 priv->dma_cap.frpsel ? "Y" : "N");
5105 seq_printf(seq, "\tEnhanced Addressing: %d\n",
5106 priv->dma_cap.addr64);
5107 seq_printf(seq, "\tReceive Side Scaling: %s\n",
5108 priv->dma_cap.rssen ? "Y" : "N");
5109 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
5110 priv->dma_cap.vlhash ? "Y" : "N");
5111 seq_printf(seq, "\tSplit Header: %s\n",
5112 priv->dma_cap.sphen ? "Y" : "N");
5113 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
5114 priv->dma_cap.vlins ? "Y" : "N");
5115 seq_printf(seq, "\tDouble VLAN: %s\n",
5116 priv->dma_cap.dvlan ? "Y" : "N");
5117 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
5118 priv->dma_cap.l3l4fnum);
5119 seq_printf(seq, "\tARP Offloading: %s\n",
5120 priv->dma_cap.arpoffsel ? "Y" : "N");
5121 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
5122 priv->dma_cap.estsel ? "Y" : "N");
5123 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
5124 priv->dma_cap.fpesel ? "Y" : "N");
5125 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
5126 priv->dma_cap.tbssel ? "Y" : "N");
5129 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
5131 /* Use network device events to rename debugfs file entries.
5133 static int stmmac_device_event(struct notifier_block *unused,
5134 unsigned long event, void *ptr)
5136 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5137 struct stmmac_priv *priv = netdev_priv(dev);
5139 if (dev->netdev_ops != &stmmac_netdev_ops)
5143 case NETDEV_CHANGENAME:
5144 if (priv->dbgfs_dir)
5145 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
5155 static struct notifier_block stmmac_notifier = {
5156 .notifier_call = stmmac_device_event,
5159 static void stmmac_init_fs(struct net_device *dev)
5161 struct stmmac_priv *priv = netdev_priv(dev);
5165 /* Create per netdev entries */
5166 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
5168 /* Entry to report DMA RX/TX rings */
5169 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
5170 &stmmac_rings_status_fops);
5172 /* Entry to report the DMA HW features */
5173 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
5174 &stmmac_dma_cap_fops);
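/* With debugfs mounted at its usual location, the entries created above show
 * up as (illustrative, assuming the interface is named eth0):
 *
 *   /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */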
5179 static void stmmac_exit_fs(struct net_device *dev)
5181 struct stmmac_priv *priv = netdev_priv(dev);
5183 debugfs_remove_recursive(priv->dbgfs_dir);
5185 #endif /* CONFIG_DEBUG_FS */
5187 static u32 stmmac_vid_crc32_le(__le16 vid_le)
5189 unsigned char *data = (unsigned char *)&vid_le;
5190 unsigned char data_byte = 0;
5195 bits = get_bitmask_order(VLAN_VID_MASK);
5196 for (i = 0; i < bits; i++) {
5197 if ((i % 8) == 0)
5198 data_byte = data[i / 8];
5200 temp = ((crc & 1) ^ data_byte) & 1;
5211 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
5218 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
5219 __le16 vid_le = cpu_to_le16(vid);
5220 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
5225 if (!priv->dma_cap.vlhash) {
5226 if (count > 2) /* VID = 0 always passes filter */
5229 pmatch = cpu_to_le16(vid);
5233 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
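/* Worked example (illustrative): for each active VID, bitrev32(~crc32) >> 28
 * keeps the top four reversed CRC bits, i.e. a bin index in 0..15, and the
 * matching bit is set in the 16-bin hash passed to the MAC. On cores without
 * VLAN hashing (dma_cap.vlhash == 0), only a single VID besides VID 0 can be
 * matched, via the perfect-match value in pmatch instead.
 */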
5236 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
5238 struct stmmac_priv *priv = netdev_priv(ndev);
5239 bool is_double = false;
5242 ret = pm_runtime_get_sync(priv->device);
5244 pm_runtime_put_noidle(priv->device);
5248 if (be16_to_cpu(proto) == ETH_P_8021AD)
5251 set_bit(vid, priv->active_vlans);
5252 ret = stmmac_vlan_update(priv, is_double);
5254 clear_bit(vid, priv->active_vlans);
5258 if (priv->hw->num_vlan) {
5259 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
5267 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
5269 struct stmmac_priv *priv = netdev_priv(ndev);
5270 bool is_double = false;
5273 if (be16_to_cpu(proto) == ETH_P_8021AD)
5276 clear_bit(vid, priv->active_vlans);
5278 if (priv->hw->num_vlan) {
5279 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
5281 goto del_vlan_error;
5284 ret = stmmac_vlan_update(priv, is_double);
5287 pm_runtime_put(priv->device);
5292 static const struct net_device_ops stmmac_netdev_ops = {
5293 .ndo_open = stmmac_open,
5294 .ndo_start_xmit = stmmac_xmit,
5295 .ndo_stop = stmmac_release,
5296 .ndo_change_mtu = stmmac_change_mtu,
5297 .ndo_fix_features = stmmac_fix_features,
5298 .ndo_set_features = stmmac_set_features,
5299 .ndo_set_rx_mode = stmmac_set_rx_mode,
5300 .ndo_tx_timeout = stmmac_tx_timeout,
5301 .ndo_do_ioctl = stmmac_ioctl,
5302 .ndo_setup_tc = stmmac_setup_tc,
5303 .ndo_select_queue = stmmac_select_queue,
5304 #ifdef CONFIG_NET_POLL_CONTROLLER
5305 .ndo_poll_controller = stmmac_poll_controller,
5307 .ndo_set_mac_address = stmmac_set_mac_address,
5308 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
5309 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
5312 static void stmmac_reset_subtask(struct stmmac_priv *priv)
5314 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
5316 if (test_bit(STMMAC_DOWN, &priv->state))
5319 netdev_err(priv->dev, "Reset adapter.\n");
5322 netif_trans_update(priv->dev);
5323 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
5324 usleep_range(1000, 2000);
5326 set_bit(STMMAC_DOWN, &priv->state);
5327 dev_close(priv->dev);
5328 dev_open(priv->dev, NULL);
5329 clear_bit(STMMAC_DOWN, &priv->state);
5330 clear_bit(STMMAC_RESETING, &priv->state);
5334 static void stmmac_service_task(struct work_struct *work)
5336 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
5339 stmmac_reset_subtask(priv);
5340 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
5344 * stmmac_hw_init - Init the MAC device
5345 * @priv: driver private structure
5346 * Description: this function is to configure the MAC device according to
5347 * some platform parameters or the HW capability register. It prepares the
5348 * driver to use either ring or chain modes and to setup either enhanced or
5349 * normal descriptors.
5351 static int stmmac_hw_init(struct stmmac_priv *priv)
5355 /* dwmac-sun8i only works in chain mode */
5356 if (priv->plat->has_sun8i)
5358 priv->chain_mode = chain_mode;
5360 /* Initialize HW Interface */
5361 ret = stmmac_hwif_init(priv);
5365 /* Get the HW capability (GMAC cores newer than 3.50a) */
5366 priv->hw_cap_support = stmmac_get_hw_features(priv);
5367 if (priv->hw_cap_support) {
5368 dev_info(priv->device, "DMA HW capability register supported\n");
5370 /* We can override some gmac/dma configuration fields (e.g.
5371 * enh_desc, tx_coe) that are passed through the
5372 * platform with the values from the HW capability
5373 * register (if supported).
5375 priv->plat->enh_desc = priv->dma_cap.enh_desc;
5376 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
5377 priv->hw->pmt = priv->plat->pmt;
5378 if (priv->dma_cap.hash_tb_sz) {
5379 priv->hw->multicast_filter_bins =
5380 (BIT(priv->dma_cap.hash_tb_sz) << 5);
5381 priv->hw->mcast_bits_log2 =
5382 ilog2(priv->hw->multicast_filter_bins);
5385 /* TXCOE doesn't work in thresh DMA mode */
5386 if (priv->plat->force_thresh_dma_mode)
5387 priv->plat->tx_coe = 0;
5389 priv->plat->tx_coe = priv->dma_cap.tx_coe;
5391 /* In case of GMAC4 rx_coe is from HW cap register. */
5392 priv->plat->rx_coe = priv->dma_cap.rx_coe;
5394 if (priv->dma_cap.rx_coe_type2)
5395 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
5396 else if (priv->dma_cap.rx_coe_type1)
5397 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
5400 dev_info(priv->device, "No HW DMA feature register supported\n");
5403 if (priv->plat->rx_coe) {
5404 priv->hw->rx_csum = priv->plat->rx_coe;
5405 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
5406 if (priv->synopsys_id < DWMAC_CORE_4_00)
5407 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
5409 if (priv->plat->tx_coe)
5410 dev_info(priv->device, "TX Checksum insertion supported\n");
5412 if (priv->plat->pmt) {
5413 dev_info(priv->device, "Wake-Up On LAN supported\n");
5414 device_set_wakeup_capable(priv->device, 1);
5417 if (priv->dma_cap.tsoen)
5418 dev_info(priv->device, "TSO supported\n");
5420 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
5421 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
5423 /* Run HW quirks, if any */
5424 if (priv->hwif_quirks) {
5425 ret = priv->hwif_quirks(priv);
5430 /* Rx Watchdog is available in cores newer than 3.40.
5431 * In some cases, for example on buggy HW, this feature
5432 * has to be disabled, which can be done by passing the
5433 * riwt_off field from the platform.
5435 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
5436 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
5438 dev_info(priv->device,
5439 "Enable RX Mitigation via HW Watchdog Timer\n");
5445 static void stmmac_napi_add(struct net_device *dev)
5447 struct stmmac_priv *priv = netdev_priv(dev);
5450 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
5452 for (queue = 0; queue < maxq; queue++) {
5453 struct stmmac_channel *ch = &priv->channel[queue];
5455 ch->priv_data = priv;
5457 spin_lock_init(&ch->lock);
5459 if (queue < priv->plat->rx_queues_to_use) {
5460 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
5463 if (queue < priv->plat->tx_queues_to_use) {
5464 netif_tx_napi_add(dev, &ch->tx_napi,
5465 stmmac_napi_poll_tx,
5471 static void stmmac_napi_del(struct net_device *dev)
5473 struct stmmac_priv *priv = netdev_priv(dev);
5476 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
5478 for (queue = 0; queue < maxq; queue++) {
5479 struct stmmac_channel *ch = &priv->channel[queue];
5481 if (queue < priv->plat->rx_queues_to_use)
5482 netif_napi_del(&ch->rx_napi);
5483 if (queue < priv->plat->tx_queues_to_use)
5484 netif_napi_del(&ch->tx_napi);
5488 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
5490 struct stmmac_priv *priv = netdev_priv(dev);
5493 if (netif_running(dev))
5494 stmmac_release(dev);
5496 stmmac_napi_del(dev);
5498 priv->plat->rx_queues_to_use = rx_cnt;
5499 priv->plat->tx_queues_to_use = tx_cnt;
5501 stmmac_napi_add(dev);
5503 if (netif_running(dev))
5504 ret = stmmac_open(dev);
5509 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
5511 struct stmmac_priv *priv = netdev_priv(dev);
5514 if (netif_running(dev))
5515 stmmac_release(dev);
5517 priv->dma_rx_size = rx_size;
5518 priv->dma_tx_size = tx_size;
5520 if (netif_running(dev))
5521 ret = stmmac_open(dev);
5526 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
5527 static void stmmac_fpe_lp_task(struct work_struct *work)
5529 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
5531 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5532 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5533 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5534 bool *hs_enable = &fpe_cfg->hs_enable;
5535 bool *enable = &fpe_cfg->enable;
5538 while (retries-- > 0) {
5539 /* Bail out immediately if FPE handshake is OFF */
5540 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
5543 if (*lo_state == FPE_STATE_ENTERING_ON &&
5544 *lp_state == FPE_STATE_ENTERING_ON) {
5545 stmmac_fpe_configure(priv, priv->ioaddr,
5546 priv->plat->tx_queues_to_use,
5547 priv->plat->rx_queues_to_use,
5550 netdev_info(priv->dev, "configured FPE\n");
5552 *lo_state = FPE_STATE_ON;
5553 *lp_state = FPE_STATE_ON;
5554 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
5558 if ((*lo_state == FPE_STATE_CAPABLE ||
5559 *lo_state == FPE_STATE_ENTERING_ON) &&
5560 *lp_state != FPE_STATE_ON) {
5561 netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
5562 *lo_state, *lp_state);
5563 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5566 /* Sleep then retry */
5570 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
5573 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
5575 if (priv->plat->fpe_cfg->hs_enable != enable) {
5577 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5580 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
5581 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
5584 priv->plat->fpe_cfg->hs_enable = enable;
5590 * @device: device pointer
5591 * @plat_dat: platform data pointer
5592 * @res: stmmac resource pointer
5593 * Description: this is the main probe function used to
5594 * call alloc_etherdev and allocate the priv structure.
5596 * Returns 0 on success, otherwise a negative errno.
5598 int stmmac_dvr_probe(struct device *device,
5599 struct plat_stmmacenet_data *plat_dat,
5600 struct stmmac_resources *res)
5602 struct net_device *ndev = NULL;
5603 struct stmmac_priv *priv;
5607 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
5608 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
5612 SET_NETDEV_DEV(ndev, device);
5614 priv = netdev_priv(ndev);
5615 priv->device = device;
5618 stmmac_set_ethtool_ops(ndev);
5619 priv->pause = pause;
5620 priv->plat = plat_dat;
5621 priv->ioaddr = res->addr;
5622 priv->dev->base_addr = (unsigned long)res->addr;
5623 priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
5625 priv->dev->irq = res->irq;
5626 priv->wol_irq = res->wol_irq;
5627 priv->lpi_irq = res->lpi_irq;
5628 priv->sfty_ce_irq = res->sfty_ce_irq;
5629 priv->sfty_ue_irq = res->sfty_ue_irq;
5630 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
5631 priv->rx_irq[i] = res->rx_irq[i];
5632 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
5633 priv->tx_irq[i] = res->tx_irq[i];
5635 if (!IS_ERR_OR_NULL(res->mac))
5636 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
5638 dev_set_drvdata(device, priv->dev);
5640 /* Verify driver arguments */
5641 stmmac_verify_args();
5643 /* Allocate workqueue */
5644 priv->wq = create_singlethread_workqueue("stmmac_wq");
5646 dev_err(priv->device, "failed to create workqueue\n");
5650 INIT_WORK(&priv->service_task, stmmac_service_task);
5652 /* Initialize Link Partner FPE workqueue */
5653 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
5655 /* Override with kernel parameters if supplied XXX CRS XXX
5656 * this needs to have multiple instances
5658 if ((phyaddr >= 0) && (phyaddr <= 31))
5659 priv->plat->phy_addr = phyaddr;
5661 if (priv->plat->stmmac_rst) {
5662 ret = reset_control_assert(priv->plat->stmmac_rst);
5663 reset_control_deassert(priv->plat->stmmac_rst);
5664 /* Some reset controllers have only a reset callback instead of
5665 * an assert + deassert callback pair.
5667 if (ret == -ENOTSUPP)
5668 reset_control_reset(priv->plat->stmmac_rst);
5671 /* Init MAC and get the capabilities */
5672 ret = stmmac_hw_init(priv);
5676 stmmac_check_ether_addr(priv);
5678 ndev->netdev_ops = &stmmac_netdev_ops;
5680 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5683 ret = stmmac_tc_init(priv, priv);
5685 ndev->hw_features |= NETIF_F_HW_TC;
5688 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5689 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
5690 if (priv->plat->has_gmac4)
5691 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
5693 dev_info(priv->device, "TSO feature enabled\n");
5696 if (priv->dma_cap.sphen) {
5697 ndev->hw_features |= NETIF_F_GRO;
5699 dev_info(priv->device, "SPH feature enabled\n");
5702 /* The current IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64
5703 * bit widths, but some SoCs support other widths: e.g. the i.MX8MP
5704 * supports 34 bits, which maps to a 40-bit width in MAC_HW_Feature1[ADDR64].
5705 * So overwrite dma_cap.addr64 according to the real HW design.
5707 if (priv->plat->addr64)
5708 priv->dma_cap.addr64 = priv->plat->addr64;
5710 if (priv->dma_cap.addr64) {
5711 ret = dma_set_mask_and_coherent(device,
5712 DMA_BIT_MASK(priv->dma_cap.addr64));
5714 dev_info(priv->device, "Using %d bits DMA width\n",
5715 priv->dma_cap.addr64);
5718 * If more than 32 bits can be addressed, make sure to
5719 * enable enhanced addressing mode.
5721 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5722 priv->plat->dma_cfg->eame = true;
5724 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5726 dev_err(priv->device, "Failed to set DMA Mask\n");
5730 priv->dma_cap.addr64 = 32;
5734 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5735 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
5736 #ifdef STMMAC_VLAN_TAG_USED
5737 /* Both mac100 and gmac support receive VLAN tag detection */
5738 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5739 if (priv->dma_cap.vlhash) {
5740 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5741 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5743 if (priv->dma_cap.vlins) {
5744 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5745 if (priv->dma_cap.dvlan)
5746 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5749 priv->msg_enable = netif_msg_init(debug, default_msg_level);
5751 /* Initialize RSS */
5752 rxq = priv->plat->rx_queues_to_use;
5753 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
5754 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5755 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
5757 if (priv->dma_cap.rssen && priv->plat->rss_en)
5758 ndev->features |= NETIF_F_RXHASH;
5760 /* MTU range: 46 - hw-specific max */
5761 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
5762 if (priv->plat->has_xgmac)
5763 ndev->max_mtu = XGMAC_JUMBO_LEN;
5764 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
5765 ndev->max_mtu = JUMBO_LEN;
5767 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5768 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
5769 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
5771 if ((priv->plat->maxmtu < ndev->max_mtu) &&
5772 (priv->plat->maxmtu >= ndev->min_mtu))
5773 ndev->max_mtu = priv->plat->maxmtu;
5774 else if (priv->plat->maxmtu < ndev->min_mtu)
5775 dev_warn(priv->device,
5776 "%s: warning: maxmtu having invalid value (%d)\n",
5777 __func__, priv->plat->maxmtu);
5780 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
5782 /* Setup channels NAPI */
5783 stmmac_napi_add(ndev);
5785 mutex_init(&priv->lock);
5787 /* If a specific clk_csr value is passed from the platform
5788 * this means that the CSR Clock Range selection cannot be
5789 * changed at run-time and is fixed. Otherwise the driver will try to
5790 * set the MDC clock dynamically according to the actual csr clock input.
5793 if (priv->plat->clk_csr >= 0)
5794 priv->clk_csr = priv->plat->clk_csr;
5796 stmmac_clk_csr_set(priv);
5798 stmmac_check_pcs_mode(priv);
5800 pm_runtime_get_noresume(device);
5801 pm_runtime_set_active(device);
5802 pm_runtime_enable(device);
5804 if (priv->hw->pcs != STMMAC_PCS_TBI &&
5805 priv->hw->pcs != STMMAC_PCS_RTBI) {
5806 /* MDIO bus Registration */
5807 ret = stmmac_mdio_register(ndev);
5809 dev_err(priv->device,
5810 "%s: MDIO bus (id: %d) registration failed",
5811 __func__, priv->plat->bus_id);
5812 goto error_mdio_register;
5816 ret = stmmac_phy_setup(priv);
5818 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5819 goto error_phy_setup;
5822 ret = register_netdev(ndev);
5824 dev_err(priv->device, "%s: ERROR %i registering the device\n",
5826 goto error_netdev_register;
5829 if (priv->plat->serdes_powerup) {
5830 ret = priv->plat->serdes_powerup(ndev,
5831 priv->plat->bsp_priv);
5834 goto error_serdes_powerup;
5837 #ifdef CONFIG_DEBUG_FS
5838 stmmac_init_fs(ndev);
5841 /* Let pm_runtime_put() disable the clocks.
5842 * If CONFIG_PM is not enabled, the clocks will stay powered.
5844 pm_runtime_put(device);
5848 error_serdes_powerup:
5849 unregister_netdev(ndev);
5850 error_netdev_register:
5851 phylink_destroy(priv->phylink);
5853 if (priv->hw->pcs != STMMAC_PCS_TBI &&
5854 priv->hw->pcs != STMMAC_PCS_RTBI)
5855 stmmac_mdio_unregister(ndev);
5856 error_mdio_register:
5857 stmmac_napi_del(ndev);
5859 destroy_workqueue(priv->wq);
5860 stmmac_bus_clks_config(priv, false);
5864 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5868 * @dev: device pointer
5869 * Description: this function resets the TX/RX processes, disables the MAC
5870 * RX/TX, changes the link status and releases the DMA descriptor rings.
5872 int stmmac_dvr_remove(struct device *dev)
5874 struct net_device *ndev = dev_get_drvdata(dev);
5875 struct stmmac_priv *priv = netdev_priv(ndev);
5877 netdev_info(priv->dev, "%s: removing driver", __func__);
5879 stmmac_stop_all_dma(priv);
5880 stmmac_mac_set(priv, priv->ioaddr, false);
5881 netif_carrier_off(ndev);
5882 unregister_netdev(ndev);
5884 /* Serdes power down needs to happen after the VLAN filter
5885 * deletion that is triggered by unregister_netdev().
5887 if (priv->plat->serdes_powerdown)
5888 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5890 #ifdef CONFIG_DEBUG_FS
5891 stmmac_exit_fs(ndev);
5893 phylink_destroy(priv->phylink);
5894 if (priv->plat->stmmac_rst)
5895 reset_control_assert(priv->plat->stmmac_rst);
5896 pm_runtime_put(dev);
5897 pm_runtime_disable(dev);
5898 if (priv->hw->pcs != STMMAC_PCS_TBI &&
5899 priv->hw->pcs != STMMAC_PCS_RTBI)
5900 stmmac_mdio_unregister(ndev);
5901 destroy_workqueue(priv->wq);
5902 mutex_destroy(&priv->lock);
5906 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5909 * stmmac_suspend - suspend callback
5910 * @dev: device pointer
5911 * Description: this is the function to suspend the device and it is called
5912 * by the platform driver to stop the network queue, release the resources,
5913 * program the PMT register (for WoL), clean and release driver resources.
5915 int stmmac_suspend(struct device *dev)
5917 struct net_device *ndev = dev_get_drvdata(dev);
5918 struct stmmac_priv *priv = netdev_priv(ndev);
5922 if (!ndev || !netif_running(ndev))
5925 phylink_mac_change(priv->phylink, false);
5927 mutex_lock(&priv->lock);
5929 netif_device_detach(ndev);
5931 stmmac_disable_all_queues(priv);
5933 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5934 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
5936 if (priv->eee_enabled) {
5937 priv->tx_path_in_lpi_mode = false;
5938 del_timer_sync(&priv->eee_ctrl_timer);
5941 /* Stop TX/RX DMA */
5942 stmmac_stop_all_dma(priv);
5944 if (priv->plat->serdes_powerdown)
5945 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5947 /* Enable Power down mode by programming the PMT regs */
5948 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5949 stmmac_pmt(priv, priv->hw, priv->wolopts);
5952 mutex_unlock(&priv->lock);
5954 if (device_may_wakeup(priv->device))
5955 phylink_speed_down(priv->phylink, false);
5956 phylink_stop(priv->phylink);
5958 mutex_lock(&priv->lock);
5960 stmmac_mac_set(priv, priv->ioaddr, false);
5961 pinctrl_pm_select_sleep_state(priv->device);
5962 /* Disable the clock in case PWM is off */
5963 clk_disable_unprepare(priv->plat->clk_ptp_ref);
5964 ret = pm_runtime_force_suspend(dev);
5969 mutex_unlock(&priv->lock);
5971 if (priv->dma_cap.fpesel) {
5973 stmmac_fpe_configure(priv, priv->ioaddr,
5974 priv->plat->tx_queues_to_use,
5975 priv->plat->rx_queues_to_use, false);
5977 stmmac_fpe_handshake(priv, false);
5980 priv->speed = SPEED_UNKNOWN;
5983 EXPORT_SYMBOL_GPL(stmmac_suspend);
5986 * stmmac_reset_queues_param - reset queue parameters
5987 * @priv: device pointer
5989 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5991 u32 rx_cnt = priv->plat->rx_queues_to_use;
5992 u32 tx_cnt = priv->plat->tx_queues_to_use;
5995 for (queue = 0; queue < rx_cnt; queue++) {
5996 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6002 for (queue = 0; queue < tx_cnt; queue++) {
6003 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6009 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
6014 * stmmac_resume - resume callback
6015 * @dev: device pointer
6016 * Description: when resume this function is invoked to setup the DMA and CORE
6017 * in a usable state.
6019 int stmmac_resume(struct device *dev)
6021 struct net_device *ndev = dev_get_drvdata(dev);
6022 struct stmmac_priv *priv = netdev_priv(ndev);
6025 if (!netif_running(ndev))
6028 /* The Power Down bit in the PM register is cleared
6029 * automatically as soon as a magic packet or a Wake-up frame
6030 * is received. Anyway, it's better to manually clear
6031 * this bit because it can generate problems while resuming
6032 * from other devices (e.g. a serial console).
6034 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
6035 mutex_lock(&priv->lock);
6036 stmmac_pmt(priv, priv->hw, 0);
6037 mutex_unlock(&priv->lock);
6040 pinctrl_pm_select_default_state(priv->device);
6041 /* enable the clk previously disabled */
6042 ret = pm_runtime_force_resume(dev);
6045 if (priv->plat->clk_ptp_ref)
6046 clk_prepare_enable(priv->plat->clk_ptp_ref);
6047 /* reset the phy so that it's ready */
6049 stmmac_mdio_reset(priv->mii);
6052 if (priv->plat->serdes_powerup) {
6053 ret = priv->plat->serdes_powerup(ndev,
6054 priv->plat->bsp_priv);
6060 if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
6062 phylink_start(priv->phylink);
6063 /* We may have called phylink_speed_down before */
6064 phylink_speed_up(priv->phylink);
6069 mutex_lock(&priv->lock);
6071 stmmac_reset_queues_param(priv);
6072 stmmac_reinit_rx_buffers(priv);
6073 stmmac_free_tx_skbufs(priv);
6074 stmmac_clear_descriptors(priv);
6076 stmmac_hw_setup(ndev, false);
6077 stmmac_init_coalesce(priv);
6078 stmmac_set_rx_mode(ndev);
6080 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
6082 stmmac_enable_all_queues(priv);
6084 mutex_unlock(&priv->lock);
6087 phylink_mac_change(priv->phylink, true);
6089 netif_device_attach(ndev);
6093 EXPORT_SYMBOL_GPL(stmmac_resume);
6096 static int __init stmmac_cmdline_opt(char *str)
6102 while ((opt = strsep(&str, ",")) != NULL) {
6103 if (!strncmp(opt, "debug:", 6)) {
6104 if (kstrtoint(opt + 6, 0, &debug))
6106 } else if (!strncmp(opt, "phyaddr:", 8)) {
6107 if (kstrtoint(opt + 8, 0, &phyaddr))
6109 } else if (!strncmp(opt, "buf_sz:", 7)) {
6110 if (kstrtoint(opt + 7, 0, &buf_sz))
6112 } else if (!strncmp(opt, "tc:", 3)) {
6113 if (kstrtoint(opt + 3, 0, &tc))
6115 } else if (!strncmp(opt, "watchdog:", 9)) {
6116 if (kstrtoint(opt + 9, 0, &watchdog))
6118 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
6119 if (kstrtoint(opt + 10, 0, &flow_ctrl))
6121 } else if (!strncmp(opt, "pause:", 6)) {
6122 if (kstrtoint(opt + 6, 0, &pause))
6124 } else if (!strncmp(opt, "eee_timer:", 10)) {
6125 if (kstrtoint(opt + 10, 0, &eee_timer))
6127 } else if (!strncmp(opt, "chain_mode:", 11)) {
6128 if (kstrtoint(opt + 11, 0, &chain_mode))
6135 pr_err("%s: ERROR broken module parameter conversion", __func__);
6139 __setup("stmmaceth=", stmmac_cmdline_opt);
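/* Illustrative boot-time usage combining several of the options parsed above:
 *
 *   stmmaceth=debug:16,phyaddr:1,eee_timer:2000,chain_mode:1
 */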
6142 static int __init stmmac_init(void)
6144 #ifdef CONFIG_DEBUG_FS
6145 /* Create debugfs main directory if it doesn't exist yet */
6147 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
6148 register_netdevice_notifier(&stmmac_notifier);
6154 static void __exit stmmac_exit(void)
6156 #ifdef CONFIG_DEBUG_FS
6157 unregister_netdevice_notifier(&stmmac_notifier);
6158 debugfs_remove_recursive(stmmac_fs_dir);
6162 module_init(stmmac_init)
6163 module_exit(stmmac_exit)
6165 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
6166 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
6167 MODULE_LICENSE("GPL");