1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
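/* For illustration (hypothetical values): assuming SMP_CACHE_BYTES == 64,
 * STMMAC_ALIGN(1500) == ALIGN(ALIGN(1500, 64), 16) == 1536. The actual result
 * is platform dependent since it follows the cache line size.
 */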
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but the user can force chain mode instead of ring mode
122  */
123 static int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
135
136 #ifdef CONFIG_DEBUG_FS
137 static const struct net_device_ops stmmac_netdev_ops;
138 static void stmmac_init_fs(struct net_device *dev);
139 static void stmmac_exit_fs(struct net_device *dev);
140 #endif
141
142 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
143
144 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
145 {
146         int ret = 0;
147
148         if (enabled) {
149                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
150                 if (ret)
151                         return ret;
152                 ret = clk_prepare_enable(priv->plat->pclk);
153                 if (ret) {
154                         clk_disable_unprepare(priv->plat->stmmac_clk);
155                         return ret;
156                 }
157                 if (priv->plat->clks_config) {
158                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
159                         if (ret) {
160                                 clk_disable_unprepare(priv->plat->stmmac_clk);
161                                 clk_disable_unprepare(priv->plat->pclk);
162                                 return ret;
163                         }
164                 }
165         } else {
166                 clk_disable_unprepare(priv->plat->stmmac_clk);
167                 clk_disable_unprepare(priv->plat->pclk);
168                 if (priv->plat->clks_config)
169                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
170         }
171
172         return ret;
173 }
174 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
175
176 /**
177  * stmmac_verify_args - verify the driver parameters.
178  * Description: it checks the driver parameters and sets a default in case of
179  * errors.
180  */
181 static void stmmac_verify_args(void)
182 {
183         if (unlikely(watchdog < 0))
184                 watchdog = TX_TIMEO;
185         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
186                 buf_sz = DEFAULT_BUFSIZE;
187         if (unlikely(flow_ctrl > 1))
188                 flow_ctrl = FLOW_AUTO;
189         else if (likely(flow_ctrl < 0))
190                 flow_ctrl = FLOW_OFF;
191         if (unlikely((pause < 0) || (pause > 0xffff)))
192                 pause = PAUSE_TIME;
193         if (eee_timer < 0)
194                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
195 }
196
197 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
198 {
199         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
200         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
201         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
202         u32 queue;
203
204         for (queue = 0; queue < maxq; queue++) {
205                 struct stmmac_channel *ch = &priv->channel[queue];
206
207                 if (stmmac_xdp_is_enabled(priv) &&
208                     test_bit(queue, priv->af_xdp_zc_qps)) {
209                         napi_disable(&ch->rxtx_napi);
210                         continue;
211                 }
212
213                 if (queue < rx_queues_cnt)
214                         napi_disable(&ch->rx_napi);
215                 if (queue < tx_queues_cnt)
216                         napi_disable(&ch->tx_napi);
217         }
218 }
219
220 /**
221  * stmmac_disable_all_queues - Disable all queues
222  * @priv: driver private structure
223  */
224 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
225 {
226         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
227         struct stmmac_rx_queue *rx_q;
228         u32 queue;
229
230         /* synchronize_rcu() needed for pending XDP buffers to drain */
231         for (queue = 0; queue < rx_queues_cnt; queue++) {
232                 rx_q = &priv->rx_queue[queue];
233                 if (rx_q->xsk_pool) {
234                         synchronize_rcu();
235                         break;
236                 }
237         }
238
239         __stmmac_disable_all_queues(priv);
240 }
241
242 /**
243  * stmmac_enable_all_queues - Enable all queues
244  * @priv: driver private structure
245  */
246 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
247 {
248         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
249         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
250         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
251         u32 queue;
252
253         for (queue = 0; queue < maxq; queue++) {
254                 struct stmmac_channel *ch = &priv->channel[queue];
255
256                 if (stmmac_xdp_is_enabled(priv) &&
257                     test_bit(queue, priv->af_xdp_zc_qps)) {
258                         napi_enable(&ch->rxtx_napi);
259                         continue;
260                 }
261
262                 if (queue < rx_queues_cnt)
263                         napi_enable(&ch->rx_napi);
264                 if (queue < tx_queues_cnt)
265                         napi_enable(&ch->tx_napi);
266         }
267 }
268
269 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
270 {
271         if (!test_bit(STMMAC_DOWN, &priv->state) &&
272             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
273                 queue_work(priv->wq, &priv->service_task);
274 }
275
276 static void stmmac_global_err(struct stmmac_priv *priv)
277 {
278         netif_carrier_off(priv->dev);
279         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
280         stmmac_service_event_schedule(priv);
281 }
282
283 /**
284  * stmmac_clk_csr_set - dynamically set the MDC clock
285  * @priv: driver private structure
286  * Description: this is to dynamically set the MDC clock according to the csr
287  * clock input.
288  * Note:
289  *      If a specific clk_csr value is passed from the platform
290  *      this means that the CSR Clock Range selection cannot be
291  *      changed at run-time and it is fixed (as reported in the driver
292  *      documentation). Otherwise, the driver will try to set the MDC
293  *      clock dynamically according to the actual clock input.
294  */
295 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
296 {
297         u32 clk_rate;
298
299         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
300
301         /* The platform-provided default clk_csr is assumed valid in all
302          * cases except the ones listed below.
303          * For clock rates higher than the IEEE 802.3 specified frequency
304          * we cannot estimate the proper divider because the frequency
305          * of clk_csr_i is unknown, so we do not change the default
306          * divider.
307          */
308         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
309                 if (clk_rate < CSR_F_35M)
310                         priv->clk_csr = STMMAC_CSR_20_35M;
311                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
312                         priv->clk_csr = STMMAC_CSR_35_60M;
313                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
314                         priv->clk_csr = STMMAC_CSR_60_100M;
315                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
316                         priv->clk_csr = STMMAC_CSR_100_150M;
317                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
318                         priv->clk_csr = STMMAC_CSR_150_250M;
319                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
320                         priv->clk_csr = STMMAC_CSR_250_300M;
321         }
322
323         if (priv->plat->has_sun8i) {
324                 if (clk_rate > 160000000)
325                         priv->clk_csr = 0x03;
326                 else if (clk_rate > 80000000)
327                         priv->clk_csr = 0x02;
328                 else if (clk_rate > 40000000)
329                         priv->clk_csr = 0x01;
330                 else
331                         priv->clk_csr = 0;
332         }
333
334         if (priv->plat->has_xgmac) {
335                 if (clk_rate > 400000000)
336                         priv->clk_csr = 0x5;
337                 else if (clk_rate > 350000000)
338                         priv->clk_csr = 0x4;
339                 else if (clk_rate > 300000000)
340                         priv->clk_csr = 0x3;
341                 else if (clk_rate > 250000000)
342                         priv->clk_csr = 0x2;
343                 else if (clk_rate > 150000000)
344                         priv->clk_csr = 0x1;
345                 else
346                         priv->clk_csr = 0x0;
347         }
348 }
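/* For illustration (hypothetical rate): assuming the platform did not pass a
 * fixed high-range clk_csr, a 75 MHz csr clock falls in the 60-100 MHz window
 * above and selects STMMAC_CSR_60_100M, which keeps the resulting MDC
 * frequency within the IEEE 802.3 limit of 2.5 MHz.
 */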
349
350 static void print_pkt(unsigned char *buf, int len)
351 {
352         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
353         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
354 }
355
356 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
357 {
358         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
359         u32 avail;
360
361         if (tx_q->dirty_tx > tx_q->cur_tx)
362                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
363         else
364                 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
365
366         return avail;
367 }
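/* For illustration (hypothetical indices): with dma_tx_size == 512,
 * dirty_tx == 500 and cur_tx == 10, the first branch applies and
 * avail == 500 - 10 - 1 == 489 free descriptors; the -1 reserves one slot so
 * that a completely full ring is not confused with an empty one.
 */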
368
369 /**
370  * stmmac_rx_dirty - Get RX queue dirty
371  * @priv: driver private structure
372  * @queue: RX queue index
373  */
374 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
375 {
376         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
377         u32 dirty;
378
379         if (rx_q->dirty_rx <= rx_q->cur_rx)
380                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
381         else
382                 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
383
384         return dirty;
385 }
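/* For illustration (hypothetical indices): with dma_rx_size == 512,
 * dirty_rx == 5 and cur_rx == 20, dirty == 20 - 5 == 15 descriptors wait to be
 * refilled; if cur_rx has wrapped (e.g. dirty_rx == 500, cur_rx == 3),
 * dirty == 512 - 500 + 3 == 15 as well.
 */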
386
387 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
388 {
389         int tx_lpi_timer;
390
391         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
392         priv->eee_sw_timer_en = en ? 0 : 1;
393         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
394         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
395 }
396
397 /**
398  * stmmac_enable_eee_mode - check and enter LPI mode
399  * @priv: driver private structure
400  * Description: this function checks that all TX queues are idle and, if so,
401  * enters LPI mode when EEE is enabled.
402  */
402  */
403 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
404 {
405         u32 tx_cnt = priv->plat->tx_queues_to_use;
406         u32 queue;
407
408         /* check if all TX queues have the work finished */
409         for (queue = 0; queue < tx_cnt; queue++) {
410                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
411
412                 if (tx_q->dirty_tx != tx_q->cur_tx)
413                         return; /* still unfinished work */
414         }
415
416         /* Check and enter LPI mode */
417         if (!priv->tx_path_in_lpi_mode)
418                 stmmac_set_eee_mode(priv, priv->hw,
419                                 priv->plat->en_tx_lpi_clockgating);
420 }
421
422 /**
423  * stmmac_disable_eee_mode - disable and exit from LPI mode
424  * @priv: driver private structure
425  * Description: this function exits LPI mode and disables EEE when the
426  * LPI state is active. It is called from the xmit path.
427  */
428 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
429 {
430         if (!priv->eee_sw_timer_en) {
431                 stmmac_lpi_entry_timer_config(priv, 0);
432                 return;
433         }
434
435         stmmac_reset_eee_mode(priv, priv->hw);
436         del_timer_sync(&priv->eee_ctrl_timer);
437         priv->tx_path_in_lpi_mode = false;
438 }
439
440 /**
441  * stmmac_eee_ctrl_timer - EEE TX SW timer.
442  * @t:  timer_list struct containing private info
443  * Description:
444  *  if there is no data transfer and if we are not in LPI state,
445  *  then the MAC transmitter can be moved to the LPI state.
446  */
447 static void stmmac_eee_ctrl_timer(struct timer_list *t)
448 {
449         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
450
451         stmmac_enable_eee_mode(priv);
452         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
453 }
454
455 /**
456  * stmmac_eee_init - init EEE
457  * @priv: driver private structure
458  * Description:
459  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
460  *  can also manage EEE, this function enables the LPI state and starts the
461  *  related timer.
462  */
463 bool stmmac_eee_init(struct stmmac_priv *priv)
464 {
465         int eee_tw_timer = priv->eee_tw_timer;
466
467         /* When using a PCS we cannot deal with the PHY registers at this
468          * stage, so we do not support extra features like EEE.
469          */
470         if (priv->hw->pcs == STMMAC_PCS_TBI ||
471             priv->hw->pcs == STMMAC_PCS_RTBI)
472                 return false;
473
474         /* Check if MAC core supports the EEE feature. */
475         if (!priv->dma_cap.eee)
476                 return false;
477
478         mutex_lock(&priv->lock);
479
480         /* Check if it needs to be deactivated */
481         if (!priv->eee_active) {
482                 if (priv->eee_enabled) {
483                         netdev_dbg(priv->dev, "disable EEE\n");
484                         stmmac_lpi_entry_timer_config(priv, 0);
485                         del_timer_sync(&priv->eee_ctrl_timer);
486                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487                         if (priv->hw->xpcs)
488                                 xpcs_config_eee(priv->hw->xpcs,
489                                                 priv->plat->mult_fact_100ns,
490                                                 false);
491                 }
492                 mutex_unlock(&priv->lock);
493                 return false;
494         }
495
496         if (priv->eee_active && !priv->eee_enabled) {
497                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499                                      eee_tw_timer);
500                 if (priv->hw->xpcs)
501                         xpcs_config_eee(priv->hw->xpcs,
502                                         priv->plat->mult_fact_100ns,
503                                         true);
504         }
505
506         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507                 del_timer_sync(&priv->eee_ctrl_timer);
508                 priv->tx_path_in_lpi_mode = false;
509                 stmmac_lpi_entry_timer_config(priv, 1);
510         } else {
511                 stmmac_lpi_entry_timer_config(priv, 0);
512                 mod_timer(&priv->eee_ctrl_timer,
513                           STMMAC_LPI_T(priv->tx_lpi_timer));
514         }
515
516         mutex_unlock(&priv->lock);
517         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518         return true;
519 }
520
521 static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
522 {
523         /* Correct the clk domain crossing(CDC) error */
524         if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
525                 return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
526         return 0;
527 }
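/* For illustration (hypothetical rate): on a GMAC4 with clk_ptp_rate ==
 * 250 MHz, the returned CDC correction is 2 * 1e9 / 250e6 == 8 ns, which is
 * subtracted from the raw hardware timestamps below.
 */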
528
529 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
530  * @priv: driver private structure
531  * @p : descriptor pointer
532  * @skb : the socket buffer
533  * Description :
534  * This function reads the timestamp from the descriptor, performs some
535  * sanity checks and passes it to the stack.
536  */
537 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
538                                    struct dma_desc *p, struct sk_buff *skb)
539 {
540         struct skb_shared_hwtstamps shhwtstamp;
541         bool found = false;
542         u64 ns = 0;
543
544         if (!priv->hwts_tx_en)
545                 return;
546
547         /* exit if skb doesn't support hw tstamp */
548         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
549                 return;
550
551         /* check tx tstamp status */
552         if (stmmac_get_tx_timestamp_status(priv, p)) {
553                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
554                 found = true;
555         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
556                 found = true;
557         }
558
559         if (found) {
560                 ns -= stmmac_cdc_adjust(priv);
561
562                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
563                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
564
565                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
566                 /* pass tstamp to stack */
567                 skb_tstamp_tx(skb, &shhwtstamp);
568         }
569 }
570
571 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
572  * @priv: driver private structure
573  * @p : descriptor pointer
574  * @np : next descriptor pointer
575  * @skb : the socket buffer
576  * Description :
577  * This function reads the received packet's timestamp from the descriptor
578  * and passes it to the stack. It also performs some sanity checks.
579  */
580 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
581                                    struct dma_desc *np, struct sk_buff *skb)
582 {
583         struct skb_shared_hwtstamps *shhwtstamp = NULL;
584         struct dma_desc *desc = p;
585         u64 ns = 0;
586
587         if (!priv->hwts_rx_en)
588                 return;
589         /* For GMAC4, the valid timestamp is from CTX next desc. */
590         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
591                 desc = np;
592
593         /* Check if timestamp is available */
594         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
595                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
596
597                 ns -= stmmac_cdc_adjust(priv);
598
599                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
600                 shhwtstamp = skb_hwtstamps(skb);
601                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
602                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
603         } else  {
604                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
605         }
606 }
607
608 /**
609  *  stmmac_hwtstamp_set - control hardware timestamping.
610  *  @dev: device pointer.
611  *  @ifr: An IOCTL specific structure, that can contain a pointer to
612  *  a proprietary structure used to pass information to the driver.
613  *  Description:
614  *  This function configures the MAC to enable/disable both outgoing (TX)
615  *  and incoming (RX) packet timestamping based on user input.
616  *  Return Value:
617  *  0 on success and a negative error code on failure.
618  */
619 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
620 {
621         struct stmmac_priv *priv = netdev_priv(dev);
622         struct hwtstamp_config config;
623         u32 ptp_v2 = 0;
624         u32 tstamp_all = 0;
625         u32 ptp_over_ipv4_udp = 0;
626         u32 ptp_over_ipv6_udp = 0;
627         u32 ptp_over_ethernet = 0;
628         u32 snap_type_sel = 0;
629         u32 ts_master_en = 0;
630         u32 ts_event_en = 0;
631
632         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
633                 netdev_alert(priv->dev, "No support for HW time stamping\n");
634                 priv->hwts_tx_en = 0;
635                 priv->hwts_rx_en = 0;
636
637                 return -EOPNOTSUPP;
638         }
639
640         if (copy_from_user(&config, ifr->ifr_data,
641                            sizeof(config)))
642                 return -EFAULT;
643
644         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
645                    __func__, config.flags, config.tx_type, config.rx_filter);
646
647         /* reserved for future extensions */
648         if (config.flags)
649                 return -EINVAL;
650
651         if (config.tx_type != HWTSTAMP_TX_OFF &&
652             config.tx_type != HWTSTAMP_TX_ON)
653                 return -ERANGE;
654
655         if (priv->adv_ts) {
656                 switch (config.rx_filter) {
657                 case HWTSTAMP_FILTER_NONE:
658                         /* do not time stamp any incoming packet */
659                         config.rx_filter = HWTSTAMP_FILTER_NONE;
660                         break;
661
662                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
663                         /* PTP v1, UDP, any kind of event packet */
664                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
665                         /* 'xmac' hardware can support Sync, Pdelay_Req and
666                          * Pdelay_resp by setting bit14 and bits17/16 to 01.
667                          * This leaves Delay_Req timestamps out.
668                          * Enable all events *and* general purpose message
669                          * timestamping.
670                          */
671                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
672                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674                         break;
675
676                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
677                         /* PTP v1, UDP, Sync packet */
678                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
679                         /* take time stamp for SYNC messages only */
680                         ts_event_en = PTP_TCR_TSEVNTENA;
681
682                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
683                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
684                         break;
685
686                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
687                         /* PTP v1, UDP, Delay_req packet */
688                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
689                         /* take time stamp for Delay_Req messages only */
690                         ts_master_en = PTP_TCR_TSMSTRENA;
691                         ts_event_en = PTP_TCR_TSEVNTENA;
692
693                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
694                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
695                         break;
696
697                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
698                         /* PTP v2, UDP, any kind of event packet */
699                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
700                         ptp_v2 = PTP_TCR_TSVER2ENA;
701                         /* take time stamp for all event messages */
702                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
703
704                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
705                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
706                         break;
707
708                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
709                         /* PTP v2, UDP, Sync packet */
710                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
711                         ptp_v2 = PTP_TCR_TSVER2ENA;
712                         /* take time stamp for SYNC messages only */
713                         ts_event_en = PTP_TCR_TSEVNTENA;
714
715                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717                         break;
718
719                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
720                         /* PTP v2, UDP, Delay_req packet */
721                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
722                         ptp_v2 = PTP_TCR_TSVER2ENA;
723                         /* take time stamp for Delay_Req messages only */
724                         ts_master_en = PTP_TCR_TSMSTRENA;
725                         ts_event_en = PTP_TCR_TSEVNTENA;
726
727                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
728                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
729                         break;
730
731                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
732                         /* PTP v2/802.1AS, any layer, any kind of event packet */
733                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
734                         ptp_v2 = PTP_TCR_TSVER2ENA;
735                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
736                         if (priv->synopsys_id < DWMAC_CORE_4_10)
737                                 ts_event_en = PTP_TCR_TSEVNTENA;
738                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740                         ptp_over_ethernet = PTP_TCR_TSIPENA;
741                         break;
742
743                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
744                         /* PTP v2/802.1AS, any layer, Sync packet */
745                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
746                         ptp_v2 = PTP_TCR_TSVER2ENA;
747                         /* take time stamp for SYNC messages only */
748                         ts_event_en = PTP_TCR_TSEVNTENA;
749
750                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
751                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
752                         ptp_over_ethernet = PTP_TCR_TSIPENA;
753                         break;
754
755                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
756                         /* PTP v2/802.1AS, any layer, Delay_req packet */
757                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
758                         ptp_v2 = PTP_TCR_TSVER2ENA;
759                         /* take time stamp for Delay_Req messages only */
760                         ts_master_en = PTP_TCR_TSMSTRENA;
761                         ts_event_en = PTP_TCR_TSEVNTENA;
762
763                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
764                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
765                         ptp_over_ethernet = PTP_TCR_TSIPENA;
766                         break;
767
768                 case HWTSTAMP_FILTER_NTP_ALL:
769                 case HWTSTAMP_FILTER_ALL:
770                         /* time stamp any incoming packet */
771                         config.rx_filter = HWTSTAMP_FILTER_ALL;
772                         tstamp_all = PTP_TCR_TSENALL;
773                         break;
774
775                 default:
776                         return -ERANGE;
777                 }
778         } else {
779                 switch (config.rx_filter) {
780                 case HWTSTAMP_FILTER_NONE:
781                         config.rx_filter = HWTSTAMP_FILTER_NONE;
782                         break;
783                 default:
784                         /* PTP v1, UDP, any kind of event packet */
785                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
786                         break;
787                 }
788         }
789         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
790         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
791
792         priv->systime_flags = STMMAC_HWTS_ACTIVE;
793
794         if (priv->hwts_tx_en || priv->hwts_rx_en) {
795                 priv->systime_flags |= tstamp_all | ptp_v2 |
796                                        ptp_over_ethernet | ptp_over_ipv6_udp |
797                                        ptp_over_ipv4_udp | ts_event_en |
798                                        ts_master_en | snap_type_sel;
799         }
800
801         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
802
803         memcpy(&priv->tstamp_config, &config, sizeof(config));
804
805         return copy_to_user(ifr->ifr_data, &config,
806                             sizeof(config)) ? -EFAULT : 0;
807 }
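/* Userspace reaches this handler through the SIOCSHWTSTAMP ioctl. A minimal
 * sketch of the caller side (illustrative only; the interface name "eth0",
 * the socket fd and the chosen filter are placeholders, error handling
 * omitted):
 *
 *      struct hwtstamp_config cfg = { 0 };
 *      struct ifreq ifr = { 0 };
 *
 *      cfg.tx_type = HWTSTAMP_TX_ON;
 *      cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *      ifr.ifr_data = (char *)&cfg;
 *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */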
808
809 /**
810  *  stmmac_hwtstamp_get - read hardware timestamping.
811  *  @dev: device pointer.
812  *  @ifr: An IOCTL specific structure, that can contain a pointer to
813  *  a proprietary structure used to pass information to the driver.
814  *  Description:
815  *  This function obtains the current hardware timestamping settings
816  *  as requested.
817  */
818 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
819 {
820         struct stmmac_priv *priv = netdev_priv(dev);
821         struct hwtstamp_config *config = &priv->tstamp_config;
822
823         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
824                 return -EOPNOTSUPP;
825
826         return copy_to_user(ifr->ifr_data, config,
827                             sizeof(*config)) ? -EFAULT : 0;
828 }
829
830 /**
831  * stmmac_init_tstamp_counter - init hardware timestamping counter
832  * @priv: driver private structure
833  * @systime_flags: timestamping flags
834  * Description:
835  * Initialize hardware counter for packet timestamping.
836  * This is valid as long as the interface is open and not suspended.
837  * It is rerun after resuming from suspend, in which case the timestamping
838  * flags updated by stmmac_hwtstamp_set() also need to be restored.
839  */
840 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
841 {
842         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
843         struct timespec64 now;
844         u32 sec_inc = 0;
845         u64 temp = 0;
846         int ret;
847
848         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
849                 return -EOPNOTSUPP;
850
851         ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
852         if (ret < 0) {
853                 netdev_warn(priv->dev,
854                             "failed to enable PTP reference clock: %pe\n",
855                             ERR_PTR(ret));
856                 return ret;
857         }
858
859         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
860         priv->systime_flags = systime_flags;
861
862         /* program Sub Second Increment reg */
863         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
864                                            priv->plat->clk_ptp_rate,
865                                            xmac, &sec_inc);
866         temp = div_u64(1000000000ULL, sec_inc);
867
868         /* Store sub second increment for later use */
869         priv->sub_second_inc = sec_inc;
870
871         /* calculate the default addend value:
872          * formula is:
873          * addend = (2^32)/freq_div_ratio;
874          * where freq_div_ratio = 1e9ns/sec_inc
875          */
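        /* Worked example with hypothetical numbers: if sec_inc comes back as
         * 20 (ns), temp = 1e9 / 20 = 5e7; with clk_ptp_rate == 100 MHz the
         * computed default_addend is (5e7 << 32) / 1e8 == 2^31.
         */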
876         temp = (u64)(temp << 32);
877         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
878         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
879
880         /* initialize system time */
881         ktime_get_real_ts64(&now);
882
883         /* lower 32 bits of tv_sec are safe until y2106 */
884         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
885
886         return 0;
887 }
888 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
889
890 /**
891  * stmmac_init_ptp - init PTP
892  * @priv: driver private structure
893  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
894  * This is done by looking at the HW cap. register.
895  * This function also registers the ptp driver.
896  */
897 static int stmmac_init_ptp(struct stmmac_priv *priv)
898 {
899         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
900         int ret;
901
902         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
903         if (ret)
904                 return ret;
905
906         priv->adv_ts = 0;
907         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
908         if (xmac && priv->dma_cap.atime_stamp)
909                 priv->adv_ts = 1;
910         /* Dwmac 3.x core with extend_desc can support adv_ts */
911         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
912                 priv->adv_ts = 1;
913
914         if (priv->dma_cap.time_stamp)
915                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
916
917         if (priv->adv_ts)
918                 netdev_info(priv->dev,
919                             "IEEE 1588-2008 Advanced Timestamp supported\n");
920
921         priv->hwts_tx_en = 0;
922         priv->hwts_rx_en = 0;
923
924         stmmac_ptp_register(priv);
925
926         return 0;
927 }
928
929 static void stmmac_release_ptp(struct stmmac_priv *priv)
930 {
931         clk_disable_unprepare(priv->plat->clk_ptp_ref);
932         stmmac_ptp_unregister(priv);
933 }
934
935 /**
936  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
937  *  @priv: driver private structure
938  *  @duplex: duplex passed to the next function
939  *  Description: It is used for configuring the flow control in all queues
940  */
941 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
942 {
943         u32 tx_cnt = priv->plat->tx_queues_to_use;
944
945         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
946                         priv->pause, tx_cnt);
947 }
948
949 static void stmmac_validate(struct phylink_config *config,
950                             unsigned long *supported,
951                             struct phylink_link_state *state)
952 {
953         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
954         __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
955         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
956         int tx_cnt = priv->plat->tx_queues_to_use;
957         int max_speed = priv->plat->max_speed;
958
959         phylink_set(mac_supported, 10baseT_Half);
960         phylink_set(mac_supported, 10baseT_Full);
961         phylink_set(mac_supported, 100baseT_Half);
962         phylink_set(mac_supported, 100baseT_Full);
963         phylink_set(mac_supported, 1000baseT_Half);
964         phylink_set(mac_supported, 1000baseT_Full);
965         phylink_set(mac_supported, 1000baseKX_Full);
966
967         phylink_set(mac_supported, Autoneg);
968         phylink_set(mac_supported, Pause);
969         phylink_set(mac_supported, Asym_Pause);
970         phylink_set_port_modes(mac_supported);
971
972         /* Cut down 1G if asked to */
973         if ((max_speed > 0) && (max_speed < 1000)) {
974                 phylink_set(mask, 1000baseT_Full);
975                 phylink_set(mask, 1000baseX_Full);
976         } else if (priv->plat->has_gmac4) {
977                 if (!max_speed || max_speed >= 2500) {
978                         phylink_set(mac_supported, 2500baseT_Full);
979                         phylink_set(mac_supported, 2500baseX_Full);
980                 }
981         } else if (priv->plat->has_xgmac) {
982                 if (!max_speed || (max_speed >= 2500)) {
983                         phylink_set(mac_supported, 2500baseT_Full);
984                         phylink_set(mac_supported, 2500baseX_Full);
985                 }
986                 if (!max_speed || (max_speed >= 5000)) {
987                         phylink_set(mac_supported, 5000baseT_Full);
988                 }
989                 if (!max_speed || (max_speed >= 10000)) {
990                         phylink_set(mac_supported, 10000baseSR_Full);
991                         phylink_set(mac_supported, 10000baseLR_Full);
992                         phylink_set(mac_supported, 10000baseER_Full);
993                         phylink_set(mac_supported, 10000baseLRM_Full);
994                         phylink_set(mac_supported, 10000baseT_Full);
995                         phylink_set(mac_supported, 10000baseKX4_Full);
996                         phylink_set(mac_supported, 10000baseKR_Full);
997                 }
998                 if (!max_speed || (max_speed >= 25000)) {
999                         phylink_set(mac_supported, 25000baseCR_Full);
1000                         phylink_set(mac_supported, 25000baseKR_Full);
1001                         phylink_set(mac_supported, 25000baseSR_Full);
1002                 }
1003                 if (!max_speed || (max_speed >= 40000)) {
1004                         phylink_set(mac_supported, 40000baseKR4_Full);
1005                         phylink_set(mac_supported, 40000baseCR4_Full);
1006                         phylink_set(mac_supported, 40000baseSR4_Full);
1007                         phylink_set(mac_supported, 40000baseLR4_Full);
1008                 }
1009                 if (!max_speed || (max_speed >= 50000)) {
1010                         phylink_set(mac_supported, 50000baseCR2_Full);
1011                         phylink_set(mac_supported, 50000baseKR2_Full);
1012                         phylink_set(mac_supported, 50000baseSR2_Full);
1013                         phylink_set(mac_supported, 50000baseKR_Full);
1014                         phylink_set(mac_supported, 50000baseSR_Full);
1015                         phylink_set(mac_supported, 50000baseCR_Full);
1016                         phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
1017                         phylink_set(mac_supported, 50000baseDR_Full);
1018                 }
1019                 if (!max_speed || (max_speed >= 100000)) {
1020                         phylink_set(mac_supported, 100000baseKR4_Full);
1021                         phylink_set(mac_supported, 100000baseSR4_Full);
1022                         phylink_set(mac_supported, 100000baseCR4_Full);
1023                         phylink_set(mac_supported, 100000baseLR4_ER4_Full);
1024                         phylink_set(mac_supported, 100000baseKR2_Full);
1025                         phylink_set(mac_supported, 100000baseSR2_Full);
1026                         phylink_set(mac_supported, 100000baseCR2_Full);
1027                         phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
1028                         phylink_set(mac_supported, 100000baseDR2_Full);
1029                 }
1030         }
1031
1032         /* Half-duplex can only work with a single queue */
1033         if (tx_cnt > 1) {
1034                 phylink_set(mask, 10baseT_Half);
1035                 phylink_set(mask, 100baseT_Half);
1036                 phylink_set(mask, 1000baseT_Half);
1037         }
1038
1039         linkmode_and(supported, supported, mac_supported);
1040         linkmode_andnot(supported, supported, mask);
1041
1042         linkmode_and(state->advertising, state->advertising, mac_supported);
1043         linkmode_andnot(state->advertising, state->advertising, mask);
1044
1045         /* If PCS is supported, check which modes it supports. */
1046         if (priv->hw->xpcs)
1047                 xpcs_validate(priv->hw->xpcs, supported, state);
1048 }
1049
1050 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
1051                               const struct phylink_link_state *state)
1052 {
1053         /* Nothing to do, xpcs_config() handles everything */
1054 }
1055
1056 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
1057 {
1058         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
1059         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
1060         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
1061         bool *hs_enable = &fpe_cfg->hs_enable;
1062
1063         if (is_up && *hs_enable) {
1064                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
1065         } else {
1066                 *lo_state = FPE_STATE_OFF;
1067                 *lp_state = FPE_STATE_OFF;
1068         }
1069 }
1070
1071 static void stmmac_mac_link_down(struct phylink_config *config,
1072                                  unsigned int mode, phy_interface_t interface)
1073 {
1074         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1075
1076         stmmac_mac_set(priv, priv->ioaddr, false);
1077         priv->eee_active = false;
1078         priv->tx_lpi_enabled = false;
1079         priv->eee_enabled = stmmac_eee_init(priv);
1080         stmmac_set_eee_pls(priv, priv->hw, false);
1081
1082         if (priv->dma_cap.fpesel)
1083                 stmmac_fpe_link_state_handle(priv, false);
1084 }
1085
1086 static void stmmac_mac_link_up(struct phylink_config *config,
1087                                struct phy_device *phy,
1088                                unsigned int mode, phy_interface_t interface,
1089                                int speed, int duplex,
1090                                bool tx_pause, bool rx_pause)
1091 {
1092         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1093         u32 ctrl;
1094
1095         ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1096         ctrl &= ~priv->hw->link.speed_mask;
1097
1098         if (interface == PHY_INTERFACE_MODE_USXGMII) {
1099                 switch (speed) {
1100                 case SPEED_10000:
1101                         ctrl |= priv->hw->link.xgmii.speed10000;
1102                         break;
1103                 case SPEED_5000:
1104                         ctrl |= priv->hw->link.xgmii.speed5000;
1105                         break;
1106                 case SPEED_2500:
1107                         ctrl |= priv->hw->link.xgmii.speed2500;
1108                         break;
1109                 default:
1110                         return;
1111                 }
1112         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1113                 switch (speed) {
1114                 case SPEED_100000:
1115                         ctrl |= priv->hw->link.xlgmii.speed100000;
1116                         break;
1117                 case SPEED_50000:
1118                         ctrl |= priv->hw->link.xlgmii.speed50000;
1119                         break;
1120                 case SPEED_40000:
1121                         ctrl |= priv->hw->link.xlgmii.speed40000;
1122                         break;
1123                 case SPEED_25000:
1124                         ctrl |= priv->hw->link.xlgmii.speed25000;
1125                         break;
1126                 case SPEED_10000:
1127                         ctrl |= priv->hw->link.xgmii.speed10000;
1128                         break;
1129                 case SPEED_2500:
1130                         ctrl |= priv->hw->link.speed2500;
1131                         break;
1132                 case SPEED_1000:
1133                         ctrl |= priv->hw->link.speed1000;
1134                         break;
1135                 default:
1136                         return;
1137                 }
1138         } else {
1139                 switch (speed) {
1140                 case SPEED_2500:
1141                         ctrl |= priv->hw->link.speed2500;
1142                         break;
1143                 case SPEED_1000:
1144                         ctrl |= priv->hw->link.speed1000;
1145                         break;
1146                 case SPEED_100:
1147                         ctrl |= priv->hw->link.speed100;
1148                         break;
1149                 case SPEED_10:
1150                         ctrl |= priv->hw->link.speed10;
1151                         break;
1152                 default:
1153                         return;
1154                 }
1155         }
1156
1157         priv->speed = speed;
1158
1159         if (priv->plat->fix_mac_speed)
1160                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1161
1162         if (!duplex)
1163                 ctrl &= ~priv->hw->link.duplex;
1164         else
1165                 ctrl |= priv->hw->link.duplex;
1166
1167         /* Flow Control operation */
1168         if (tx_pause && rx_pause)
1169                 stmmac_mac_flow_ctrl(priv, duplex);
1170
1171         writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1172
1173         stmmac_mac_set(priv, priv->ioaddr, true);
1174         if (phy && priv->dma_cap.eee) {
1175                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1176                 priv->eee_enabled = stmmac_eee_init(priv);
1177                 priv->tx_lpi_enabled = priv->eee_enabled;
1178                 stmmac_set_eee_pls(priv, priv->hw, true);
1179         }
1180
1181         if (priv->dma_cap.fpesel)
1182                 stmmac_fpe_link_state_handle(priv, true);
1183 }
1184
1185 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1186         .validate = stmmac_validate,
1187         .mac_config = stmmac_mac_config,
1188         .mac_link_down = stmmac_mac_link_down,
1189         .mac_link_up = stmmac_mac_link_up,
1190 };
1191
1192 /**
1193  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1194  * @priv: driver private structure
1195  * Description: this is to verify if the HW supports the PCS, i.e. the
1196  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1197  * configured for the TBI, RTBI, or SGMII PHY interface.
1198  */
1199 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1200 {
1201         int interface = priv->plat->interface;
1202
1203         if (priv->dma_cap.pcs) {
1204                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1205                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1206                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1207                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1208                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1209                         priv->hw->pcs = STMMAC_PCS_RGMII;
1210                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1211                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1212                         priv->hw->pcs = STMMAC_PCS_SGMII;
1213                 }
1214         }
1215 }
1216
1217 /**
1218  * stmmac_init_phy - PHY initialization
1219  * @dev: net device structure
1220  * Description: it initializes the driver's PHY state, and attaches the PHY
1221  * to the MAC driver.
1222  *  Return value:
1223  *  0 on success
1224  */
1225 static int stmmac_init_phy(struct net_device *dev)
1226 {
1227         struct stmmac_priv *priv = netdev_priv(dev);
1228         struct device_node *node;
1229         int ret;
1230
1231         node = priv->plat->phylink_node;
1232
1233         if (node)
1234                 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1235
1236         /* Some DT bindings do not set up the PHY handle. Let's try to
1237          * parse it manually.
1238          */
1239         if (!node || ret) {
1240                 int addr = priv->plat->phy_addr;
1241                 struct phy_device *phydev;
1242
1243                 phydev = mdiobus_get_phy(priv->mii, addr);
1244                 if (!phydev) {
1245                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1246                         return -ENODEV;
1247                 }
1248
1249                 ret = phylink_connect_phy(priv->phylink, phydev);
1250         }
1251
1252         if (!priv->plat->pmt) {
1253                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1254
1255                 phylink_ethtool_get_wol(priv->phylink, &wol);
1256                 device_set_wakeup_capable(priv->device, !!wol.supported);
1257         }
1258
1259         return ret;
1260 }
1261
1262 static int stmmac_phy_setup(struct stmmac_priv *priv)
1263 {
1264         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1265         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1266         int mode = priv->plat->phy_interface;
1267         struct phylink *phylink;
1268
1269         priv->phylink_config.dev = &priv->dev->dev;
1270         priv->phylink_config.type = PHYLINK_NETDEV;
1271         priv->phylink_config.pcs_poll = true;
1272         if (priv->plat->mdio_bus_data)
1273                 priv->phylink_config.ovr_an_inband =
1274                         mdio_bus_data->xpcs_an_inband;
1275
1276         if (!fwnode)
1277                 fwnode = dev_fwnode(priv->device);
1278
1279         phylink = phylink_create(&priv->phylink_config, fwnode,
1280                                  mode, &stmmac_phylink_mac_ops);
1281         if (IS_ERR(phylink))
1282                 return PTR_ERR(phylink);
1283
1284         if (priv->hw->xpcs)
1285                 phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
1286
1287         priv->phylink = phylink;
1288         return 0;
1289 }
1290
1291 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1292 {
1293         u32 rx_cnt = priv->plat->rx_queues_to_use;
1294         unsigned int desc_size;
1295         void *head_rx;
1296         u32 queue;
1297
1298         /* Display RX rings */
1299         for (queue = 0; queue < rx_cnt; queue++) {
1300                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1301
1302                 pr_info("\tRX Queue %u rings\n", queue);
1303
1304                 if (priv->extend_desc) {
1305                         head_rx = (void *)rx_q->dma_erx;
1306                         desc_size = sizeof(struct dma_extended_desc);
1307                 } else {
1308                         head_rx = (void *)rx_q->dma_rx;
1309                         desc_size = sizeof(struct dma_desc);
1310                 }
1311
1312                 /* Display RX ring */
1313                 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1314                                     rx_q->dma_rx_phy, desc_size);
1315         }
1316 }
1317
1318 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1319 {
1320         u32 tx_cnt = priv->plat->tx_queues_to_use;
1321         unsigned int desc_size;
1322         void *head_tx;
1323         u32 queue;
1324
1325         /* Display TX rings */
1326         for (queue = 0; queue < tx_cnt; queue++) {
1327                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1328
1329                 pr_info("\tTX Queue %u rings\n", queue);
1330
1331                 if (priv->extend_desc) {
1332                         head_tx = (void *)tx_q->dma_etx;
1333                         desc_size = sizeof(struct dma_extended_desc);
1334                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1335                         head_tx = (void *)tx_q->dma_entx;
1336                         desc_size = sizeof(struct dma_edesc);
1337                 } else {
1338                         head_tx = (void *)tx_q->dma_tx;
1339                         desc_size = sizeof(struct dma_desc);
1340                 }
1341
1342                 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1343                                     tx_q->dma_tx_phy, desc_size);
1344         }
1345 }
1346
1347 static void stmmac_display_rings(struct stmmac_priv *priv)
1348 {
1349         /* Display RX ring */
1350         stmmac_display_rx_rings(priv);
1351
1352         /* Display TX ring */
1353         stmmac_display_tx_rings(priv);
1354 }
1355
1356 static int stmmac_set_bfsize(int mtu, int bufsize)
1357 {
1358         int ret = bufsize;
1359
1360         if (mtu >= BUF_SIZE_8KiB)
1361                 ret = BUF_SIZE_16KiB;
1362         else if (mtu >= BUF_SIZE_4KiB)
1363                 ret = BUF_SIZE_8KiB;
1364         else if (mtu >= BUF_SIZE_2KiB)
1365                 ret = BUF_SIZE_4KiB;
1366         else if (mtu > DEFAULT_BUFSIZE)
1367                 ret = BUF_SIZE_2KiB;
1368         else
1369                 ret = DEFAULT_BUFSIZE;
1370
1371         return ret;
1372 }
1373
1374 /**
1375  * stmmac_clear_rx_descriptors - clear RX descriptors
1376  * @priv: driver private structure
1377  * @queue: RX queue index
1378  * Description: this function is called to clear the RX descriptors
1379  * whether basic or extended descriptors are in use.
1380  */
1381 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1382 {
1383         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1384         int i;
1385
1386         /* Clear the RX descriptors */
1387         for (i = 0; i < priv->dma_rx_size; i++)
1388                 if (priv->extend_desc)
1389                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1390                                         priv->use_riwt, priv->mode,
1391                                         (i == priv->dma_rx_size - 1),
1392                                         priv->dma_buf_sz);
1393                 else
1394                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1395                                         priv->use_riwt, priv->mode,
1396                                         (i == priv->dma_rx_size - 1),
1397                                         priv->dma_buf_sz);
1398 }
1399
1400 /**
1401  * stmmac_clear_tx_descriptors - clear tx descriptors
1402  * @priv: driver private structure
1403  * @queue: TX queue index.
1404  * Description: this function is called to clear the TX descriptors
1405  * whether basic or extended descriptors are in use.
1406  */
1407 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1408 {
1409         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1410         int i;
1411
1412         /* Clear the TX descriptors */
1413         for (i = 0; i < priv->dma_tx_size; i++) {
1414                 int last = (i == (priv->dma_tx_size - 1));
1415                 struct dma_desc *p;
1416
1417                 if (priv->extend_desc)
1418                         p = &tx_q->dma_etx[i].basic;
1419                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1420                         p = &tx_q->dma_entx[i].basic;
1421                 else
1422                         p = &tx_q->dma_tx[i];
1423
1424                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1425         }
1426 }
1427
1428 /**
1429  * stmmac_clear_descriptors - clear descriptors
1430  * @priv: driver private structure
1431  * Description: this function is called to clear the TX and RX descriptors
1432  * whether basic or extended descriptors are in use.
1433  */
1434 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1435 {
1436         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1437         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1438         u32 queue;
1439
1440         /* Clear the RX descriptors */
1441         for (queue = 0; queue < rx_queue_cnt; queue++)
1442                 stmmac_clear_rx_descriptors(priv, queue);
1443
1444         /* Clear the TX descriptors */
1445         for (queue = 0; queue < tx_queue_cnt; queue++)
1446                 stmmac_clear_tx_descriptors(priv, queue);
1447 }
1448
1449 /**
1450  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1451  * @priv: driver private structure
1452  * @p: descriptor pointer
1453  * @i: descriptor index
1454  * @flags: gfp flag
1455  * @queue: RX queue index
1456  * Description: this function is called to allocate a receive buffer, perform
1457  * the DMA mapping and init the descriptor.
1458  */
1459 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1460                                   int i, gfp_t flags, u32 queue)
1461 {
1462         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1463         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1464
1465         if (!buf->page) {
1466                 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1467                 if (!buf->page)
1468                         return -ENOMEM;
1469                 buf->page_offset = stmmac_rx_offset(priv);
1470         }
1471
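        /* With Split Header (SPH) enabled the header lands in the main
         * buffer while the payload goes to a secondary page, so allocate
         * that second page here as well.
         */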
1472         if (priv->sph && !buf->sec_page) {
1473                 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1474                 if (!buf->sec_page)
1475                         return -ENOMEM;
1476
1477                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1478                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1479         } else {
1480                 buf->sec_page = NULL;
1481                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1482         }
1483
1484         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1485
1486         stmmac_set_desc_addr(priv, p, buf->addr);
1487         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1488                 stmmac_init_desc3(priv, p);
1489
1490         return 0;
1491 }
1492
1493 /**
1494  * stmmac_free_rx_buffer - free RX dma buffers
1495  * @priv: private structure
1496  * @queue: RX queue index
1497  * @i: buffer index.
1498  */
1499 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1500 {
1501         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1502         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1503
1504         if (buf->page)
1505                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1506         buf->page = NULL;
1507
1508         if (buf->sec_page)
1509                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1510         buf->sec_page = NULL;
1511 }
1512
1513 /**
1514  * stmmac_free_tx_buffer - free TX dma buffers
1515  * @priv: private structure
1516  * @queue: TX queue index
1517  * @i: buffer index.
1518  */
1519 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1520 {
1521         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1522
1523         if (tx_q->tx_skbuff_dma[i].buf &&
1524             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1525                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1526                         dma_unmap_page(priv->device,
1527                                        tx_q->tx_skbuff_dma[i].buf,
1528                                        tx_q->tx_skbuff_dma[i].len,
1529                                        DMA_TO_DEVICE);
1530                 else
1531                         dma_unmap_single(priv->device,
1532                                          tx_q->tx_skbuff_dma[i].buf,
1533                                          tx_q->tx_skbuff_dma[i].len,
1534                                          DMA_TO_DEVICE);
1535         }
1536
1537         if (tx_q->xdpf[i] &&
1538             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1539              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1540                 xdp_return_frame(tx_q->xdpf[i]);
1541                 tx_q->xdpf[i] = NULL;
1542         }
1543
1544         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1545                 tx_q->xsk_frames_done++;
1546
1547         if (tx_q->tx_skbuff[i] &&
1548             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1549                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1550                 tx_q->tx_skbuff[i] = NULL;
1551         }
1552
1553         tx_q->tx_skbuff_dma[i].buf = 0;
1554         tx_q->tx_skbuff_dma[i].map_as_page = false;
1555 }
1556
1557 /**
1558  * dma_free_rx_skbufs - free RX dma buffers
1559  * @priv: private structure
1560  * @queue: RX queue index
1561  */
1562 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1563 {
1564         int i;
1565
1566         for (i = 0; i < priv->dma_rx_size; i++)
1567                 stmmac_free_rx_buffer(priv, queue, i);
1568 }
1569
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1571                                    gfp_t flags)
1572 {
1573         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1574         int i;
1575
1576         for (i = 0; i < priv->dma_rx_size; i++) {
1577                 struct dma_desc *p;
1578                 int ret;
1579
1580                 if (priv->extend_desc)
1581                         p = &((rx_q->dma_erx + i)->basic);
1582                 else
1583                         p = rx_q->dma_rx + i;
1584
1585                 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1586                                              queue);
1587                 if (ret)
1588                         return ret;
1589
1590                 rx_q->buf_alloc_num++;
1591         }
1592
1593         return 0;
1594 }
1595
1596 /**
1597  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598  * @priv: private structure
1599  * @queue: RX queue index
1600  */
1601 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1602 {
1603         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1604         int i;
1605
1606         for (i = 0; i < priv->dma_rx_size; i++) {
1607                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1608
1609                 if (!buf->xdp)
1610                         continue;
1611
1612                 xsk_buff_free(buf->xdp);
1613                 buf->xdp = NULL;
1614         }
1615 }
1616
1617 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1618 {
1619         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1620         int i;
1621
1622         for (i = 0; i < priv->dma_rx_size; i++) {
1623                 struct stmmac_rx_buffer *buf;
1624                 dma_addr_t dma_addr;
1625                 struct dma_desc *p;
1626
1627                 if (priv->extend_desc)
1628                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1629                 else
1630                         p = rx_q->dma_rx + i;
1631
1632                 buf = &rx_q->buf_pool[i];
1633
1634                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1635                 if (!buf->xdp)
1636                         return -ENOMEM;
1637
1638                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1639                 stmmac_set_desc_addr(priv, p, dma_addr);
1640                 rx_q->buf_alloc_num++;
1641         }
1642
1643         return 0;
1644 }
1645
1646 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1647 {
1648         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1649                 return NULL;
1650
1651         return xsk_get_pool_from_qid(priv->dev, queue);
1652 }
1653
1654 /**
1655  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1656  * @priv: driver private structure
1657  * @queue: RX queue index
1658  * @flags: gfp flag.
1659  * Description: this function initializes the DMA RX descriptors
1660  * and allocates the socket buffers. It supports the chained and ring
1661  * modes.
1662  */
1663 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1664 {
1665         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1666         int ret;
1667
1668         netif_dbg(priv, probe, priv->dev,
1669                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1670                   (u32)rx_q->dma_rx_phy);
1671
1672         stmmac_clear_rx_descriptors(priv, queue);
1673
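        /* The memory model backing this RX queue can change between
         * page_pool and XSK buffer pool across reconfigurations, so drop any
         * previously registered model before registering the current one.
         */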
1674         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1675
1676         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1677
1678         if (rx_q->xsk_pool) {
1679                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680                                                    MEM_TYPE_XSK_BUFF_POOL,
1681                                                    NULL));
1682                 netdev_info(priv->dev,
1683                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1684                             rx_q->queue_index);
1685                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1686         } else {
1687                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1688                                                    MEM_TYPE_PAGE_POOL,
1689                                                    rx_q->page_pool));
1690                 netdev_info(priv->dev,
1691                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1692                             rx_q->queue_index);
1693         }
1694
1695         if (rx_q->xsk_pool) {
1696                 /* RX XDP ZC buffer pool may not be populated, e.g.
1697                  * xdpsock TX-only.
1698                  */
1699                 stmmac_alloc_rx_buffers_zc(priv, queue);
1700         } else {
1701                 ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1702                 if (ret < 0)
1703                         return -ENOMEM;
1704         }
1705
1706         rx_q->cur_rx = 0;
1707         rx_q->dirty_rx = 0;
1708
1709         /* Setup the chained descriptor addresses */
1710         if (priv->mode == STMMAC_CHAIN_MODE) {
1711                 if (priv->extend_desc)
1712                         stmmac_mode_init(priv, rx_q->dma_erx,
1713                                          rx_q->dma_rx_phy,
1714                                          priv->dma_rx_size, 1);
1715                 else
1716                         stmmac_mode_init(priv, rx_q->dma_rx,
1717                                          rx_q->dma_rx_phy,
1718                                          priv->dma_rx_size, 0);
1719         }
1720
1721         return 0;
1722 }
1723
1724 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1725 {
1726         struct stmmac_priv *priv = netdev_priv(dev);
1727         u32 rx_count = priv->plat->rx_queues_to_use;
1728         u32 queue;
1729         int ret;
1730
1731         /* RX INITIALIZATION */
1732         netif_dbg(priv, probe, priv->dev,
1733                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1734
1735         for (queue = 0; queue < rx_count; queue++) {
1736                 ret = __init_dma_rx_desc_rings(priv, queue, flags);
1737                 if (ret)
1738                         goto err_init_rx_buffers;
1739         }
1740
1741         return 0;
1742
1743 err_init_rx_buffers:
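        /* Roll back every queue initialized so far, including the one that
         * just failed, walking backwards down to queue 0.
         */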
1744         while (queue >= 0) {
1745                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1746
1747                 if (rx_q->xsk_pool)
1748                         dma_free_rx_xskbufs(priv, queue);
1749                 else
1750                         dma_free_rx_skbufs(priv, queue);
1751
1752                 rx_q->buf_alloc_num = 0;
1753                 rx_q->xsk_pool = NULL;
1754
1755                 if (queue == 0)
1756                         break;
1757
1758                 queue--;
1759         }
1760
1761         return ret;
1762 }
1763
1764 /**
1765  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1766  * @priv: driver private structure
1767  * @queue: TX queue index
1768  * Description: this function initializes the DMA TX descriptor ring
1769  * for the given queue. It supports the chained and ring
1770  * modes.
1771  */
1772 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1773 {
1774         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1775         int i;
1776
1777         netif_dbg(priv, probe, priv->dev,
1778                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1779                   (u32)tx_q->dma_tx_phy);
1780
1781         /* Setup the chained descriptor addresses */
1782         if (priv->mode == STMMAC_CHAIN_MODE) {
1783                 if (priv->extend_desc)
1784                         stmmac_mode_init(priv, tx_q->dma_etx,
1785                                          tx_q->dma_tx_phy,
1786                                          priv->dma_tx_size, 1);
1787                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1788                         stmmac_mode_init(priv, tx_q->dma_tx,
1789                                          tx_q->dma_tx_phy,
1790                                          priv->dma_tx_size, 0);
1791         }
1792
1793         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1794
1795         for (i = 0; i < priv->dma_tx_size; i++) {
1796                 struct dma_desc *p;
1797
1798                 if (priv->extend_desc)
1799                         p = &((tx_q->dma_etx + i)->basic);
1800                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1801                         p = &((tx_q->dma_entx + i)->basic);
1802                 else
1803                         p = tx_q->dma_tx + i;
1804
1805                 stmmac_clear_desc(priv, p);
1806
1807                 tx_q->tx_skbuff_dma[i].buf = 0;
1808                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1809                 tx_q->tx_skbuff_dma[i].len = 0;
1810                 tx_q->tx_skbuff_dma[i].last_segment = false;
1811                 tx_q->tx_skbuff[i] = NULL;
1812         }
1813
1814         tx_q->dirty_tx = 0;
1815         tx_q->cur_tx = 0;
1816         tx_q->mss = 0;
1817
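        /* The ring was just reinitialized: reset the BQL counters for this
         * TX queue accordingly.
         */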
1818         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1819
1820         return 0;
1821 }
1822
1823 static int init_dma_tx_desc_rings(struct net_device *dev)
1824 {
1825         struct stmmac_priv *priv = netdev_priv(dev);
1826         u32 tx_queue_cnt;
1827         u32 queue;
1828
1829         tx_queue_cnt = priv->plat->tx_queues_to_use;
1830
1831         for (queue = 0; queue < tx_queue_cnt; queue++)
1832                 __init_dma_tx_desc_rings(priv, queue);
1833
1834         return 0;
1835 }
1836
1837 /**
1838  * init_dma_desc_rings - init the RX/TX descriptor rings
1839  * @dev: net device structure
1840  * @flags: gfp flag.
1841  * Description: this function initializes the DMA RX/TX descriptors
1842  * and allocates the socket buffers. It supports the chained and ring
1843  * modes.
1844  */
1845 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1846 {
1847         struct stmmac_priv *priv = netdev_priv(dev);
1848         int ret;
1849
1850         ret = init_dma_rx_desc_rings(dev, flags);
1851         if (ret)
1852                 return ret;
1853
1854         ret = init_dma_tx_desc_rings(dev);
1855
1856         stmmac_clear_descriptors(priv);
1857
1858         if (netif_msg_hw(priv))
1859                 stmmac_display_rings(priv);
1860
1861         return ret;
1862 }
1863
1864 /**
1865  * dma_free_tx_skbufs - free TX dma buffers
1866  * @priv: private structure
1867  * @queue: TX queue index
1868  */
1869 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1870 {
1871         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1872         int i;
1873
1874         tx_q->xsk_frames_done = 0;
1875
1876         for (i = 0; i < priv->dma_tx_size; i++)
1877                 stmmac_free_tx_buffer(priv, queue, i);
1878
1879         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1880                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1881                 tx_q->xsk_frames_done = 0;
1882                 tx_q->xsk_pool = NULL;
1883         }
1884 }
1885
1886 /**
1887  * stmmac_free_tx_skbufs - free TX skb buffers
1888  * @priv: private structure
1889  */
1890 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1891 {
1892         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1893         u32 queue;
1894
1895         for (queue = 0; queue < tx_queue_cnt; queue++)
1896                 dma_free_tx_skbufs(priv, queue);
1897 }
1898
1899 /**
1900  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1901  * @priv: private structure
1902  * @queue: RX queue index
1903  */
1904 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1905 {
1906         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1907
1908         /* Release the DMA RX socket buffers */
1909         if (rx_q->xsk_pool)
1910                 dma_free_rx_xskbufs(priv, queue);
1911         else
1912                 dma_free_rx_skbufs(priv, queue);
1913
1914         rx_q->buf_alloc_num = 0;
1915         rx_q->xsk_pool = NULL;
1916
1917         /* Free DMA regions of consistent memory previously allocated */
1918         if (!priv->extend_desc)
1919                 dma_free_coherent(priv->device, priv->dma_rx_size *
1920                                   sizeof(struct dma_desc),
1921                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1922         else
1923                 dma_free_coherent(priv->device, priv->dma_rx_size *
1924                                   sizeof(struct dma_extended_desc),
1925                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1926
1927         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1928                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1929
1930         kfree(rx_q->buf_pool);
1931         if (rx_q->page_pool)
1932                 page_pool_destroy(rx_q->page_pool);
1933 }
1934
1935 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1936 {
1937         u32 rx_count = priv->plat->rx_queues_to_use;
1938         u32 queue;
1939
1940         /* Free RX queue resources */
1941         for (queue = 0; queue < rx_count; queue++)
1942                 __free_dma_rx_desc_resources(priv, queue);
1943 }
1944
1945 /**
1946  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1947  * @priv: private structure
1948  * @queue: TX queue index
1949  */
1950 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1951 {
1952         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1953         size_t size;
1954         void *addr;
1955
1956         /* Release the DMA TX socket buffers */
1957         dma_free_tx_skbufs(priv, queue);
1958
1959         if (priv->extend_desc) {
1960                 size = sizeof(struct dma_extended_desc);
1961                 addr = tx_q->dma_etx;
1962         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1963                 size = sizeof(struct dma_edesc);
1964                 addr = tx_q->dma_entx;
1965         } else {
1966                 size = sizeof(struct dma_desc);
1967                 addr = tx_q->dma_tx;
1968         }
1969
1970         size *= priv->dma_tx_size;
1971
1972         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1973
1974         kfree(tx_q->tx_skbuff_dma);
1975         kfree(tx_q->tx_skbuff);
1976 }
1977
1978 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1979 {
1980         u32 tx_count = priv->plat->tx_queues_to_use;
1981         u32 queue;
1982
1983         /* Free TX queue resources */
1984         for (queue = 0; queue < tx_count; queue++)
1985                 __free_dma_tx_desc_resources(priv, queue);
1986 }
1987
1988 /**
1989  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1990  * @priv: private structure
1991  * @queue: RX queue index
1992  * Description: according to which descriptor type is in use (extended or
1993  * basic), this function allocates the RX resources for the given queue:
1994  * the descriptor ring, the page pool and the buffer bookkeeping needed
1995  * for the zero-copy mechanism.
1996  */
1997 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1998 {
1999         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2000         struct stmmac_channel *ch = &priv->channel[queue];
2001         bool xdp_prog = stmmac_xdp_is_enabled(priv);
2002         struct page_pool_params pp_params = { 0 };
2003         unsigned int num_pages;
2004         unsigned int napi_id;
2005         int ret;
2006
2007         rx_q->queue_index = queue;
2008         rx_q->priv_data = priv;
2009
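        /* Back the RX ring with a page pool: one entry per descriptor, with
         * a page order large enough for dma_buf_sz, and DMA mapping/syncing
         * handled by the pool itself.
         */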
2010         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2011         pp_params.pool_size = priv->dma_rx_size;
2012         num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
2013         pp_params.order = ilog2(num_pages);
2014         pp_params.nid = dev_to_node(priv->device);
2015         pp_params.dev = priv->device;
2016         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2017         pp_params.offset = stmmac_rx_offset(priv);
2018         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2019
2020         rx_q->page_pool = page_pool_create(&pp_params);
2021         if (IS_ERR(rx_q->page_pool)) {
2022                 ret = PTR_ERR(rx_q->page_pool);
2023                 rx_q->page_pool = NULL;
2024                 return ret;
2025         }
2026
2027         rx_q->buf_pool = kcalloc(priv->dma_rx_size,
2028                                  sizeof(*rx_q->buf_pool),
2029                                  GFP_KERNEL);
2030         if (!rx_q->buf_pool)
2031                 return -ENOMEM;
2032
2033         if (priv->extend_desc) {
2034                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2035                                                    priv->dma_rx_size *
2036                                                    sizeof(struct dma_extended_desc),
2037                                                    &rx_q->dma_rx_phy,
2038                                                    GFP_KERNEL);
2039                 if (!rx_q->dma_erx)
2040                         return -ENOMEM;
2041
2042         } else {
2043                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2044                                                   priv->dma_rx_size *
2045                                                   sizeof(struct dma_desc),
2046                                                   &rx_q->dma_rx_phy,
2047                                                   GFP_KERNEL);
2048                 if (!rx_q->dma_rx)
2049                         return -ENOMEM;
2050         }
2051
2052         if (stmmac_xdp_is_enabled(priv) &&
2053             test_bit(queue, priv->af_xdp_zc_qps))
2054                 napi_id = ch->rxtx_napi.napi_id;
2055         else
2056                 napi_id = ch->rx_napi.napi_id;
2057
2058         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2059                                rx_q->queue_index,
2060                                napi_id);
2061         if (ret) {
2062                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2063                 return -EINVAL;
2064         }
2065
2066         return 0;
2067 }
2068
2069 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2070 {
2071         u32 rx_count = priv->plat->rx_queues_to_use;
2072         u32 queue;
2073         int ret;
2074
2075         /* RX queues buffers and DMA */
2076         for (queue = 0; queue < rx_count; queue++) {
2077                 ret = __alloc_dma_rx_desc_resources(priv, queue);
2078                 if (ret)
2079                         goto err_dma;
2080         }
2081
2082         return 0;
2083
2084 err_dma:
2085         free_dma_rx_desc_resources(priv);
2086
2087         return ret;
2088 }
2089
2090 /**
2091  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2092  * @priv: private structure
2093  * @queue: TX queue index
2094  * Description: according to which descriptor type is in use (extended or
2095  * basic), this function allocates the TX resources for the given queue:
2096  * the descriptor ring and the per-descriptor skb and DMA bookkeeping
2097  * arrays.
2098  */
2099 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2100 {
2101         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2102         size_t size;
2103         void *addr;
2104
2105         tx_q->queue_index = queue;
2106         tx_q->priv_data = priv;
2107
2108         tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2109                                       sizeof(*tx_q->tx_skbuff_dma),
2110                                       GFP_KERNEL);
2111         if (!tx_q->tx_skbuff_dma)
2112                 return -ENOMEM;
2113
2114         tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2115                                   sizeof(struct sk_buff *),
2116                                   GFP_KERNEL);
2117         if (!tx_q->tx_skbuff)
2118                 return -ENOMEM;
2119
2120         if (priv->extend_desc)
2121                 size = sizeof(struct dma_extended_desc);
2122         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2123                 size = sizeof(struct dma_edesc);
2124         else
2125                 size = sizeof(struct dma_desc);
2126
2127         size *= priv->dma_tx_size;
2128
2129         addr = dma_alloc_coherent(priv->device, size,
2130                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2131         if (!addr)
2132                 return -ENOMEM;
2133
2134         if (priv->extend_desc)
2135                 tx_q->dma_etx = addr;
2136         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2137                 tx_q->dma_entx = addr;
2138         else
2139                 tx_q->dma_tx = addr;
2140
2141         return 0;
2142 }
2143
2144 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2145 {
2146         u32 tx_count = priv->plat->tx_queues_to_use;
2147         u32 queue;
2148         int ret;
2149
2150         /* TX queues buffers and DMA */
2151         for (queue = 0; queue < tx_count; queue++) {
2152                 ret = __alloc_dma_tx_desc_resources(priv, queue);
2153                 if (ret)
2154                         goto err_dma;
2155         }
2156
2157         return 0;
2158
2159 err_dma:
2160         free_dma_tx_desc_resources(priv);
2161         return ret;
2162 }
2163
2164 /**
2165  * alloc_dma_desc_resources - alloc TX/RX resources.
2166  * @priv: private structure
2167  * Description: according to which descriptor type is in use (extended or
2168  * basic), this function allocates the resources for the TX and RX paths.
2169  * For reception, for example, it pre-allocates the RX buffers in order to
2170  * allow a zero-copy mechanism.
2171  */
2172 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2173 {
2174         /* RX Allocation */
2175         int ret = alloc_dma_rx_desc_resources(priv);
2176
2177         if (ret)
2178                 return ret;
2179
2180         ret = alloc_dma_tx_desc_resources(priv);
2181
2182         return ret;
2183 }
2184
2185 /**
2186  * free_dma_desc_resources - free dma desc resources
2187  * @priv: private structure
2188  */
2189 static void free_dma_desc_resources(struct stmmac_priv *priv)
2190 {
2191         /* Release the DMA TX socket buffers */
2192         free_dma_tx_desc_resources(priv);
2193
2194         /* Release the DMA RX socket buffers later
2195          * to ensure all pending XDP_TX buffers are returned.
2196          */
2197         free_dma_rx_desc_resources(priv);
2198 }
2199
2200 /**
2201  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2202  *  @priv: driver private structure
2203  *  Description: It is used for enabling the rx queues in the MAC
2204  */
2205 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2206 {
2207         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2208         int queue;
2209         u8 mode;
2210
2211         for (queue = 0; queue < rx_queues_count; queue++) {
2212                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2213                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2214         }
2215 }
2216
2217 /**
2218  * stmmac_start_rx_dma - start RX DMA channel
2219  * @priv: driver private structure
2220  * @chan: RX channel index
2221  * Description:
2222  * This starts an RX DMA channel
2223  */
2224 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2225 {
2226         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2227         stmmac_start_rx(priv, priv->ioaddr, chan);
2228 }
2229
2230 /**
2231  * stmmac_start_tx_dma - start TX DMA channel
2232  * @priv: driver private structure
2233  * @chan: TX channel index
2234  * Description:
2235  * This starts a TX DMA channel
2236  */
2237 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2238 {
2239         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2240         stmmac_start_tx(priv, priv->ioaddr, chan);
2241 }
2242
2243 /**
2244  * stmmac_stop_rx_dma - stop RX DMA channel
2245  * @priv: driver private structure
2246  * @chan: RX channel index
2247  * Description:
2248  * This stops an RX DMA channel
2249  */
2250 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2251 {
2252         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2253         stmmac_stop_rx(priv, priv->ioaddr, chan);
2254 }
2255
2256 /**
2257  * stmmac_stop_tx_dma - stop TX DMA channel
2258  * @priv: driver private structure
2259  * @chan: TX channel index
2260  * Description:
2261  * This stops a TX DMA channel
2262  */
2263 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2264 {
2265         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2266         stmmac_stop_tx(priv, priv->ioaddr, chan);
2267 }
2268
2269 /**
2270  * stmmac_start_all_dma - start all RX and TX DMA channels
2271  * @priv: driver private structure
2272  * Description:
2273  * This starts all the RX and TX DMA channels
2274  */
2275 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2276 {
2277         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2278         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2279         u32 chan = 0;
2280
2281         for (chan = 0; chan < rx_channels_count; chan++)
2282                 stmmac_start_rx_dma(priv, chan);
2283
2284         for (chan = 0; chan < tx_channels_count; chan++)
2285                 stmmac_start_tx_dma(priv, chan);
2286 }
2287
2288 /**
2289  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2290  * @priv: driver private structure
2291  * Description:
2292  * This stops the RX and TX DMA channels
2293  */
2294 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2295 {
2296         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2297         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2298         u32 chan = 0;
2299
2300         for (chan = 0; chan < rx_channels_count; chan++)
2301                 stmmac_stop_rx_dma(priv, chan);
2302
2303         for (chan = 0; chan < tx_channels_count; chan++)
2304                 stmmac_stop_tx_dma(priv, chan);
2305 }
2306
2307 /**
2308  *  stmmac_dma_operation_mode - HW DMA operation mode
2309  *  @priv: driver private structure
2310  *  Description: it is used for configuring the DMA operation mode register in
2311  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2312  */
2313 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2314 {
2315         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2316         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2317         int rxfifosz = priv->plat->rx_fifo_size;
2318         int txfifosz = priv->plat->tx_fifo_size;
2319         u32 txmode = 0;
2320         u32 rxmode = 0;
2321         u32 chan = 0;
2322         u8 qmode = 0;
2323
2324         if (rxfifosz == 0)
2325                 rxfifosz = priv->dma_cap.rx_fifo_size;
2326         if (txfifosz == 0)
2327                 txfifosz = priv->dma_cap.tx_fifo_size;
2328
2329         /* Adjust for real per queue fifo size */
2330         rxfifosz /= rx_channels_count;
2331         txfifosz /= tx_channels_count;
2332
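        /* Choose the operation mode: pure threshold mode if forced by the
         * platform, Store-and-Forward on both directions when SF is forced
         * or TX checksum offload is in use, otherwise threshold mode on TX
         * and Store-and-Forward on RX.
         */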
2333         if (priv->plat->force_thresh_dma_mode) {
2334                 txmode = tc;
2335                 rxmode = tc;
2336         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2337                 /*
2338                  * On GMAC, Store-and-Forward mode can be enabled to
2339                  * perform the TX checksum offload in HW. This requires:
2340                  * 1) TX COE to actually be supported;
2341                  * 2) no buggy Jumbo frame support that requires the csum
2342                  *    not to be inserted in the TDES.
2343                  */
2344                 txmode = SF_DMA_MODE;
2345                 rxmode = SF_DMA_MODE;
2346                 priv->xstats.threshold = SF_DMA_MODE;
2347         } else {
2348                 txmode = tc;
2349                 rxmode = SF_DMA_MODE;
2350         }
2351
2352         /* configure all channels */
2353         for (chan = 0; chan < rx_channels_count; chan++) {
2354                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2355                 u32 buf_size;
2356
2357                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2358
2359                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2360                                 rxfifosz, qmode);
2361
2362                 if (rx_q->xsk_pool) {
2363                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2364                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2365                                               buf_size,
2366                                               chan);
2367                 } else {
2368                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2369                                               priv->dma_buf_sz,
2370                                               chan);
2371                 }
2372         }
2373
2374         for (chan = 0; chan < tx_channels_count; chan++) {
2375                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2376
2377                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2378                                 txfifosz, qmode);
2379         }
2380 }
2381
2382 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2383 {
2384         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2385         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2386         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2387         unsigned int entry = tx_q->cur_tx;
2388         struct dma_desc *tx_desc = NULL;
2389         struct xdp_desc xdp_desc;
2390         bool work_done = true;
2391
2392         /* Avoid TX time-outs since the queue is shared with the slow path */
2393         nq->trans_start = jiffies;
2394
2395         budget = min(budget, stmmac_tx_avail(priv, queue));
2396
2397         while (budget-- > 0) {
2398                 dma_addr_t dma_addr;
2399                 bool set_ic;
2400
2401                 /* The ring is shared with the slow path: stop XSK TX desc
2402                  * submission when available TX descriptors drop below the threshold.
2403                  */
2404                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2405                     !netif_carrier_ok(priv->dev)) {
2406                         work_done = false;
2407                         break;
2408                 }
2409
2410                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2411                         break;
2412
2413                 if (likely(priv->extend_desc))
2414                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2415                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2416                         tx_desc = &tx_q->dma_entx[entry].basic;
2417                 else
2418                         tx_desc = tx_q->dma_tx + entry;
2419
2420                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2421                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2422
2423                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2424
2425                 /* To return the XDP buffer to the XSK pool, we simply call
2426                  * xsk_tx_completed(), so we don't need to fill up
2427                  * 'buf' and 'xdpf'.
2428                  */
2429                 tx_q->tx_skbuff_dma[entry].buf = 0;
2430                 tx_q->xdpf[entry] = NULL;
2431
2432                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2433                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2434                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2435                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2436
2437                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2438
2439                 tx_q->tx_count_frames++;
2440
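                /* Software interrupt coalescing: only request a TX completion
                 * interrupt every tx_coal_frames descriptors.
                 */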
2441                 if (!priv->tx_coal_frames[queue])
2442                         set_ic = false;
2443                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2444                         set_ic = true;
2445                 else
2446                         set_ic = false;
2447
2448                 if (set_ic) {
2449                         tx_q->tx_count_frames = 0;
2450                         stmmac_set_tx_ic(priv, tx_desc);
2451                         priv->xstats.tx_set_ic_bit++;
2452                 }
2453
2454                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2455                                        true, priv->mode, true, true,
2456                                        xdp_desc.len);
2457
2458                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2459
2460                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2461                 entry = tx_q->cur_tx;
2462         }
2463
2464         if (tx_desc) {
2465                 stmmac_flush_tx_descriptors(priv, queue);
2466                 xsk_tx_release(pool);
2467         }
2468
2469         /* Return true if both of the following conditions are met:
2470          *  a) TX budget is still available
2471          *  b) work_done is true, i.e. the XSK TX descriptor queue is empty
2472          *     (no more pending XSK TX frames to transmit)
2473          */
2474         return !!budget && work_done;
2475 }
2476
2477 /**
2478  * stmmac_tx_clean - to manage the transmission completion
2479  * @priv: driver private structure
2480  * @budget: NAPI budget limiting this function's packet handling
2481  * @queue: TX queue index
2482  * Description: it reclaims the transmit resources after transmission completes.
2483  */
2484 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2485 {
2486         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2487         unsigned int bytes_compl = 0, pkts_compl = 0;
2488         unsigned int entry, xmits = 0, count = 0;
2489
2490         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2491
2492         priv->xstats.tx_clean++;
2493
2494         tx_q->xsk_frames_done = 0;
2495
2496         entry = tx_q->dirty_tx;
2497
2498         /* Try to clean all completed TX frames in one shot */
2499         while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2500                 struct xdp_frame *xdpf;
2501                 struct sk_buff *skb;
2502                 struct dma_desc *p;
2503                 int status;
2504
2505                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2506                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2507                         xdpf = tx_q->xdpf[entry];
2508                         skb = NULL;
2509                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2510                         xdpf = NULL;
2511                         skb = tx_q->tx_skbuff[entry];
2512                 } else {
2513                         xdpf = NULL;
2514                         skb = NULL;
2515                 }
2516
2517                 if (priv->extend_desc)
2518                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2519                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2520                         p = &tx_q->dma_entx[entry].basic;
2521                 else
2522                         p = tx_q->dma_tx + entry;
2523
2524                 status = stmmac_tx_status(priv, &priv->dev->stats,
2525                                 &priv->xstats, p, priv->ioaddr);
2526                 /* Check if the descriptor is owned by the DMA */
2527                 if (unlikely(status & tx_dma_own))
2528                         break;
2529
2530                 count++;
2531
2532                 /* Make sure descriptor fields are read after reading
2533                  * the own bit.
2534                  */
2535                 dma_rmb();
2536
2537                 /* Just consider the last segment and ...*/
2538                 if (likely(!(status & tx_not_ls))) {
2539                         /* ... verify the status error condition */
2540                         if (unlikely(status & tx_err)) {
2541                                 priv->dev->stats.tx_errors++;
2542                         } else {
2543                                 priv->dev->stats.tx_packets++;
2544                                 priv->xstats.tx_pkt_n++;
2545                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2546                         }
2547                         if (skb)
2548                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2549                 }
2550
2551                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2552                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2553                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2554                                 dma_unmap_page(priv->device,
2555                                                tx_q->tx_skbuff_dma[entry].buf,
2556                                                tx_q->tx_skbuff_dma[entry].len,
2557                                                DMA_TO_DEVICE);
2558                         else
2559                                 dma_unmap_single(priv->device,
2560                                                  tx_q->tx_skbuff_dma[entry].buf,
2561                                                  tx_q->tx_skbuff_dma[entry].len,
2562                                                  DMA_TO_DEVICE);
2563                         tx_q->tx_skbuff_dma[entry].buf = 0;
2564                         tx_q->tx_skbuff_dma[entry].len = 0;
2565                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2566                 }
2567
2568                 stmmac_clean_desc3(priv, tx_q, p);
2569
2570                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2571                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2572
2573                 if (xdpf &&
2574                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2575                         xdp_return_frame_rx_napi(xdpf);
2576                         tx_q->xdpf[entry] = NULL;
2577                 }
2578
2579                 if (xdpf &&
2580                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2581                         xdp_return_frame(xdpf);
2582                         tx_q->xdpf[entry] = NULL;
2583                 }
2584
2585                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2586                         tx_q->xsk_frames_done++;
2587
2588                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2589                         if (likely(skb)) {
2590                                 pkts_compl++;
2591                                 bytes_compl += skb->len;
2592                                 dev_consume_skb_any(skb);
2593                                 tx_q->tx_skbuff[entry] = NULL;
2594                         }
2595                 }
2596
2597                 stmmac_release_tx_desc(priv, p, priv->mode);
2598
2599                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2600         }
2601         tx_q->dirty_tx = entry;
2602
2603         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2604                                   pkts_compl, bytes_compl);
2605
2606         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2607                                                                 queue))) &&
2608             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2609
2610                 netif_dbg(priv, tx_done, priv->dev,
2611                           "%s: restart transmit\n", __func__);
2612                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2613         }
2614
2615         if (tx_q->xsk_pool) {
2616                 bool work_done;
2617
2618                 if (tx_q->xsk_frames_done)
2619                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2620
2621                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2622                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2623
2624                 /* For XSK TX, we try to send as many as possible.
2625                  * If XSK work done (XSK TX desc empty and budget still
2626                  * available), return "budget - 1" to reenable TX IRQ.
2627                  * Else, return "budget" to make NAPI continue polling.
2628                  */
2629                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2630                                                STMMAC_XSK_TX_BUDGET_MAX);
2631                 if (work_done)
2632                         xmits = budget - 1;
2633                 else
2634                         xmits = budget;
2635         }
2636
2637         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2638             priv->eee_sw_timer_en) {
2639                 stmmac_enable_eee_mode(priv);
2640                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2641         }
2642
2643         /* We still have pending packets, let's call for a new scheduling */
2644         if (tx_q->dirty_tx != tx_q->cur_tx)
2645                 hrtimer_start(&tx_q->txtimer,
2646                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2647                               HRTIMER_MODE_REL);
2648
2649         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2650
2651         /* Combine decisions from TX clean and XSK TX */
2652         return max(count, xmits);
2653 }
2654
2655 /**
2656  * stmmac_tx_err - to manage the tx error
2657  * @priv: driver private structure
2658  * @chan: channel index
2659  * Description: it cleans the descriptors and restarts the transmission
2660  * in case of transmission errors.
2661  */
2662 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2663 {
2664         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2665
2666         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2667
2668         stmmac_stop_tx_dma(priv, chan);
2669         dma_free_tx_skbufs(priv, chan);
2670         stmmac_clear_tx_descriptors(priv, chan);
2671         tx_q->dirty_tx = 0;
2672         tx_q->cur_tx = 0;
2673         tx_q->mss = 0;
2674         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2675         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2676                             tx_q->dma_tx_phy, chan);
2677         stmmac_start_tx_dma(priv, chan);
2678
2679         priv->dev->stats.tx_errors++;
2680         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2681 }
2682
2683 /**
2684  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2685  *  @priv: driver private structure
2686  *  @txmode: TX operating mode
2687  *  @rxmode: RX operating mode
2688  *  @chan: channel index
2689  *  Description: it is used for configuring the DMA operation mode at
2690  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2691  *  mode.
2692  */
2693 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2694                                           u32 rxmode, u32 chan)
2695 {
2696         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2697         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2698         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2699         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2700         int rxfifosz = priv->plat->rx_fifo_size;
2701         int txfifosz = priv->plat->tx_fifo_size;
2702
2703         if (rxfifosz == 0)
2704                 rxfifosz = priv->dma_cap.rx_fifo_size;
2705         if (txfifosz == 0)
2706                 txfifosz = priv->dma_cap.tx_fifo_size;
2707
2708         /* Adjust for real per queue fifo size */
2709         rxfifosz /= rx_channels_count;
2710         txfifosz /= tx_channels_count;
2711
2712         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2713         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2714 }
2715
2716 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2717 {
2718         int ret;
2719
2720         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2721                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2722         if (ret && (ret != -EINVAL)) {
2723                 stmmac_global_err(priv);
2724                 return true;
2725         }
2726
2727         return false;
2728 }
2729
2730 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2731 {
2732         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2733                                                  &priv->xstats, chan, dir);
2734         struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2735         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2736         struct stmmac_channel *ch = &priv->channel[chan];
2737         struct napi_struct *rx_napi;
2738         struct napi_struct *tx_napi;
2739         unsigned long flags;
2740
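        /* Queues bound to an XSK pool are serviced by the combined rx/tx
         * NAPI instance; all others use the per-direction NAPI instances.
         */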
2741         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2742         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2743
2744         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2745                 if (napi_schedule_prep(rx_napi)) {
2746                         spin_lock_irqsave(&ch->lock, flags);
2747                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2748                         spin_unlock_irqrestore(&ch->lock, flags);
2749                         __napi_schedule(rx_napi);
2750                 }
2751         }
2752
2753         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2754                 if (napi_schedule_prep(tx_napi)) {
2755                         spin_lock_irqsave(&ch->lock, flags);
2756                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2757                         spin_unlock_irqrestore(&ch->lock, flags);
2758                         __napi_schedule(tx_napi);
2759                 }
2760         }
2761
2762         return status;
2763 }
2764
2765 /**
2766  * stmmac_dma_interrupt - DMA ISR
2767  * @priv: driver private structure
2768  * Description: this is the DMA ISR. It is called by the main ISR.
2769  * It calls the dwmac DMA routine and schedules the poll method when there
2770  * is work to be done.
2771  */
2772 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2773 {
2774         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2775         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2776         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2777                                 tx_channel_count : rx_channel_count;
2778         u32 chan;
2779         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2780
2781         /* Make sure we never check beyond our status buffer. */
2782         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2783                 channels_to_check = ARRAY_SIZE(status);
2784
2785         for (chan = 0; chan < channels_to_check; chan++)
2786                 status[chan] = stmmac_napi_check(priv, chan,
2787                                                  DMA_DIR_RXTX);
2788
2789         for (chan = 0; chan < tx_channel_count; chan++) {
2790                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2791                         /* Try to bump up the dma threshold on this failure */
2792                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2793                             (tc <= 256)) {
2794                                 tc += 64;
2795                                 if (priv->plat->force_thresh_dma_mode)
2796                                         stmmac_set_dma_operation_mode(priv,
2797                                                                       tc,
2798                                                                       tc,
2799                                                                       chan);
2800                                 else
2801                                         stmmac_set_dma_operation_mode(priv,
2802                                                                     tc,
2803                                                                     SF_DMA_MODE,
2804                                                                     chan);
2805                                 priv->xstats.threshold = tc;
2806                         }
2807                 } else if (unlikely(status[chan] == tx_hard_error)) {
2808                         stmmac_tx_err(priv, chan);
2809                 }
2810         }
2811 }
2812
2813 /**
2814  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2815  * @priv: driver private structure
2816  * Description: this masks the MMC irq since the counters are managed in SW.
2817  */
2818 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2819 {
2820         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2821                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2822
2823         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2824
2825         if (priv->dma_cap.rmon) {
2826                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2827                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2828         } else
2829                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2830 }
2831
2832 /**
2833  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2834  * @priv: driver private structure
2835  * Description:
2836  *  newer GMAC chip generations have a register that indicates the
2837  *  presence of the optional features/functions.
2838  *  It can also be used to override the values passed through the
2839  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2840  */
2841 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2842 {
2843         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2844 }
2845
2846 /**
2847  * stmmac_check_ether_addr - check if the MAC addr is valid
2848  * @priv: driver private structure
2849  * Description:
2850  * verify that the MAC address is valid; if it is not, a random MAC
2851  * address is generated
2852  */
2853 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2854 {
2855         u8 addr[ETH_ALEN];
2856
2857         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2858                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2859                 if (is_valid_ether_addr(addr))
2860                         eth_hw_addr_set(priv->dev, addr);
2861                 else
2862                         eth_hw_addr_random(priv->dev);
2863                 dev_info(priv->device, "device MAC address %pM\n",
2864                          priv->dev->dev_addr);
2865         }
2866 }
2867
2868 /**
2869  * stmmac_init_dma_engine - DMA init.
2870  * @priv: driver private structure
2871  * Description:
2872  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2873  * Some DMA parameters can be passed from the platform;
2874  * if they are not passed, a default is kept for the MAC or GMAC.
2875  */
2876 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2877 {
2878         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2879         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2880         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2881         struct stmmac_rx_queue *rx_q;
2882         struct stmmac_tx_queue *tx_q;
2883         u32 chan = 0;
2884         int atds = 0;
2885         int ret = 0;
2886
2887         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2888                 dev_err(priv->device, "Invalid DMA configuration\n");
2889                 return -EINVAL;
2890         }
2891
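             /* Extended descriptors in ring mode need the alternate descriptor size (ATDS) */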
2892         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2893                 atds = 1;
2894
2895         ret = stmmac_reset(priv, priv->ioaddr);
2896         if (ret) {
2897                 dev_err(priv->device, "Failed to reset the dma\n");
2898                 return ret;
2899         }
2900
2901         /* DMA Configuration */
2902         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2903
2904         if (priv->plat->axi)
2905                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2906
2907         /* DMA CSR Channel configuration */
2908         for (chan = 0; chan < dma_csr_ch; chan++)
2909                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2910
2911         /* DMA RX Channel Configuration */
2912         for (chan = 0; chan < rx_channels_count; chan++) {
2913                 rx_q = &priv->rx_queue[chan];
2914
2915                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2916                                     rx_q->dma_rx_phy, chan);
2917
2918                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2919                                      (rx_q->buf_alloc_num *
2920                                       sizeof(struct dma_desc));
2921                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2922                                        rx_q->rx_tail_addr, chan);
2923         }
2924
2925         /* DMA TX Channel Configuration */
2926         for (chan = 0; chan < tx_channels_count; chan++) {
2927                 tx_q = &priv->tx_queue[chan];
2928
2929                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2930                                     tx_q->dma_tx_phy, chan);
2931
2932                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2933                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2934                                        tx_q->tx_tail_addr, chan);
2935         }
2936
2937         return ret;
2938 }
2939
2940 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2941 {
2942         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2943
2944         hrtimer_start(&tx_q->txtimer,
2945                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2946                       HRTIMER_MODE_REL);
2947 }
2948
2949 /**
2950  * stmmac_tx_timer - mitigation sw timer for tx.
2951  * @t: pointer to the hrtimer that expired
2952  * Description:
2953  * This is the timer handler that schedules the NAPI poll running stmmac_tx_clean.
2954  */
2955 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2956 {
2957         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2958         struct stmmac_priv *priv = tx_q->priv_data;
2959         struct stmmac_channel *ch;
2960         struct napi_struct *napi;
2961
2962         ch = &priv->channel[tx_q->queue_index];
2963         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2964
2965         if (likely(napi_schedule_prep(napi))) {
2966                 unsigned long flags;
2967
2968                 spin_lock_irqsave(&ch->lock, flags);
2969                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2970                 spin_unlock_irqrestore(&ch->lock, flags);
2971                 __napi_schedule(napi);
2972         }
2973
2974         return HRTIMER_NORESTART;
2975 }
2976
2977 /**
2978  * stmmac_init_coalesce - init mitigation options.
2979  * @priv: driver private structure
2980  * Description:
2981  * This inits the coalesce parameters: i.e. timer rate,
2982  * timer handler and default threshold used for enabling the
2983  * interrupt on completion bit.
2984  */
2985 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2986 {
2987         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2988         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2989         u32 chan;
2990
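             /* Set per-queue TX coalesce defaults and the TX mitigation hrtimer */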
2991         for (chan = 0; chan < tx_channel_count; chan++) {
2992                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2993
2994                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2995                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2996
2997                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2998                 tx_q->txtimer.function = stmmac_tx_timer;
2999         }
3000
3001         for (chan = 0; chan < rx_channel_count; chan++)
3002                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3003 }
3004
3005 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3006 {
3007         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3008         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3009         u32 chan;
3010
3011         /* set TX ring length */
3012         for (chan = 0; chan < tx_channels_count; chan++)
3013                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3014                                        (priv->dma_tx_size - 1), chan);
3015
3016         /* set RX ring length */
3017         for (chan = 0; chan < rx_channels_count; chan++)
3018                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3019                                        (priv->dma_rx_size - 1), chan);
3020 }
3021
3022 /**
3023  *  stmmac_set_tx_queue_weight - Set TX queue weight
3024  *  @priv: driver private structure
3025  *  Description: It is used for setting the TX queue weights
3026  */
3027 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3028 {
3029         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3030         u32 weight;
3031         u32 queue;
3032
3033         for (queue = 0; queue < tx_queues_count; queue++) {
3034                 weight = priv->plat->tx_queues_cfg[queue].weight;
3035                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3036         }
3037 }
3038
3039 /**
3040  *  stmmac_configure_cbs - Configure CBS in TX queue
3041  *  @priv: driver private structure
3042  *  Description: It is used for configuring CBS in AVB TX queues
3043  */
3044 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3045 {
3046         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3047         u32 mode_to_use;
3048         u32 queue;
3049
3050         /* queue 0 is reserved for legacy traffic */
3051         for (queue = 1; queue < tx_queues_count; queue++) {
3052                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3053                 if (mode_to_use == MTL_QUEUE_DCB)
3054                         continue;
3055
3056                 stmmac_config_cbs(priv, priv->hw,
3057                                 priv->plat->tx_queues_cfg[queue].send_slope,
3058                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3059                                 priv->plat->tx_queues_cfg[queue].high_credit,
3060                                 priv->plat->tx_queues_cfg[queue].low_credit,
3061                                 queue);
3062         }
3063 }
3064
3065 /**
3066  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3067  *  @priv: driver private structure
3068  *  Description: It is used for mapping RX queues to RX dma channels
3069  */
3070 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3071 {
3072         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3073         u32 queue;
3074         u32 chan;
3075
3076         for (queue = 0; queue < rx_queues_count; queue++) {
3077                 chan = priv->plat->rx_queues_cfg[queue].chan;
3078                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3079         }
3080 }
3081
3082 /**
3083  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3084  *  @priv: driver private structure
3085  *  Description: It is used for configuring the RX Queue Priority
3086  */
3087 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3088 {
3089         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3090         u32 queue;
3091         u32 prio;
3092
3093         for (queue = 0; queue < rx_queues_count; queue++) {
3094                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3095                         continue;
3096
3097                 prio = priv->plat->rx_queues_cfg[queue].prio;
3098                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3099         }
3100 }
3101
3102 /**
3103  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3104  *  @priv: driver private structure
3105  *  Description: It is used for configuring the TX Queue Priority
3106  */
3107 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3108 {
3109         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3110         u32 queue;
3111         u32 prio;
3112
3113         for (queue = 0; queue < tx_queues_count; queue++) {
3114                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3115                         continue;
3116
3117                 prio = priv->plat->tx_queues_cfg[queue].prio;
3118                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3119         }
3120 }
3121
3122 /**
3123  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3124  *  @priv: driver private structure
3125  *  Description: It is used for configuring the RX queue routing
3126  */
3127 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3128 {
3129         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3130         u32 queue;
3131         u8 packet;
3132
3133         for (queue = 0; queue < rx_queues_count; queue++) {
3134                 /* no specific packet type routing specified for the queue */
3135                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3136                         continue;
3137
3138                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3139                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3140         }
3141 }
3142
3143 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3144 {
3145         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3146                 priv->rss.enable = false;
3147                 return;
3148         }
3149
3150         if (priv->dev->features & NETIF_F_RXHASH)
3151                 priv->rss.enable = true;
3152         else
3153                 priv->rss.enable = false;
3154
3155         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3156                              priv->plat->rx_queues_to_use);
3157 }
3158
3159 /**
3160  *  stmmac_mtl_configuration - Configure MTL
3161  *  @priv: driver private structure
3162  *  Description: It is used for configuring the MTL
3163  */
3164 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3165 {
3166         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3167         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3168
3169         if (tx_queues_count > 1)
3170                 stmmac_set_tx_queue_weight(priv);
3171
3172         /* Configure MTL RX algorithms */
3173         if (rx_queues_count > 1)
3174                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3175                                 priv->plat->rx_sched_algorithm);
3176
3177         /* Configure MTL TX algorithms */
3178         if (tx_queues_count > 1)
3179                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3180                                 priv->plat->tx_sched_algorithm);
3181
3182         /* Configure CBS in AVB TX queues */
3183         if (tx_queues_count > 1)
3184                 stmmac_configure_cbs(priv);
3185
3186         /* Map RX MTL to DMA channels */
3187         stmmac_rx_queue_dma_chan_map(priv);
3188
3189         /* Enable MAC RX Queues */
3190         stmmac_mac_enable_rx_queues(priv);
3191
3192         /* Set RX priorities */
3193         if (rx_queues_count > 1)
3194                 stmmac_mac_config_rx_queues_prio(priv);
3195
3196         /* Set TX priorities */
3197         if (tx_queues_count > 1)
3198                 stmmac_mac_config_tx_queues_prio(priv);
3199
3200         /* Set RX routing */
3201         if (rx_queues_count > 1)
3202                 stmmac_mac_config_rx_queues_routing(priv);
3203
3204         /* Receive Side Scaling */
3205         if (rx_queues_count > 1)
3206                 stmmac_mac_config_rss(priv);
3207 }
3208
3209 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3210 {
3211         if (priv->dma_cap.asp) {
3212                 netdev_info(priv->dev, "Enabling Safety Features\n");
3213                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3214                                           priv->plat->safety_feat_cfg);
3215         } else {
3216                 netdev_info(priv->dev, "No Safety Features support found\n");
3217         }
3218 }
3219
3220 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3221 {
3222         char *name;
3223
3224         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3225         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3226
3227         name = priv->wq_name;
3228         sprintf(name, "%s-fpe", priv->dev->name);
3229
3230         priv->fpe_wq = create_singlethread_workqueue(name);
3231         if (!priv->fpe_wq) {
3232                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3233
3234                 return -ENOMEM;
3235         }
3236         netdev_info(priv->dev, "FPE workqueue started\n");
3237
3238         return 0;
3239 }
3240
3241 /**
3242  * stmmac_hw_setup - setup mac in a usable state.
3243  *  @dev : pointer to the device structure.
3244  *  @init_ptp: initialize PTP if set
3245  *  Description:
3246  *  this is the main function to setup the HW in a usable state because the
3247  *  this is the main function to setup the HW in a usable state: the DMA
3248  *  engine is reset, the core registers are configured (e.g. AXI,
3249  *  checksum features, timers) and the DMA is ready to start receiving
3250  *  and transmitting.
3251  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3252  *  file on failure.
3253  */
3254 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3255 {
3256         struct stmmac_priv *priv = netdev_priv(dev);
3257         u32 rx_cnt = priv->plat->rx_queues_to_use;
3258         u32 tx_cnt = priv->plat->tx_queues_to_use;
3259         bool sph_en;
3260         u32 chan;
3261         int ret;
3262
3263         /* DMA initialization and SW reset */
3264         ret = stmmac_init_dma_engine(priv);
3265         if (ret < 0) {
3266                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3267                            __func__);
3268                 return ret;
3269         }
3270
3271         /* Copy the MAC addr into the HW  */
3272         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3273
3274         /* PS and related bits will be programmed according to the speed */
3275         if (priv->hw->pcs) {
3276                 int speed = priv->plat->mac_port_sel_speed;
3277
3278                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3279                     (speed == SPEED_1000)) {
3280                         priv->hw->ps = speed;
3281                 } else {
3282                         dev_warn(priv->device, "invalid port speed\n");
3283                         priv->hw->ps = 0;
3284                 }
3285         }
3286
3287         /* Initialize the MAC Core */
3288         stmmac_core_init(priv, priv->hw, dev);
3289
3290         /* Initialize MTL */
3291         stmmac_mtl_configuration(priv);
3292
3293         /* Initialize Safety Features */
3294         stmmac_safety_feat_configuration(priv);
3295
3296         ret = stmmac_rx_ipc(priv, priv->hw);
3297         if (!ret) {
3298                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3299                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3300                 priv->hw->rx_csum = 0;
3301         }
3302
3303         /* Enable the MAC Rx/Tx */
3304         stmmac_mac_set(priv, priv->ioaddr, true);
3305
3306         /* Set the HW DMA mode and the COE */
3307         stmmac_dma_operation_mode(priv);
3308
3309         stmmac_mmc_setup(priv);
3310
3311         if (init_ptp) {
3312                 ret = stmmac_init_ptp(priv);
3313                 if (ret == -EOPNOTSUPP)
3314                         netdev_warn(priv->dev, "PTP not supported by HW\n");
3315                 else if (ret)
3316                         netdev_warn(priv->dev, "PTP init failed\n");
3317         }
3318
3319         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3320
3321         /* Convert the timer from msec to usec */
3322         if (!priv->tx_lpi_timer)
3323                 priv->tx_lpi_timer = eee_timer * 1000;
3324
3325         if (priv->use_riwt) {
3326                 u32 queue;
3327
3328                 for (queue = 0; queue < rx_cnt; queue++) {
3329                         if (!priv->rx_riwt[queue])
3330                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3331
3332                         stmmac_rx_watchdog(priv, priv->ioaddr,
3333                                            priv->rx_riwt[queue], queue);
3334                 }
3335         }
3336
3337         if (priv->hw->pcs)
3338                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3339
3340         /* set TX and RX rings length */
3341         stmmac_set_rings_length(priv);
3342
3343         /* Enable TSO */
3344         if (priv->tso) {
3345                 for (chan = 0; chan < tx_cnt; chan++) {
3346                         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3347
3348                         /* TSO and TBS cannot co-exist */
3349                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3350                                 continue;
3351
3352                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3353                 }
3354         }
3355
3356         /* Enable Split Header */
3357         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
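             /* Split header is only enabled when RX checksum offload is active */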
3358         for (chan = 0; chan < rx_cnt; chan++)
3359                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3360
3362         /* VLAN Tag Insertion */
3363         if (priv->dma_cap.vlins)
3364                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3365
3366         /* TBS */
3367         for (chan = 0; chan < tx_cnt; chan++) {
3368                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3369                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3370
3371                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3372         }
3373
3374         /* Configure real RX and TX queues */
3375         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3376         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3377
3378         /* Start the ball rolling... */
3379         stmmac_start_all_dma(priv);
3380
3381         if (priv->dma_cap.fpesel) {
3382                 stmmac_fpe_start_wq(priv);
3383
3384                 if (priv->plat->fpe_cfg->enable)
3385                         stmmac_fpe_handshake(priv, true);
3386         }
3387
3388         return 0;
3389 }
3390
3391 static void stmmac_hw_teardown(struct net_device *dev)
3392 {
3393         struct stmmac_priv *priv = netdev_priv(dev);
3394
3395         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3396 }
3397
3398 static void stmmac_free_irq(struct net_device *dev,
3399                             enum request_irq_err irq_err, int irq_idx)
3400 {
3401         struct stmmac_priv *priv = netdev_priv(dev);
3402         int j;
3403
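             /* Fall through from the failing step, freeing every IRQ requested before it */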
3404         switch (irq_err) {
3405         case REQ_IRQ_ERR_ALL:
3406                 irq_idx = priv->plat->tx_queues_to_use;
3407                 fallthrough;
3408         case REQ_IRQ_ERR_TX:
3409                 for (j = irq_idx - 1; j >= 0; j--) {
3410                         if (priv->tx_irq[j] > 0) {
3411                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3412                                 free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3413                         }
3414                 }
3415                 irq_idx = priv->plat->rx_queues_to_use;
3416                 fallthrough;
3417         case REQ_IRQ_ERR_RX:
3418                 for (j = irq_idx - 1; j >= 0; j--) {
3419                         if (priv->rx_irq[j] > 0) {
3420                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3421                                 free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3422                         }
3423                 }
3424
3425                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3426                         free_irq(priv->sfty_ue_irq, dev);
3427                 fallthrough;
3428         case REQ_IRQ_ERR_SFTY_UE:
3429                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3430                         free_irq(priv->sfty_ce_irq, dev);
3431                 fallthrough;
3432         case REQ_IRQ_ERR_SFTY_CE:
3433                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3434                         free_irq(priv->lpi_irq, dev);
3435                 fallthrough;
3436         case REQ_IRQ_ERR_LPI:
3437                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3438                         free_irq(priv->wol_irq, dev);
3439                 fallthrough;
3440         case REQ_IRQ_ERR_WOL:
3441                 free_irq(dev->irq, dev);
3442                 fallthrough;
3443         case REQ_IRQ_ERR_MAC:
3444         case REQ_IRQ_ERR_NO:
3445                 /* If the MAC IRQ request failed, there are no IRQs left to free */
3446                 break;
3447         }
3448 }
3449
3450 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3451 {
3452         struct stmmac_priv *priv = netdev_priv(dev);
3453         enum request_irq_err irq_err;
3454         cpumask_t cpu_mask;
3455         int irq_idx = 0;
3456         char *int_name;
3457         int ret;
3458         int i;
3459
3460         /* For common interrupt */
3461         int_name = priv->int_name_mac;
3462         sprintf(int_name, "%s:%s", dev->name, "mac");
3463         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3464                           0, int_name, dev);
3465         if (unlikely(ret < 0)) {
3466                 netdev_err(priv->dev,
3467                            "%s: alloc mac MSI %d (error: %d)\n",
3468                            __func__, dev->irq, ret);
3469                 irq_err = REQ_IRQ_ERR_MAC;
3470                 goto irq_error;
3471         }
3472
3473         /* Request the Wake IRQ in case another line
3474          * is used for WoL
3475          */
3476         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3477                 int_name = priv->int_name_wol;
3478                 sprintf(int_name, "%s:%s", dev->name, "wol");
3479                 ret = request_irq(priv->wol_irq,
3480                                   stmmac_mac_interrupt,
3481                                   0, int_name, dev);
3482                 if (unlikely(ret < 0)) {
3483                         netdev_err(priv->dev,
3484                                    "%s: alloc wol MSI %d (error: %d)\n",
3485                                    __func__, priv->wol_irq, ret);
3486                         irq_err = REQ_IRQ_ERR_WOL;
3487                         goto irq_error;
3488                 }
3489         }
3490
3491         /* Request the LPI IRQ in case another line
3492          * is used for LPI
3493          */
3494         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3495                 int_name = priv->int_name_lpi;
3496                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3497                 ret = request_irq(priv->lpi_irq,
3498                                   stmmac_mac_interrupt,
3499                                   0, int_name, dev);
3500                 if (unlikely(ret < 0)) {
3501                         netdev_err(priv->dev,
3502                                    "%s: alloc lpi MSI %d (error: %d)\n",
3503                                    __func__, priv->lpi_irq, ret);
3504                         irq_err = REQ_IRQ_ERR_LPI;
3505                         goto irq_error;
3506                 }
3507         }
3508
3509         /* Request the Safety Feature Correctable Error line in
3510          * case another line is used
3511          */
3512         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3513                 int_name = priv->int_name_sfty_ce;
3514                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3515                 ret = request_irq(priv->sfty_ce_irq,
3516                                   stmmac_safety_interrupt,
3517                                   0, int_name, dev);
3518                 if (unlikely(ret < 0)) {
3519                         netdev_err(priv->dev,
3520                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3521                                    __func__, priv->sfty_ce_irq, ret);
3522                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3523                         goto irq_error;
3524                 }
3525         }
3526
3527         /* Request the Safety Feature Uncorrectable Error line in
3528          * case another line is used
3529          */
3530         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3531                 int_name = priv->int_name_sfty_ue;
3532                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3533                 ret = request_irq(priv->sfty_ue_irq,
3534                                   stmmac_safety_interrupt,
3535                                   0, int_name, dev);
3536                 if (unlikely(ret < 0)) {
3537                         netdev_err(priv->dev,
3538                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3539                                    __func__, priv->sfty_ue_irq, ret);
3540                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3541                         goto irq_error;
3542                 }
3543         }
3544
3545         /* Request Rx MSI irq */
3546         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3547                 if (i >= MTL_MAX_RX_QUEUES)
3548                         break;
3549                 if (priv->rx_irq[i] == 0)
3550                         continue;
3551
3552                 int_name = priv->int_name_rx_irq[i];
3553                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3554                 ret = request_irq(priv->rx_irq[i],
3555                                   stmmac_msi_intr_rx,
3556                                   0, int_name, &priv->rx_queue[i]);
3557                 if (unlikely(ret < 0)) {
3558                         netdev_err(priv->dev,
3559                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3560                                    __func__, i, priv->rx_irq[i], ret);
3561                         irq_err = REQ_IRQ_ERR_RX;
3562                         irq_idx = i;
3563                         goto irq_error;
3564                 }
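                     /* Spread the RX IRQ affinity hints across the online CPUs */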
3565                 cpumask_clear(&cpu_mask);
3566                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3567                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3568         }
3569
3570         /* Request Tx MSI irq */
3571         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3572                 if (i >= MTL_MAX_TX_QUEUES)
3573                         break;
3574                 if (priv->tx_irq[i] == 0)
3575                         continue;
3576
3577                 int_name = priv->int_name_tx_irq[i];
3578                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3579                 ret = request_irq(priv->tx_irq[i],
3580                                   stmmac_msi_intr_tx,
3581                                   0, int_name, &priv->tx_queue[i]);
3582                 if (unlikely(ret < 0)) {
3583                         netdev_err(priv->dev,
3584                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3585                                    __func__, i, priv->tx_irq[i], ret);
3586                         irq_err = REQ_IRQ_ERR_TX;
3587                         irq_idx = i;
3588                         goto irq_error;
3589                 }
3590                 cpumask_clear(&cpu_mask);
3591                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3592                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3593         }
3594
3595         return 0;
3596
3597 irq_error:
3598         stmmac_free_irq(dev, irq_err, irq_idx);
3599         return ret;
3600 }
3601
3602 static int stmmac_request_irq_single(struct net_device *dev)
3603 {
3604         struct stmmac_priv *priv = netdev_priv(dev);
3605         enum request_irq_err irq_err;
3606         int ret;
3607
3608         ret = request_irq(dev->irq, stmmac_interrupt,
3609                           IRQF_SHARED, dev->name, dev);
3610         if (unlikely(ret < 0)) {
3611                 netdev_err(priv->dev,
3612                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3613                            __func__, dev->irq, ret);
3614                 irq_err = REQ_IRQ_ERR_MAC;
3615                 goto irq_error;
3616         }
3617
3618         /* Request the Wake IRQ in case another line
3619          * is used for WoL
3620          */
3621         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3622                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3623                                   IRQF_SHARED, dev->name, dev);
3624                 if (unlikely(ret < 0)) {
3625                         netdev_err(priv->dev,
3626                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3627                                    __func__, priv->wol_irq, ret);
3628                         irq_err = REQ_IRQ_ERR_WOL;
3629                         goto irq_error;
3630                 }
3631         }
3632
3633         /* Request the LPI IRQ in case another line is used for LPI */
3634         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3635                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3636                                   IRQF_SHARED, dev->name, dev);
3637                 if (unlikely(ret < 0)) {
3638                         netdev_err(priv->dev,
3639                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3640                                    __func__, priv->lpi_irq, ret);
3641                         irq_err = REQ_IRQ_ERR_LPI;
3642                         goto irq_error;
3643                 }
3644         }
3645
3646         return 0;
3647
3648 irq_error:
3649         stmmac_free_irq(dev, irq_err, 0);
3650         return ret;
3651 }
3652
3653 static int stmmac_request_irq(struct net_device *dev)
3654 {
3655         struct stmmac_priv *priv = netdev_priv(dev);
3656         int ret;
3657
3658         /* Request the IRQ lines */
3659         if (priv->plat->multi_msi_en)
3660                 ret = stmmac_request_irq_multi_msi(dev);
3661         else
3662                 ret = stmmac_request_irq_single(dev);
3663
3664         return ret;
3665 }
3666
3667 /**
3668  *  stmmac_open - open entry point of the driver
3669  *  @dev : pointer to the device structure.
3670  *  Description:
3671  *  This function is the open entry point of the driver.
3672  *  Return value:
3673  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3674  *  file on failure.
3675  */
3676 int stmmac_open(struct net_device *dev)
3677 {
3678         struct stmmac_priv *priv = netdev_priv(dev);
3679         int mode = priv->plat->phy_interface;
3680         int bfsize = 0;
3681         u32 chan;
3682         int ret;
3683
3684         ret = pm_runtime_get_sync(priv->device);
3685         if (ret < 0) {
3686                 pm_runtime_put_noidle(priv->device);
3687                 return ret;
3688         }
3689
3690         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3691             priv->hw->pcs != STMMAC_PCS_RTBI &&
3692             (!priv->hw->xpcs ||
3693              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3694                 ret = stmmac_init_phy(dev);
3695                 if (ret) {
3696                         netdev_err(priv->dev,
3697                                    "%s: Cannot attach to PHY (error: %d)\n",
3698                                    __func__, ret);
3699                         goto init_phy_error;
3700                 }
3701         }
3702
3703         /* Extra statistics */
3704         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3705         priv->xstats.threshold = tc;
3706
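             /* Pick the RX buffer size from the MTU; fall back to the default
              * sizing when 16 KiB buffers are not needed or not available.
              */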
3707         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3708         if (bfsize < 0)
3709                 bfsize = 0;
3710
3711         if (bfsize < BUF_SIZE_16KiB)
3712                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3713
3714         priv->dma_buf_sz = bfsize;
3715         buf_sz = bfsize;
3716
3717         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3718
3719         if (!priv->dma_tx_size)
3720                 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3721         if (!priv->dma_rx_size)
3722                 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3723
3724         /* Earlier check for TBS */
3725         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3726                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3727                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3728
3729                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3730                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3731         }
3732
3733         ret = alloc_dma_desc_resources(priv);
3734         if (ret < 0) {
3735                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3736                            __func__);
3737                 goto dma_desc_error;
3738         }
3739
3740         ret = init_dma_desc_rings(dev, GFP_KERNEL);
3741         if (ret < 0) {
3742                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3743                            __func__);
3744                 goto init_error;
3745         }
3746
3747         ret = stmmac_hw_setup(dev, true);
3748         if (ret < 0) {
3749                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3750                 goto init_error;
3751         }
3752
3753         stmmac_init_coalesce(priv);
3754
3755         phylink_start(priv->phylink);
3756         /* We may have called phylink_speed_down before */
3757         phylink_speed_up(priv->phylink);
3758
3759         ret = stmmac_request_irq(dev);
3760         if (ret)
3761                 goto irq_error;
3762
3763         stmmac_enable_all_queues(priv);
3764         netif_tx_start_all_queues(priv->dev);
3765
3766         return 0;
3767
3768 irq_error:
3769         phylink_stop(priv->phylink);
3770
3771         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3772                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3773
3774         stmmac_hw_teardown(dev);
3775 init_error:
3776         free_dma_desc_resources(priv);
3777 dma_desc_error:
3778         phylink_disconnect_phy(priv->phylink);
3779 init_phy_error:
3780         pm_runtime_put(priv->device);
3781         return ret;
3782 }
3783
3784 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3785 {
3786         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3787
3788         if (priv->fpe_wq)
3789                 destroy_workqueue(priv->fpe_wq);
3790
3791         netdev_info(priv->dev, "FPE workqueue stopped\n");
3792 }
3793
3794 /**
3795  *  stmmac_release - close entry point of the driver
3796  *  @dev : device pointer.
3797  *  Description:
3798  *  This is the stop entry point of the driver.
3799  */
3800 int stmmac_release(struct net_device *dev)
3801 {
3802         struct stmmac_priv *priv = netdev_priv(dev);
3803         u32 chan;
3804
3805         netif_tx_disable(dev);
3806
3807         if (device_may_wakeup(priv->device))
3808                 phylink_speed_down(priv->phylink, false);
3809         /* Stop and disconnect the PHY */
3810         phylink_stop(priv->phylink);
3811         phylink_disconnect_phy(priv->phylink);
3812
3813         stmmac_disable_all_queues(priv);
3814
3815         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3816                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3817
3818         /* Free the IRQ lines */
3819         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3820
3821         if (priv->eee_enabled) {
3822                 priv->tx_path_in_lpi_mode = false;
3823                 del_timer_sync(&priv->eee_ctrl_timer);
3824         }
3825
3826         /* Stop TX/RX DMA and clear the descriptors */
3827         stmmac_stop_all_dma(priv);
3828
3829         /* Release and free the Rx/Tx resources */
3830         free_dma_desc_resources(priv);
3831
3832         /* Disable the MAC Rx/Tx */
3833         stmmac_mac_set(priv, priv->ioaddr, false);
3834
3835         netif_carrier_off(dev);
3836
3837         stmmac_release_ptp(priv);
3838
3839         pm_runtime_put(priv->device);
3840
3841         if (priv->dma_cap.fpesel)
3842                 stmmac_fpe_stop_wq(priv);
3843
3844         return 0;
3845 }
3846
3847 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3848                                struct stmmac_tx_queue *tx_q)
3849 {
3850         u16 tag = 0x0, inner_tag = 0x0;
3851         u32 inner_type = 0x0;
3852         struct dma_desc *p;
3853
3854         if (!priv->dma_cap.vlins)
3855                 return false;
3856         if (!skb_vlan_tag_present(skb))
3857                 return false;
3858         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3859                 inner_tag = skb_vlan_tag_get(skb);
3860                 inner_type = STMMAC_VLAN_INSERT;
3861         }
3862
3863         tag = skb_vlan_tag_get(skb);
3864
3865         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3866                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3867         else
3868                 p = &tx_q->dma_tx[tx_q->cur_tx];
3869
3870         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3871                 return false;
3872
3873         stmmac_set_tx_owner(priv, p);
3874         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3875         return true;
3876 }
3877
3878 /**
3879  *  stmmac_tso_allocator - fill TX descriptors for a TSO buffer
3880  *  @priv: driver private structure
3881  *  @des: buffer start address
3882  *  @total_len: total length to fill in descriptors
3883  *  @last_segment: condition for the last descriptor
3884  *  @queue: TX queue index
3885  *  Description:
3886  *  This function fills the descriptors, requesting new ones according to
3887  *  the buffer length to fill
3888  */
3889 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3890                                  int total_len, bool last_segment, u32 queue)
3891 {
3892         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3893         struct dma_desc *desc;
3894         u32 buff_size;
3895         int tmp_len;
3896
3897         tmp_len = total_len;
3898
3899         while (tmp_len > 0) {
3900                 dma_addr_t curr_addr;
3901
3902                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3903                                                 priv->dma_tx_size);
3904                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3905
3906                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3907                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3908                 else
3909                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3910
3911                 curr_addr = des + (total_len - tmp_len);
3912                 if (priv->dma_cap.addr64 <= 32)
3913                         desc->des0 = cpu_to_le32(curr_addr);
3914                 else
3915                         stmmac_set_desc_addr(priv, desc, curr_addr);
3916
3917                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3918                             TSO_MAX_BUFF_SIZE : tmp_len;
3919
3920                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3921                                 0, 1,
3922                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3923                                 0, 0);
3924
3925                 tmp_len -= TSO_MAX_BUFF_SIZE;
3926         }
3927 }
3928
3929 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3930 {
3931         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3932         int desc_size;
3933
3934         if (likely(priv->extend_desc))
3935                 desc_size = sizeof(struct dma_extended_desc);
3936         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3937                 desc_size = sizeof(struct dma_edesc);
3938         else
3939                 desc_size = sizeof(struct dma_desc);
3940
3941         /* The own bit must be the last setting done when preparing the
3942          * descriptor, and a barrier is needed to make sure that
3943          * everything is coherent before granting control to the DMA engine.
3944          */
3945         wmb();
3946
3947         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3948         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3949 }
3950
3951 /**
3952  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3953  *  @skb : the socket buffer
3954  *  @dev : device pointer
3955  *  Description: this is the transmit function that is called on TSO frames
3956  *  (support available on GMAC4 and newer chips).
3957  *  The diagram below shows the ring programming in case of TSO frames:
3958  *
3959  *  First Descriptor
3960  *   --------
3961  *   | DES0 |---> buffer1 = L2/L3/L4 header
3962  *   | DES1 |---> TCP Payload (can continue on next descr...)
3963  *   | DES2 |---> buffer 1 and 2 len
3964  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3965  *   --------
3966  *      |
3967  *     ...
3968  *      |
3969  *   --------
3970  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3971  *   | DES1 | --|
3972  *   | DES2 | --> buffer 1 and 2 len
3973  *   | DES3 |
3974  *   --------
3975  *
3976  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
3977  */
3978 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3979 {
3980         struct dma_desc *desc, *first, *mss_desc = NULL;
3981         struct stmmac_priv *priv = netdev_priv(dev);
3982         int nfrags = skb_shinfo(skb)->nr_frags;
3983         u32 queue = skb_get_queue_mapping(skb);
3984         unsigned int first_entry, tx_packets;
3985         int tmp_pay_len = 0, first_tx;
3986         struct stmmac_tx_queue *tx_q;
3987         bool has_vlan, set_ic;
3988         u8 proto_hdr_len, hdr;
3989         u32 pay_len, mss;
3990         dma_addr_t des;
3991         int i;
3992
3993         tx_q = &priv->tx_queue[queue];
3994         first_tx = tx_q->cur_tx;
3995
3996         /* Compute header lengths */
3997         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3998                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3999                 hdr = sizeof(struct udphdr);
4000         } else {
4001                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4002                 hdr = tcp_hdrlen(skb);
4003         }
4004
4005         /* Desc availability based on threshold should be safe enough */
4006         if (unlikely(stmmac_tx_avail(priv, queue) <
4007                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4008                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4009                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4010                                                                 queue));
4011                         /* This is a hard error, log it. */
4012                         netdev_err(priv->dev,
4013                                    "%s: Tx Ring full when queue awake\n",
4014                                    __func__);
4015                 }
4016                 return NETDEV_TX_BUSY;
4017         }
4018
4019         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4020
4021         mss = skb_shinfo(skb)->gso_size;
4022
4023         /* set new MSS value if needed */
4024         if (mss != tx_q->mss) {
4025                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4026                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4027                 else
4028                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4029
4030                 stmmac_set_mss(priv, mss_desc, mss);
4031                 tx_q->mss = mss;
4032                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4033                                                 priv->dma_tx_size);
4034                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4035         }
4036
4037         if (netif_msg_tx_queued(priv)) {
4038                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4039                         __func__, hdr, proto_hdr_len, pay_len, mss);
4040                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4041                         skb->data_len);
4042         }
4043
4044         /* Check if VLAN can be inserted by HW */
4045         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4046
4047         first_entry = tx_q->cur_tx;
4048         WARN_ON(tx_q->tx_skbuff[first_entry]);
4049
4050         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4051                 desc = &tx_q->dma_entx[first_entry].basic;
4052         else
4053                 desc = &tx_q->dma_tx[first_entry];
4054         first = desc;
4055
4056         if (has_vlan)
4057                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4058
4059         /* first descriptor: fill Headers on Buf1 */
4060         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4061                              DMA_TO_DEVICE);
4062         if (dma_mapping_error(priv->device, des))
4063                 goto dma_map_err;
4064
4065         tx_q->tx_skbuff_dma[first_entry].buf = des;
4066         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4067         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4068         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4069
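             /* With <= 32-bit DMA addressing the first descriptor carries the
              * header in buffer 1 and the start of the payload in buffer 2;
              * otherwise the payload starts in the following descriptors.
              */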
4070         if (priv->dma_cap.addr64 <= 32) {
4071                 first->des0 = cpu_to_le32(des);
4072
4073                 /* Fill start of payload in buff2 of first descriptor */
4074                 if (pay_len)
4075                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4076
4077                 /* If needed take extra descriptors to fill the remaining payload */
4078                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4079         } else {
4080                 stmmac_set_desc_addr(priv, first, des);
4081                 tmp_pay_len = pay_len;
4082                 des += proto_hdr_len;
4083                 pay_len = 0;
4084         }
4085
4086         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4087
4088         /* Prepare fragments */
4089         for (i = 0; i < nfrags; i++) {
4090                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4091
4092                 des = skb_frag_dma_map(priv->device, frag, 0,
4093                                        skb_frag_size(frag),
4094                                        DMA_TO_DEVICE);
4095                 if (dma_mapping_error(priv->device, des))
4096                         goto dma_map_err;
4097
4098                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4099                                      (i == nfrags - 1), queue);
4100
4101                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4102                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4103                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4104                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4105         }
4106
4107         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4108
4109         /* Only the last descriptor gets to point to the skb. */
4110         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4111         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4112
4113         /* Manage tx mitigation */
4114         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4115         tx_q->tx_count_frames += tx_packets;
4116
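             /* Decide whether to set the interrupt-on-completion bit: always for
              * HW timestamped frames, otherwise based on the TX coalesce frame count.
              */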
4117         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4118                 set_ic = true;
4119         else if (!priv->tx_coal_frames[queue])
4120                 set_ic = false;
4121         else if (tx_packets > priv->tx_coal_frames[queue])
4122                 set_ic = true;
4123         else if ((tx_q->tx_count_frames %
4124                   priv->tx_coal_frames[queue]) < tx_packets)
4125                 set_ic = true;
4126         else
4127                 set_ic = false;
4128
4129         if (set_ic) {
4130                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4131                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4132                 else
4133                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4134
4135                 tx_q->tx_count_frames = 0;
4136                 stmmac_set_tx_ic(priv, desc);
4137                 priv->xstats.tx_set_ic_bit++;
4138         }
4139
4140         /* We've used all descriptors we need for this skb, however,
4141          * advance cur_tx so that it references a fresh descriptor.
4142          * ndo_start_xmit will fill this descriptor the next time it's
4143          * called and stmmac_tx_clean may clean up to this descriptor.
4144          */
4145         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4146
4147         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4148                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4149                           __func__);
4150                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4151         }
4152
4153         dev->stats.tx_bytes += skb->len;
4154         priv->xstats.tx_tso_frames++;
4155         priv->xstats.tx_tso_nfrags += nfrags;
4156
4157         if (priv->sarc_type)
4158                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4159
4160         skb_tx_timestamp(skb);
4161
4162         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4163                      priv->hwts_tx_en)) {
4164                 /* declare that device is doing timestamping */
4165                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4166                 stmmac_enable_tx_timestamp(priv, first);
4167         }
4168
4169         /* Complete the first descriptor before granting the DMA */
4170         stmmac_prepare_tso_tx_desc(priv, first, 1,
4171                         proto_hdr_len,
4172                         pay_len,
4173                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4174                         hdr / 4, (skb->len - proto_hdr_len));
4175
4176         /* If context desc is used to change MSS */
4177         if (mss_desc) {
4178                 /* Make sure that the first descriptor has been completely
4179                  * written, including its own bit. The MSS descriptor is
4180                  * placed before the first descriptor, so we need to make
4181                  * sure that the MSS descriptor's own bit is the last thing written.
4182                  */
4183                 dma_wmb();
4184                 stmmac_set_tx_owner(priv, mss_desc);
4185         }
4186
4187         if (netif_msg_pktdata(priv)) {
4188                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4189                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4190                         tx_q->cur_tx, first, nfrags);
4191                 pr_info(">>> frame to be transmitted: ");
4192                 print_pkt(skb->data, skb_headlen(skb));
4193         }
4194
4195         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4196
4197         stmmac_flush_tx_descriptors(priv, queue);
4198         stmmac_tx_timer_arm(priv, queue);
4199
4200         return NETDEV_TX_OK;
4201
4202 dma_map_err:
4203         dev_err(priv->device, "Tx dma map failed\n");
4204         dev_kfree_skb(skb);
4205         priv->dev->stats.tx_dropped++;
4206         return NETDEV_TX_OK;
4207 }
4208
4209 /**
4210  *  stmmac_xmit - Tx entry point of the driver
4211  *  @skb : the socket buffer
4212  *  @dev : device pointer
4213  *  Description : this is the tx entry point of the driver.
4214  *  It programs the chain or the ring and supports oversized frames
4215  *  and the SG feature.
4216  */
4217 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4218 {
4219         unsigned int first_entry, tx_packets, enh_desc;
4220         struct stmmac_priv *priv = netdev_priv(dev);
4221         unsigned int nopaged_len = skb_headlen(skb);
4222         int i, csum_insertion = 0, is_jumbo = 0;
4223         u32 queue = skb_get_queue_mapping(skb);
4224         int nfrags = skb_shinfo(skb)->nr_frags;
4225         int gso = skb_shinfo(skb)->gso_type;
4226         struct dma_edesc *tbs_desc = NULL;
4227         struct dma_desc *desc, *first;
4228         struct stmmac_tx_queue *tx_q;
4229         bool has_vlan, set_ic;
4230         int entry, first_tx;
4231         dma_addr_t des;
4232
4233         tx_q = &priv->tx_queue[queue];
4234         first_tx = tx_q->cur_tx;
4235
4236         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4237                 stmmac_disable_eee_mode(priv);
4238
4239         /* Manage oversized TCP frames for GMAC4 device */
4240         if (skb_is_gso(skb) && priv->tso) {
4241                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4242                         return stmmac_tso_xmit(skb, dev);
4243                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4244                         return stmmac_tso_xmit(skb, dev);
4245         }
4246
4247         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4248                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4249                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4250                                                                 queue));
4251                         /* This is a hard error, log it. */
4252                         netdev_err(priv->dev,
4253                                    "%s: Tx Ring full when queue awake\n",
4254                                    __func__);
4255                 }
4256                 return NETDEV_TX_BUSY;
4257         }
4258
4259         /* Check if VLAN can be inserted by HW */
4260         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4261
4262         entry = tx_q->cur_tx;
4263         first_entry = entry;
4264         WARN_ON(tx_q->tx_skbuff[first_entry]);
4265
4266         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4267
4268         if (likely(priv->extend_desc))
4269                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4270         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4271                 desc = &tx_q->dma_entx[entry].basic;
4272         else
4273                 desc = tx_q->dma_tx + entry;
4274
4275         first = desc;
4276
4277         if (has_vlan)
4278                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4279
4280         enh_desc = priv->plat->enh_desc;
4281         /* To program the descriptors according to the size of the frame */
4282         if (enh_desc)
4283                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4284
4285         if (unlikely(is_jumbo)) {
4286                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4287                 if (unlikely(entry < 0) && (entry != -EINVAL))
4288                         goto dma_map_err;
4289         }
4290
4291         for (i = 0; i < nfrags; i++) {
4292                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4293                 int len = skb_frag_size(frag);
4294                 bool last_segment = (i == (nfrags - 1));
4295
4296                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4297                 WARN_ON(tx_q->tx_skbuff[entry]);
4298
4299                 if (likely(priv->extend_desc))
4300                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4301                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4302                         desc = &tx_q->dma_entx[entry].basic;
4303                 else
4304                         desc = tx_q->dma_tx + entry;
4305
4306                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4307                                        DMA_TO_DEVICE);
4308                 if (dma_mapping_error(priv->device, des))
4309                         goto dma_map_err; /* should reuse desc w/o issues */
4310
4311                 tx_q->tx_skbuff_dma[entry].buf = des;
4312
4313                 stmmac_set_desc_addr(priv, desc, des);
4314
4315                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4316                 tx_q->tx_skbuff_dma[entry].len = len;
4317                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4318                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4319
4320                 /* Prepare the descriptor and set the own bit too */
4321                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4322                                 priv->mode, 1, last_segment, skb->len);
4323         }
4324
4325         /* Only the last descriptor gets to point to the skb. */
4326         tx_q->tx_skbuff[entry] = skb;
4327         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4328
4329         /* According to the coalesce parameter the IC bit for the latest
4330          * segment is reset and the timer re-started to clean the tx status.
4331          * This approach takes care of the fragments: desc is the first
4332          * element in case of no SG.
4333          */
4334         tx_packets = (entry + 1) - first_tx;
4335         tx_q->tx_count_frames += tx_packets;
4336
4337         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4338                 set_ic = true;
4339         else if (!priv->tx_coal_frames[queue])
4340                 set_ic = false;
4341         else if (tx_packets > priv->tx_coal_frames[queue])
4342                 set_ic = true;
4343         else if ((tx_q->tx_count_frames %
4344                   priv->tx_coal_frames[queue]) < tx_packets)
4345                 set_ic = true;
4346         else
4347                 set_ic = false;
4348
4349         if (set_ic) {
4350                 if (likely(priv->extend_desc))
4351                         desc = &tx_q->dma_etx[entry].basic;
4352                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4353                         desc = &tx_q->dma_entx[entry].basic;
4354                 else
4355                         desc = &tx_q->dma_tx[entry];
4356
4357                 tx_q->tx_count_frames = 0;
4358                 stmmac_set_tx_ic(priv, desc);
4359                 priv->xstats.tx_set_ic_bit++;
4360         }
4361
4362         /* We've used all descriptors we need for this skb, however,
4363          * advance cur_tx so that it references a fresh descriptor.
4364          * ndo_start_xmit will fill this descriptor the next time it's
4365          * called and stmmac_tx_clean may clean up to this descriptor.
4366          */
4367         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4368         tx_q->cur_tx = entry;
4369
4370         if (netif_msg_pktdata(priv)) {
4371                 netdev_dbg(priv->dev,
4372                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4373                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4374                            entry, first, nfrags);
4375
4376                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4377                 print_pkt(skb->data, skb->len);
4378         }
4379
4380         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4381                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4382                           __func__);
4383                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4384         }
4385
4386         dev->stats.tx_bytes += skb->len;
4387
4388         if (priv->sarc_type)
4389                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4390
4391         skb_tx_timestamp(skb);
4392
4393         /* Ready to fill the first descriptor and set the OWN bit w/o any
4394          * problems because all the descriptors are actually ready to be
4395          * passed to the DMA engine.
4396          */
4397         if (likely(!is_jumbo)) {
4398                 bool last_segment = (nfrags == 0);
4399
4400                 des = dma_map_single(priv->device, skb->data,
4401                                      nopaged_len, DMA_TO_DEVICE);
4402                 if (dma_mapping_error(priv->device, des))
4403                         goto dma_map_err;
4404
4405                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4406                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4407                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4408
4409                 stmmac_set_desc_addr(priv, first, des);
4410
4411                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4412                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4413
4414                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4415                              priv->hwts_tx_en)) {
4416                         /* declare that device is doing timestamping */
4417                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4418                         stmmac_enable_tx_timestamp(priv, first);
4419                 }
4420
4421                 /* Prepare the first descriptor setting the OWN bit too */
4422                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4423                                 csum_insertion, priv->mode, 0, last_segment,
4424                                 skb->len);
4425         }
4426
4427         if (tx_q->tbs & STMMAC_TBS_EN) {
4428                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4429
4430                 tbs_desc = &tx_q->dma_entx[first_entry];
4431                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4432         }
4433
4434         stmmac_set_tx_owner(priv, first);
4435
4436         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4437
4438         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4439
4440         stmmac_flush_tx_descriptors(priv, queue);
4441         stmmac_tx_timer_arm(priv, queue);
4442
4443         return NETDEV_TX_OK;
4444
4445 dma_map_err:
4446         netdev_err(priv->dev, "Tx DMA map failed\n");
4447         dev_kfree_skb(skb);
4448         priv->dev->stats.tx_dropped++;
4449         return NETDEV_TX_OK;
4450 }
4451
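/**
 * stmmac_rx_vlan - strip the VLAN tag from a received frame in software
 * @dev: device pointer
 * @skb: the received socket buffer
 * Description: if the Ethernet header carries an 802.1Q or 802.1AD tag and
 * the matching RX VLAN offload feature is enabled, pop the tag from the
 * packet data and record it with the VLAN hwaccel helpers.
 */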
4452 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4453 {
4454         struct vlan_ethhdr *veth;
4455         __be16 vlan_proto;
4456         u16 vlanid;
4457
4458         veth = (struct vlan_ethhdr *)skb->data;
4459         vlan_proto = veth->h_vlan_proto;
4460
4461         if ((vlan_proto == htons(ETH_P_8021Q) &&
4462              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4463             (vlan_proto == htons(ETH_P_8021AD) &&
4464              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4465                 /* pop the vlan tag */
4466                 vlanid = ntohs(veth->h_vlan_TCI);
4467                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4468                 skb_pull(skb, VLAN_HLEN);
4469                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4470         }
4471 }
4472
4473 /**
4474  * stmmac_rx_refill - refill used skb preallocated buffers
4475  * @priv: driver private structure
4476  * @queue: RX queue index
4477  * Description : this is to reallocate the skb for the reception process
4478  * that is based on zero-copy.
4479  */
4480 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4481 {
4482         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4483         int dirty = stmmac_rx_dirty(priv, queue);
4484         unsigned int entry = rx_q->dirty_rx;
4485
4486         while (dirty-- > 0) {
4487                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4488                 struct dma_desc *p;
4489                 bool use_rx_wd;
4490
4491                 if (priv->extend_desc)
4492                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4493                 else
4494                         p = rx_q->dma_rx + entry;
4495
4496                 if (!buf->page) {
4497                         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4498                         if (!buf->page)
4499                                 break;
4500                 }
4501
4502                 if (priv->sph && !buf->sec_page) {
4503                         buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4504                         if (!buf->sec_page)
4505                                 break;
4506
4507                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4508                 }
4509
4510                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4511
4512                 stmmac_set_desc_addr(priv, p, buf->addr);
4513                 if (priv->sph)
4514                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4515                 else
4516                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4517                 stmmac_refill_desc3(priv, rx_q, p);
4518
4519                 rx_q->rx_count_frames++;
4520                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4521                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4522                         rx_q->rx_count_frames = 0;
4523
4524                 use_rx_wd = !priv->rx_coal_frames[queue];
4525                 use_rx_wd |= rx_q->rx_count_frames > 0;
4526                 if (!priv->use_riwt)
4527                         use_rx_wd = false;
4528
4529                 dma_wmb();
4530                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4531
4532                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4533         }
4534         rx_q->dirty_rx = entry;
4535         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4536                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4537         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4538 }
4539
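/**
 * stmmac_rx_buf1_len - length of the payload held in the first RX buffer
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX descriptor status
 * @len: frame length accumulated so far
 * Description: returns the number of bytes landed in buffer 1, taking the
 * Split Header (SPH) feature and the first/last descriptor flags into
 * account.
 */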
4540 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4541                                        struct dma_desc *p,
4542                                        int status, unsigned int len)
4543 {
4544         unsigned int plen = 0, hlen = 0;
4545         int coe = priv->hw->rx_csum;
4546
4547         /* Not first descriptor, buffer is always zero */
4548         if (priv->sph && len)
4549                 return 0;
4550
4551         /* First descriptor, get split header length */
4552         stmmac_get_rx_header_len(priv, p, &hlen);
4553         if (priv->sph && hlen) {
4554                 priv->xstats.rx_split_hdr_pkt_n++;
4555                 return hlen;
4556         }
4557
4558         /* First descriptor, not last descriptor and not split header */
4559         if (status & rx_not_ls)
4560                 return priv->dma_buf_sz;
4561
4562         plen = stmmac_get_rx_frame_len(priv, p, coe);
4563
4564         /* First descriptor and last descriptor and not split header */
4565         return min_t(unsigned int, priv->dma_buf_sz, plen);
4566 }
4567
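/**
 * stmmac_rx_buf2_len - length of the payload held in the second RX buffer
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX descriptor status
 * @len: frame length accumulated so far
 * Description: buffer 2 is only used when Split Header (SPH) is enabled;
 * returns 0 otherwise, the full buffer size for non-last descriptors, or
 * the remaining payload for the last descriptor.
 */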
4568 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4569                                        struct dma_desc *p,
4570                                        int status, unsigned int len)
4571 {
4572         int coe = priv->hw->rx_csum;
4573         unsigned int plen = 0;
4574
4575         /* Not split header, buffer is not available */
4576         if (!priv->sph)
4577                 return 0;
4578
4579         /* Not last descriptor */
4580         if (status & rx_not_ls)
4581                 return priv->dma_buf_sz;
4582
4583         plen = stmmac_get_rx_frame_len(priv, p, coe);
4584
4585         /* Last descriptor */
4586         return plen - len;
4587 }
4588
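/**
 * stmmac_xdp_xmit_xdpf - queue one XDP frame on a TX ring
 * @priv: driver private structure
 * @queue: TX queue index
 * @xdpf: XDP frame to transmit
 * @dma_map: true for the ndo_xdp_xmit path (frame is DMA-mapped here),
 *           false for XDP_TX (buffer already belongs to the RX page pool)
 * Description: fills the next free TX descriptor and advances cur_tx.
 * Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED if the ring is
 * too full or the DMA mapping fails.
 */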
4589 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4590                                 struct xdp_frame *xdpf, bool dma_map)
4591 {
4592         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4593         unsigned int entry = tx_q->cur_tx;
4594         struct dma_desc *tx_desc;
4595         dma_addr_t dma_addr;
4596         bool set_ic;
4597
4598         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4599                 return STMMAC_XDP_CONSUMED;
4600
4601         if (likely(priv->extend_desc))
4602                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4603         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4604                 tx_desc = &tx_q->dma_entx[entry].basic;
4605         else
4606                 tx_desc = tx_q->dma_tx + entry;
4607
4608         if (dma_map) {
4609                 dma_addr = dma_map_single(priv->device, xdpf->data,
4610                                           xdpf->len, DMA_TO_DEVICE);
4611                 if (dma_mapping_error(priv->device, dma_addr))
4612                         return STMMAC_XDP_CONSUMED;
4613
4614                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4615         } else {
4616                 struct page *page = virt_to_page(xdpf->data);
4617
4618                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4619                            xdpf->headroom;
4620                 dma_sync_single_for_device(priv->device, dma_addr,
4621                                            xdpf->len, DMA_BIDIRECTIONAL);
4622
4623                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4624         }
4625
4626         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4627         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4628         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4629         tx_q->tx_skbuff_dma[entry].last_segment = true;
4630         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4631
4632         tx_q->xdpf[entry] = xdpf;
4633
4634         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4635
4636         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4637                                true, priv->mode, true, true,
4638                                xdpf->len);
4639
4640         tx_q->tx_count_frames++;
4641
4642         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4643                 set_ic = true;
4644         else
4645                 set_ic = false;
4646
4647         if (set_ic) {
4648                 tx_q->tx_count_frames = 0;
4649                 stmmac_set_tx_ic(priv, tx_desc);
4650                 priv->xstats.tx_set_ic_bit++;
4651         }
4652
4653         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4654
4655         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4656         tx_q->cur_tx = entry;
4657
4658         return STMMAC_XDP_TX;
4659 }
4660
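/* Map the current CPU to a TX queue index, wrapping around the number of TX
 * queues in use so XDP transmission always gets a valid queue.
 */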
4661 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4662                                    int cpu)
4663 {
4664         int index = cpu;
4665
4666         if (unlikely(index < 0))
4667                 index = 0;
4668
4669         while (index >= priv->plat->tx_queues_to_use)
4670                 index -= priv->plat->tx_queues_to_use;
4671
4672         return index;
4673 }
4674
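/* Transmit an XDP_TX buffer back out of the interface: convert it to an
 * xdp_frame and send it on the TX queue mapped to the current CPU, holding
 * the netdev TX queue lock to serialize against the slow path.
 */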
4675 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4676                                 struct xdp_buff *xdp)
4677 {
4678         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4679         int cpu = smp_processor_id();
4680         struct netdev_queue *nq;
4681         int queue;
4682         int res;
4683
4684         if (unlikely(!xdpf))
4685                 return STMMAC_XDP_CONSUMED;
4686
4687         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4688         nq = netdev_get_tx_queue(priv->dev, queue);
4689
4690         __netif_tx_lock(nq, cpu);
4691         /* Avoids TX time-out as we are sharing with slow path */
4692         nq->trans_start = jiffies;
4693
4694         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4695         if (res == STMMAC_XDP_TX)
4696                 stmmac_flush_tx_descriptors(priv, queue);
4697
4698         __netif_tx_unlock(nq);
4699
4700         return res;
4701 }
4702
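/* Run the given XDP program on one buffer and translate its verdict
 * (XDP_PASS/TX/REDIRECT/ABORTED/DROP) into the driver's STMMAC_XDP_* codes.
 */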
4703 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4704                                  struct bpf_prog *prog,
4705                                  struct xdp_buff *xdp)
4706 {
4707         u32 act;
4708         int res;
4709
4710         act = bpf_prog_run_xdp(prog, xdp);
4711         switch (act) {
4712         case XDP_PASS:
4713                 res = STMMAC_XDP_PASS;
4714                 break;
4715         case XDP_TX:
4716                 res = stmmac_xdp_xmit_back(priv, xdp);
4717                 break;
4718         case XDP_REDIRECT:
4719                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4720                         res = STMMAC_XDP_CONSUMED;
4721                 else
4722                         res = STMMAC_XDP_REDIRECT;
4723                 break;
4724         default:
4725                 bpf_warn_invalid_xdp_action(act);
4726                 fallthrough;
4727         case XDP_ABORTED:
4728                 trace_xdp_exception(priv->dev, prog, act);
4729                 fallthrough;
4730         case XDP_DROP:
4731                 res = STMMAC_XDP_CONSUMED;
4732                 break;
4733         }
4734
4735         return res;
4736 }
4737
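/* Run the attached XDP program, if any, on a copy-mode RX buffer. The
 * verdict is returned as ERR_PTR(-STMMAC_XDP_*): XDP_PASS yields NULL so
 * the caller goes on to build an skb for the frame.
 */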
4738 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4739                                            struct xdp_buff *xdp)
4740 {
4741         struct bpf_prog *prog;
4742         int res;
4743
4744         prog = READ_ONCE(priv->xdp_prog);
4745         if (!prog) {
4746                 res = STMMAC_XDP_PASS;
4747                 goto out;
4748         }
4749
4750         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4751 out:
4752         return ERR_PTR(-res);
4753 }
4754
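/* After an RX pass, arm the TX coalescing timer if frames were queued via
 * XDP_TX and flush any pending XDP_REDIRECT work.
 */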
4755 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4756                                    int xdp_status)
4757 {
4758         int cpu = smp_processor_id();
4759         int queue;
4760
4761         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4762
4763         if (xdp_status & STMMAC_XDP_TX)
4764                 stmmac_tx_timer_arm(priv, queue);
4765
4766         if (xdp_status & STMMAC_XDP_REDIRECT)
4767                 xdp_do_flush();
4768 }
4769
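/* Build an skb copy of an XDP_PASS zero-copy buffer: allocate from the
 * rxtx NAPI context, copy the payload and preserve any XDP metadata.
 */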
4770 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4771                                                struct xdp_buff *xdp)
4772 {
4773         unsigned int metasize = xdp->data - xdp->data_meta;
4774         unsigned int datasize = xdp->data_end - xdp->data;
4775         struct sk_buff *skb;
4776
4777         skb = __napi_alloc_skb(&ch->rxtx_napi,
4778                                xdp->data_end - xdp->data_hard_start,
4779                                GFP_ATOMIC | __GFP_NOWARN);
4780         if (unlikely(!skb))
4781                 return NULL;
4782
4783         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4784         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4785         if (metasize)
4786                 skb_metadata_set(skb, metasize);
4787
4788         return skb;
4789 }
4790
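/* Hand a zero-copy RX frame to the stack: build the skb, fill in the RX
 * timestamp, VLAN, checksum and RSS hash information, then pass it to
 * NAPI GRO and update the RX counters.
 */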
4791 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4792                                    struct dma_desc *p, struct dma_desc *np,
4793                                    struct xdp_buff *xdp)
4794 {
4795         struct stmmac_channel *ch = &priv->channel[queue];
4796         unsigned int len = xdp->data_end - xdp->data;
4797         enum pkt_hash_types hash_type;
4798         int coe = priv->hw->rx_csum;
4799         struct sk_buff *skb;
4800         u32 hash;
4801
4802         skb = stmmac_construct_skb_zc(ch, xdp);
4803         if (!skb) {
4804                 priv->dev->stats.rx_dropped++;
4805                 return;
4806         }
4807
4808         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4809         stmmac_rx_vlan(priv->dev, skb);
4810         skb->protocol = eth_type_trans(skb, priv->dev);
4811
4812         if (unlikely(!coe))
4813                 skb_checksum_none_assert(skb);
4814         else
4815                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4816
4817         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4818                 skb_set_hash(skb, hash, hash_type);
4819
4820         skb_record_rx_queue(skb, queue);
4821         napi_gro_receive(&ch->rxtx_napi, skb);
4822
4823         priv->dev->stats.rx_packets++;
4824         priv->dev->stats.rx_bytes += len;
4825 }
4826
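/**
 * stmmac_rx_refill_zc - refill RX descriptors with XSK pool buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * @budget: maximum number of descriptors to refill
 * Description: zero-copy counterpart of stmmac_rx_refill(). Returns false
 * if the XSK pool ran out of buffers before the budget was exhausted.
 */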
4827 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4828 {
4829         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4830         unsigned int entry = rx_q->dirty_rx;
4831         struct dma_desc *rx_desc = NULL;
4832         bool ret = true;
4833
4834         budget = min(budget, stmmac_rx_dirty(priv, queue));
4835
4836         while (budget-- > 0 && entry != rx_q->cur_rx) {
4837                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4838                 dma_addr_t dma_addr;
4839                 bool use_rx_wd;
4840
4841                 if (!buf->xdp) {
4842                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4843                         if (!buf->xdp) {
4844                                 ret = false;
4845                                 break;
4846                         }
4847                 }
4848
4849                 if (priv->extend_desc)
4850                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4851                 else
4852                         rx_desc = rx_q->dma_rx + entry;
4853
4854                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4855                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4856                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4857                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4858
4859                 rx_q->rx_count_frames++;
4860                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4861                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4862                         rx_q->rx_count_frames = 0;
4863
4864                 use_rx_wd = !priv->rx_coal_frames[queue];
4865                 use_rx_wd |= rx_q->rx_count_frames > 0;
4866                 if (!priv->use_riwt)
4867                         use_rx_wd = false;
4868
4869                 dma_wmb();
4870                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4871
4872                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4873         }
4874
4875         if (rx_desc) {
4876                 rx_q->dirty_rx = entry;
4877                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4878                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
4879                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4880         }
4881
4882         return ret;
4883 }
4884
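/**
 * stmmac_rx_zc - receive frames in XDP zero-copy (AF_XDP) mode
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index
 * Description: processes up to @limit completed descriptors, runs the XDP
 * program on each frame, refills the ring from the XSK pool and manages
 * the need_wakeup flag for the user-space consumer.
 */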
4885 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4886 {
4887         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4888         unsigned int count = 0, error = 0, len = 0;
4889         int dirty = stmmac_rx_dirty(priv, queue);
4890         unsigned int next_entry = rx_q->cur_rx;
4891         unsigned int desc_size;
4892         struct bpf_prog *prog;
4893         bool failure = false;
4894         int xdp_status = 0;
4895         int status = 0;
4896
4897         if (netif_msg_rx_status(priv)) {
4898                 void *rx_head;
4899
4900                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4901                 if (priv->extend_desc) {
4902                         rx_head = (void *)rx_q->dma_erx;
4903                         desc_size = sizeof(struct dma_extended_desc);
4904                 } else {
4905                         rx_head = (void *)rx_q->dma_rx;
4906                         desc_size = sizeof(struct dma_desc);
4907                 }
4908
4909                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4910                                     rx_q->dma_rx_phy, desc_size);
4911         }
4912         while (count < limit) {
4913                 struct stmmac_rx_buffer *buf;
4914                 unsigned int buf1_len = 0;
4915                 struct dma_desc *np, *p;
4916                 int entry;
4917                 int res;
4918
4919                 if (!count && rx_q->state_saved) {
4920                         error = rx_q->state.error;
4921                         len = rx_q->state.len;
4922                 } else {
4923                         rx_q->state_saved = false;
4924                         error = 0;
4925                         len = 0;
4926                 }
4927
4928                 if (count >= limit)
4929                         break;
4930
4931 read_again:
4932                 buf1_len = 0;
4933                 entry = next_entry;
4934                 buf = &rx_q->buf_pool[entry];
4935
4936                 if (dirty >= STMMAC_RX_FILL_BATCH) {
4937                         failure = failure ||
4938                                   !stmmac_rx_refill_zc(priv, queue, dirty);
4939                         dirty = 0;
4940                 }
4941
4942                 if (priv->extend_desc)
4943                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4944                 else
4945                         p = rx_q->dma_rx + entry;
4946
4947                 /* read the status of the incoming frame */
4948                 status = stmmac_rx_status(priv, &priv->dev->stats,
4949                                           &priv->xstats, p);
4950                 /* check if managed by the DMA otherwise go ahead */
4951                 if (unlikely(status & dma_own))
4952                         break;
4953
4954                 /* Prefetch the next RX descriptor */
4955                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4956                                                 priv->dma_rx_size);
4957                 next_entry = rx_q->cur_rx;
4958
4959                 if (priv->extend_desc)
4960                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4961                 else
4962                         np = rx_q->dma_rx + next_entry;
4963
4964                 prefetch(np);
4965
4966                 /* Ensure a valid XSK buffer before proceeding */
4967                 if (!buf->xdp)
4968                         break;
4969
4970                 if (priv->extend_desc)
4971                         stmmac_rx_extended_status(priv, &priv->dev->stats,
4972                                                   &priv->xstats,
4973                                                   rx_q->dma_erx + entry);
4974                 if (unlikely(status == discard_frame)) {
4975                         xsk_buff_free(buf->xdp);
4976                         buf->xdp = NULL;
4977                         dirty++;
4978                         error = 1;
4979                         if (!priv->hwts_rx_en)
4980                                 priv->dev->stats.rx_errors++;
4981                 }
4982
4983                 if (unlikely(error && (status & rx_not_ls)))
4984                         goto read_again;
4985                 if (unlikely(error)) {
4986                         count++;
4987                         continue;
4988                 }
4989
4990                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4991                 if (likely(status & rx_not_ls)) {
4992                         xsk_buff_free(buf->xdp);
4993                         buf->xdp = NULL;
4994                         dirty++;
4995                         count++;
4996                         goto read_again;
4997                 }
4998
4999                 /* XDP ZC frames only support primary buffers for now */
5000                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5001                 len += buf1_len;
5002
5003                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5004                  * Type frames (LLC/LLC-SNAP)
5005                  *
5006                  * llc_snap is never checked in GMAC >= 4, so this ACS
5007                  * feature is always disabled and packets need to be
5008                  * stripped manually.
5009                  */
5010                 if (likely(!(status & rx_not_ls)) &&
5011                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5012                      unlikely(status != llc_snap))) {
5013                         buf1_len -= ETH_FCS_LEN;
5014                         len -= ETH_FCS_LEN;
5015                 }
5016
5017                 /* RX buffer is good and fits into an XSK pool buffer */
5018                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5019                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5020
5021                 prog = READ_ONCE(priv->xdp_prog);
5022                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5023
5024                 switch (res) {
5025                 case STMMAC_XDP_PASS:
5026                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5027                         xsk_buff_free(buf->xdp);
5028                         break;
5029                 case STMMAC_XDP_CONSUMED:
5030                         xsk_buff_free(buf->xdp);
5031                         priv->dev->stats.rx_dropped++;
5032                         break;
5033                 case STMMAC_XDP_TX:
5034                 case STMMAC_XDP_REDIRECT:
5035                         xdp_status |= res;
5036                         break;
5037                 }
5038
5039                 buf->xdp = NULL;
5040                 dirty++;
5041                 count++;
5042         }
5043
5044         if (status & rx_not_ls) {
5045                 rx_q->state_saved = true;
5046                 rx_q->state.error = error;
5047                 rx_q->state.len = len;
5048         }
5049
5050         stmmac_finalize_xdp_rx(priv, xdp_status);
5051
5052         priv->xstats.rx_pkt_n += count;
5053         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5054
5055         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5056                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5057                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5058                 else
5059                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5060
5061                 return (int)count;
5062         }
5063
5064         return failure ? limit : (int)count;
5065 }
5066
5067 /**
5068  * stmmac_rx - manage the receive process
5069  * @priv: driver private structure
5070  * @limit: NAPI budget
5071  * @queue: RX queue index.
5072  * Description : this is the function called by the NAPI poll method.
5073  * It gets all the frames inside the ring.
5074  */
5075 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5076 {
5077         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5078         struct stmmac_channel *ch = &priv->channel[queue];
5079         unsigned int count = 0, error = 0, len = 0;
5080         int status = 0, coe = priv->hw->rx_csum;
5081         unsigned int next_entry = rx_q->cur_rx;
5082         enum dma_data_direction dma_dir;
5083         unsigned int desc_size;
5084         struct sk_buff *skb = NULL;
5085         struct xdp_buff xdp;
5086         int xdp_status = 0;
5087         int buf_sz;
5088
5089         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5090         buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5091
5092         if (netif_msg_rx_status(priv)) {
5093                 void *rx_head;
5094
5095                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5096                 if (priv->extend_desc) {
5097                         rx_head = (void *)rx_q->dma_erx;
5098                         desc_size = sizeof(struct dma_extended_desc);
5099                 } else {
5100                         rx_head = (void *)rx_q->dma_rx;
5101                         desc_size = sizeof(struct dma_desc);
5102                 }
5103
5104                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5105                                     rx_q->dma_rx_phy, desc_size);
5106         }
5107         while (count < limit) {
5108                 unsigned int buf1_len = 0, buf2_len = 0;
5109                 enum pkt_hash_types hash_type;
5110                 struct stmmac_rx_buffer *buf;
5111                 struct dma_desc *np, *p;
5112                 int entry;
5113                 u32 hash;
5114
5115                 if (!count && rx_q->state_saved) {
5116                         skb = rx_q->state.skb;
5117                         error = rx_q->state.error;
5118                         len = rx_q->state.len;
5119                 } else {
5120                         rx_q->state_saved = false;
5121                         skb = NULL;
5122                         error = 0;
5123                         len = 0;
5124                 }
5125
5126                 if (count >= limit)
5127                         break;
5128
5129 read_again:
5130                 buf1_len = 0;
5131                 buf2_len = 0;
5132                 entry = next_entry;
5133                 buf = &rx_q->buf_pool[entry];
5134
5135                 if (priv->extend_desc)
5136                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5137                 else
5138                         p = rx_q->dma_rx + entry;
5139
5140                 /* read the status of the incoming frame */
5141                 status = stmmac_rx_status(priv, &priv->dev->stats,
5142                                 &priv->xstats, p);
5143                 /* check if managed by the DMA otherwise go ahead */
5144                 if (unlikely(status & dma_own))
5145                         break;
5146
5147                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5148                                                 priv->dma_rx_size);
5149                 next_entry = rx_q->cur_rx;
5150
5151                 if (priv->extend_desc)
5152                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5153                 else
5154                         np = rx_q->dma_rx + next_entry;
5155
5156                 prefetch(np);
5157
5158                 if (priv->extend_desc)
5159                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5160                                         &priv->xstats, rx_q->dma_erx + entry);
5161                 if (unlikely(status == discard_frame)) {
5162                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5163                         buf->page = NULL;
5164                         error = 1;
5165                         if (!priv->hwts_rx_en)
5166                                 priv->dev->stats.rx_errors++;
5167                 }
5168
5169                 if (unlikely(error && (status & rx_not_ls)))
5170                         goto read_again;
5171                 if (unlikely(error)) {
5172                         dev_kfree_skb(skb);
5173                         skb = NULL;
5174                         count++;
5175                         continue;
5176                 }
5177
5178                 /* Buffer is good. Go on. */
5179
5180                 prefetch(page_address(buf->page) + buf->page_offset);
5181                 if (buf->sec_page)
5182                         prefetch(page_address(buf->sec_page));
5183
5184                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5185                 len += buf1_len;
5186                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5187                 len += buf2_len;
5188
5189                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5190                  * Type frames (LLC/LLC-SNAP)
5191                  *
5192                  * llc_snap is never checked in GMAC >= 4, so this ACS
5193                  * feature is always disabled and packets need to be
5194                  * stripped manually.
5195                  */
5196                 if (likely(!(status & rx_not_ls)) &&
5197                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5198                      unlikely(status != llc_snap))) {
5199                         if (buf2_len) {
5200                                 buf2_len -= ETH_FCS_LEN;
5201                                 len -= ETH_FCS_LEN;
5202                         } else if (buf1_len) {
5203                                 buf1_len -= ETH_FCS_LEN;
5204                                 len -= ETH_FCS_LEN;
5205                         }
5206                 }
5207
5208                 if (!skb) {
5209                         unsigned int pre_len, sync_len;
5210
5211                         dma_sync_single_for_cpu(priv->device, buf->addr,
5212                                                 buf1_len, dma_dir);
5213
5214                         xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5215                         xdp_prepare_buff(&xdp, page_address(buf->page),
5216                                          buf->page_offset, buf1_len, false);
5217
5218                         pre_len = xdp.data_end - xdp.data_hard_start -
5219                                   buf->page_offset;
5220                         skb = stmmac_xdp_run_prog(priv, &xdp);
5221                         /* Due to xdp_adjust_tail: the DMA sync for_device
5222                          * must cover the max length the CPU touched
5223                          */
5224                         sync_len = xdp.data_end - xdp.data_hard_start -
5225                                    buf->page_offset;
5226                         sync_len = max(sync_len, pre_len);
5227
5228                         /* For verdicts other than XDP_PASS */
5229                         if (IS_ERR(skb)) {
5230                                 unsigned int xdp_res = -PTR_ERR(skb);
5231
5232                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5233                                         page_pool_put_page(rx_q->page_pool,
5234                                                            virt_to_head_page(xdp.data),
5235                                                            sync_len, true);
5236                                         buf->page = NULL;
5237                                         priv->dev->stats.rx_dropped++;
5238
5239                                         /* Clear skb as it was set as
5240                                          * the verdict status by the XDP program.
5241                                          */
5242                                         skb = NULL;
5243
5244                                         if (unlikely((status & rx_not_ls)))
5245                                                 goto read_again;
5246
5247                                         count++;
5248                                         continue;
5249                                 } else if (xdp_res & (STMMAC_XDP_TX |
5250                                                       STMMAC_XDP_REDIRECT)) {
5251                                         xdp_status |= xdp_res;
5252                                         buf->page = NULL;
5253                                         skb = NULL;
5254                                         count++;
5255                                         continue;
5256                                 }
5257                         }
5258                 }
5259
5260                 if (!skb) {
5261                         /* XDP program may expand or reduce tail */
5262                         buf1_len = xdp.data_end - xdp.data;
5263
5264                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5265                         if (!skb) {
5266                                 priv->dev->stats.rx_dropped++;
5267                                 count++;
5268                                 goto drain_data;
5269                         }
5270
5271                         /* XDP program may adjust header */
5272                         skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5273                         skb_put(skb, buf1_len);
5274
5275                         /* Data payload copied into SKB, page ready for recycle */
5276                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5277                         buf->page = NULL;
5278                 } else if (buf1_len) {
5279                         dma_sync_single_for_cpu(priv->device, buf->addr,
5280                                                 buf1_len, dma_dir);
5281                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5282                                         buf->page, buf->page_offset, buf1_len,
5283                                         priv->dma_buf_sz);
5284
5285                         /* Data payload appended into SKB */
5286                         page_pool_release_page(rx_q->page_pool, buf->page);
5287                         buf->page = NULL;
5288                 }
5289
5290                 if (buf2_len) {
5291                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5292                                                 buf2_len, dma_dir);
5293                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5294                                         buf->sec_page, 0, buf2_len,
5295                                         priv->dma_buf_sz);
5296
5297                         /* Data payload appended into SKB */
5298                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5299                         buf->sec_page = NULL;
5300                 }
5301
5302 drain_data:
5303                 if (likely(status & rx_not_ls))
5304                         goto read_again;
5305                 if (!skb)
5306                         continue;
5307
5308                 /* Got entire packet into SKB. Finish it. */
5309
5310                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5311                 stmmac_rx_vlan(priv->dev, skb);
5312                 skb->protocol = eth_type_trans(skb, priv->dev);
5313
5314                 if (unlikely(!coe))
5315                         skb_checksum_none_assert(skb);
5316                 else
5317                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5318
5319                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5320                         skb_set_hash(skb, hash, hash_type);
5321
5322                 skb_record_rx_queue(skb, queue);
5323                 napi_gro_receive(&ch->rx_napi, skb);
5324                 skb = NULL;
5325
5326                 priv->dev->stats.rx_packets++;
5327                 priv->dev->stats.rx_bytes += len;
5328                 count++;
5329         }
5330
5331         if (status & rx_not_ls || skb) {
5332                 rx_q->state_saved = true;
5333                 rx_q->state.skb = skb;
5334                 rx_q->state.error = error;
5335                 rx_q->state.len = len;
5336         }
5337
5338         stmmac_finalize_xdp_rx(priv, xdp_status);
5339
5340         stmmac_rx_refill(priv, queue);
5341
5342         priv->xstats.rx_pkt_n += count;
5343         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5344
5345         return count;
5346 }
5347
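/* NAPI poll handler for RX-only channels: process up to @budget frames and
 * re-enable the RX DMA interrupt once the ring has been drained.
 */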
5348 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5349 {
5350         struct stmmac_channel *ch =
5351                 container_of(napi, struct stmmac_channel, rx_napi);
5352         struct stmmac_priv *priv = ch->priv_data;
5353         u32 chan = ch->index;
5354         int work_done;
5355
5356         priv->xstats.napi_poll++;
5357
5358         work_done = stmmac_rx(priv, budget, chan);
5359         if (work_done < budget && napi_complete_done(napi, work_done)) {
5360                 unsigned long flags;
5361
5362                 spin_lock_irqsave(&ch->lock, flags);
5363                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5364                 spin_unlock_irqrestore(&ch->lock, flags);
5365         }
5366
5367         return work_done;
5368 }
5369
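/* NAPI poll handler for TX-only channels: clean completed TX descriptors
 * and re-enable the TX DMA interrupt when done.
 */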
5370 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5371 {
5372         struct stmmac_channel *ch =
5373                 container_of(napi, struct stmmac_channel, tx_napi);
5374         struct stmmac_priv *priv = ch->priv_data;
5375         u32 chan = ch->index;
5376         int work_done;
5377
5378         priv->xstats.napi_poll++;
5379
5380         work_done = stmmac_tx_clean(priv, budget, chan);
5381         work_done = min(work_done, budget);
5382
5383         if (work_done < budget && napi_complete_done(napi, work_done)) {
5384                 unsigned long flags;
5385
5386                 spin_lock_irqsave(&ch->lock, flags);
5387                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5388                 spin_unlock_irqrestore(&ch->lock, flags);
5389         }
5390
5391         return work_done;
5392 }
5393
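/* NAPI poll handler for combined RX/TX (AF_XDP) channels: clean TX and
 * receive zero-copy frames under a single budget, re-enabling both IRQs
 * only when all work is complete.
 */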
5394 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5395 {
5396         struct stmmac_channel *ch =
5397                 container_of(napi, struct stmmac_channel, rxtx_napi);
5398         struct stmmac_priv *priv = ch->priv_data;
5399         int rx_done, tx_done, rxtx_done;
5400         u32 chan = ch->index;
5401
5402         priv->xstats.napi_poll++;
5403
5404         tx_done = stmmac_tx_clean(priv, budget, chan);
5405         tx_done = min(tx_done, budget);
5406
5407         rx_done = stmmac_rx_zc(priv, budget, chan);
5408
5409         rxtx_done = max(tx_done, rx_done);
5410
5411         /* If either TX or RX work is not complete, return budget
5412          * and keep polling
5413          */
5414         if (rxtx_done >= budget)
5415                 return budget;
5416
5417         /* all work done, exit the polling mode */
5418         if (napi_complete_done(napi, rxtx_done)) {
5419                 unsigned long flags;
5420
5421                 spin_lock_irqsave(&ch->lock, flags);
5422                 /* Both RX and TX work are complete,
5423                  * so enable both RX & TX IRQs.
5424                  */
5425                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5426                 spin_unlock_irqrestore(&ch->lock, flags);
5427         }
5428
5429         return min(rxtx_done, budget - 1);
5430 }
5431
5432 /**
5433  *  stmmac_tx_timeout
5434  *  @dev : Pointer to net device structure
5435  *  @txqueue: the index of the hanging transmit queue
5436  *  Description: this function is called when a packet transmission fails to
5437  *   complete within a reasonable time. The driver will mark the error in the
5438  *   netdev structure and arrange for the device to be reset to a sane state
5439  *   in order to transmit a new packet.
5440  */
5441 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5442 {
5443         struct stmmac_priv *priv = netdev_priv(dev);
5444
5445         stmmac_global_err(priv);
5446 }
5447
5448 /**
5449  *  stmmac_set_rx_mode - entry point for multicast addressing
5450  *  @dev : pointer to the device structure
5451  *  Description:
5452  *  This function is a driver entry point which gets called by the kernel
5453  *  whenever multicast addresses must be enabled/disabled.
5454  *  Return value:
5455  *  void.
5456  */
5457 static void stmmac_set_rx_mode(struct net_device *dev)
5458 {
5459         struct stmmac_priv *priv = netdev_priv(dev);
5460
5461         stmmac_set_filter(priv, priv->hw, dev);
5462 }
5463
5464 /**
5465  *  stmmac_change_mtu - entry point to change MTU size for the device.
5466  *  @dev : device pointer.
5467  *  @new_mtu : the new MTU size for the device.
5468  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5469  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5470  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5471  *  Return value:
5472  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5473  *  file on failure.
5474  */
5475 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5476 {
5477         struct stmmac_priv *priv = netdev_priv(dev);
5478         int txfifosz = priv->plat->tx_fifo_size;
5479         const int mtu = new_mtu;
5480
5481         if (txfifosz == 0)
5482                 txfifosz = priv->dma_cap.tx_fifo_size;
5483
5484         txfifosz /= priv->plat->tx_queues_to_use;
5485
5486         if (netif_running(dev)) {
5487                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
5488                 return -EBUSY;
5489         }
5490
5491         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5492                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5493                 return -EINVAL;
5494         }
5495
5496         new_mtu = STMMAC_ALIGN(new_mtu);
5497
5498         /* If condition true, FIFO is too small or MTU too large */
5499         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5500                 return -EINVAL;
5501
5502         dev->mtu = mtu;
5503
5504         netdev_update_features(dev);
5505
5506         return 0;
5507 }
5508
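/* Trim the requested netdev features down to what the current hardware
 * configuration supports: RX/TX checksum offload, the bugged-jumbo
 * workaround and the TSO toggle requested via ethtool.
 */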
5509 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5510                                              netdev_features_t features)
5511 {
5512         struct stmmac_priv *priv = netdev_priv(dev);
5513
5514         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5515                 features &= ~NETIF_F_RXCSUM;
5516
5517         if (!priv->plat->tx_coe)
5518                 features &= ~NETIF_F_CSUM_MASK;
5519
5520         /* Some GMAC devices have a bugged Jumbo frame support that
5521          * needs to have the Tx COE disabled for oversized frames
5522          * (due to limited buffer sizes). In this case we disable
5523          * the TX csum insertion in the TDES and do not use SF.
5524          */
5525         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5526                 features &= ~NETIF_F_CSUM_MASK;
5527
5528         /* Disable tso if asked by ethtool */
5529         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5530                 if (features & NETIF_F_TSO)
5531                         priv->tso = true;
5532                 else
5533                         priv->tso = false;
5534         }
5535
5536         return features;
5537 }
5538
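/* Apply a new feature set: program RX checksum offload in the MAC and
 * enable or disable Split Header (SPH) on every RX channel accordingly.
 */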
5539 static int stmmac_set_features(struct net_device *netdev,
5540                                netdev_features_t features)
5541 {
5542         struct stmmac_priv *priv = netdev_priv(netdev);
5543         bool sph_en;
5544         u32 chan;
5545
5546         /* Keep the COE Type only if RX checksum offload is requested */
5547         if (features & NETIF_F_RXCSUM)
5548                 priv->hw->rx_csum = priv->plat->rx_coe;
5549         else
5550                 priv->hw->rx_csum = 0;
5551         /* No check is needed because rx_coe has been set before and it will be
5552          * fixed in case of an issue.
5553          */
5554         stmmac_rx_ipc(priv, priv->hw);
5555
5556         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5557
5558         for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5559                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5560
5561         return 0;
5562 }
5563
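/* Track the Frame Preemption (FPE) handshake from the mPacket events
 * reported by the hardware and schedule the FPE workqueue when the
 * verify/response exchange makes progress.
 */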
5564 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5565 {
5566         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5567         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5568         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5569         bool *hs_enable = &fpe_cfg->hs_enable;
5570
5571         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5572                 return;
5573
5574         /* If LP has sent verify mPacket, LP is FPE capable */
5575         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5576                 if (*lp_state < FPE_STATE_CAPABLE)
5577                         *lp_state = FPE_STATE_CAPABLE;
5578
5579                 /* If user has requested FPE enable, respond quickly */
5580                 if (*hs_enable)
5581                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5582                                                 MPACKET_RESPONSE);
5583         }
5584
5585         /* If Local has sent verify mPacket, Local is FPE capable */
5586         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5587                 if (*lo_state < FPE_STATE_CAPABLE)
5588                         *lo_state = FPE_STATE_CAPABLE;
5589         }
5590
5591         /* If LP has sent response mPacket, LP is entering FPE ON */
5592         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5593                 *lp_state = FPE_STATE_ENTERING_ON;
5594
5595         /* If Local has sent response mPacket, Local is entering FPE ON */
5596         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5597                 *lo_state = FPE_STATE_ENTERING_ON;
5598
5599         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5600             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5601             priv->fpe_wq) {
5602                 queue_work(priv->fpe_wq, &priv->fpe_task);
5603         }
5604 }
5605
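/* Handle the interrupt sources that are not tied to a DMA channel: EST and
 * FPE events, GMAC core/LPI status, per-queue MTL status, PCS link changes
 * and timestamp events.  This helper is shared by the wired-IRQ path
 * (stmmac_interrupt) and the per-MAC handler (stmmac_mac_interrupt).
 */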
5606 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5607 {
5608         u32 rx_cnt = priv->plat->rx_queues_to_use;
5609         u32 tx_cnt = priv->plat->tx_queues_to_use;
5610         u32 queues_count;
5611         u32 queue;
5612         bool xmac;
5613
5614         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5615         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5616
5617         if (priv->irq_wake)
5618                 pm_wakeup_event(priv->device, 0);
5619
5620         if (priv->dma_cap.estsel)
5621                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5622                                       &priv->xstats, tx_cnt);
5623
5624         if (priv->dma_cap.fpesel) {
5625                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5626                                                    priv->dev);
5627
5628                 stmmac_fpe_event_status(priv, status);
5629         }
5630
5631         /* To handle GMAC own interrupts */
5632         if ((priv->plat->has_gmac) || xmac) {
5633                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5634
5635                 if (unlikely(status)) {
5636                         /* For LPI we need to save the tx status */
5637                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5638                                 priv->tx_path_in_lpi_mode = true;
5639                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5640                                 priv->tx_path_in_lpi_mode = false;
5641                 }
5642
5643                 for (queue = 0; queue < queues_count; queue++) {
5644                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5645                                                             queue);
5646                 }
5647
5648                 /* PCS link status */
5649                 if (priv->hw->pcs) {
5650                         if (priv->xstats.pcs_link)
5651                                 netif_carrier_on(priv->dev);
5652                         else
5653                                 netif_carrier_off(priv->dev);
5654                 }
5655
5656                 stmmac_timestamp_interrupt(priv, priv);
5657         }
5658 }
5659
5660 /**
5661  *  stmmac_interrupt - main ISR
5662  *  @irq: interrupt number.
5663  *  @dev_id: to pass the net device pointer.
5664  *  Description: this is the main driver interrupt service routine.
5665  *  It can call:
5666  *  o DMA service routine (to manage incoming frame reception and transmission
5667  *    status)
5668  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5669  *    interrupts.
5670  */
5671 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5672 {
5673         struct net_device *dev = (struct net_device *)dev_id;
5674         struct stmmac_priv *priv = netdev_priv(dev);
5675
5676         /* Check if adapter is up */
5677         if (test_bit(STMMAC_DOWN, &priv->state))
5678                 return IRQ_HANDLED;
5679
5680         /* Check if a fatal error happened */
5681         if (stmmac_safety_feat_interrupt(priv))
5682                 return IRQ_HANDLED;
5683
5684         /* To handle Common interrupts */
5685         stmmac_common_interrupt(priv);
5686
5687         /* To handle DMA interrupts */
5688         stmmac_dma_interrupt(priv);
5689
5690         return IRQ_HANDLED;
5691 }
5692
5693 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5694 {
5695         struct net_device *dev = (struct net_device *)dev_id;
5696         struct stmmac_priv *priv = netdev_priv(dev);
5697
5698         if (unlikely(!dev)) {
5699                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5700                 return IRQ_NONE;
5701         }
5702
5703         /* Check if adapter is up */
5704         if (test_bit(STMMAC_DOWN, &priv->state))
5705                 return IRQ_HANDLED;
5706
5707         /* To handle Common interrupts */
5708         stmmac_common_interrupt(priv);
5709
5710         return IRQ_HANDLED;
5711 }
5712
5713 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5714 {
5715         struct net_device *dev = (struct net_device *)dev_id;
5716         struct stmmac_priv *priv = netdev_priv(dev);
5717
5718         if (unlikely(!dev)) {
5719                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5720                 return IRQ_NONE;
5721         }
5722
5723         /* Check if adapter is up */
5724         if (test_bit(STMMAC_DOWN, &priv->state))
5725                 return IRQ_HANDLED;
5726
5727         /* Check if a fatal error happened */
5728         stmmac_safety_feat_interrupt(priv);
5729
5730         return IRQ_HANDLED;
5731 }
5732
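/* Per-channel TX interrupt handler (MSI vectors).  The TX queue passed as
 * dev_id is used to recover the private data via container_of().  A "bump
 * threshold" error grows the driver-wide tc threshold in steps of 64 (while
 * it stays <= 256) and re-programs the DMA operation mode, whereas a hard
 * TX error triggers stmmac_tx_err().
 */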
5733 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5734 {
5735         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5736         int chan = tx_q->queue_index;
5737         struct stmmac_priv *priv;
5738         int status;
5739
5740         priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5741
5742         if (unlikely(!data)) {
5743                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5744                 return IRQ_NONE;
5745         }
5746
5747         /* Check if adapter is up */
5748         if (test_bit(STMMAC_DOWN, &priv->state))
5749                 return IRQ_HANDLED;
5750
5751         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5752
5753         if (unlikely(status & tx_hard_error_bump_tc)) {
5754                 /* Try to bump up the dma threshold on this failure */
5755                 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5756                     tc <= 256) {
5757                         tc += 64;
5758                         if (priv->plat->force_thresh_dma_mode)
5759                                 stmmac_set_dma_operation_mode(priv,
5760                                                               tc,
5761                                                               tc,
5762                                                               chan);
5763                         else
5764                                 stmmac_set_dma_operation_mode(priv,
5765                                                               tc,
5766                                                               SF_DMA_MODE,
5767                                                               chan);
5768                         priv->xstats.threshold = tc;
5769                 }
5770         } else if (unlikely(status == tx_hard_error)) {
5771                 stmmac_tx_err(priv, chan);
5772         }
5773
5774         return IRQ_HANDLED;
5775 }
5776
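/* Per-channel RX interrupt handler (MSI vectors): recover the private data
 * from the RX queue and let stmmac_napi_check() schedule the RX NAPI for
 * this channel.
 */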
5777 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5778 {
5779         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5780         int chan = rx_q->queue_index;
5781         struct stmmac_priv *priv;
5782
5783         priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5784
5785         if (unlikely(!data)) {
5786                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5787                 return IRQ_NONE;
5788         }
5789
5790         /* Check if adapter is up */
5791         if (test_bit(STMMAC_DOWN, &priv->state))
5792                 return IRQ_HANDLED;
5793
5794         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5795
5796         return IRQ_HANDLED;
5797 }
5798
5799 #ifdef CONFIG_NET_POLL_CONTROLLER
5800 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5801  * to allow network I/O with interrupts disabled.
5802  */
5803 static void stmmac_poll_controller(struct net_device *dev)
5804 {
5805         struct stmmac_priv *priv = netdev_priv(dev);
5806         int i;
5807
5808         /* If adapter is down, do nothing */
5809         if (test_bit(STMMAC_DOWN, &priv->state))
5810                 return;
5811
5812         if (priv->plat->multi_msi_en) {
5813                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5814                         stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5815
5816                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5817                         stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5818         } else {
5819                 disable_irq(dev->irq);
5820                 stmmac_interrupt(dev->irq, dev);
5821                 enable_irq(dev->irq);
5822         }
5823 }
5824 #endif
5825
5826 /**
5827  *  stmmac_ioctl - Entry point for the Ioctl
5828  *  @dev: Device pointer.
5829  *  @rq: An IOCTL-specific structure that can contain a pointer to
5830  *  a proprietary structure used to pass information to the driver.
5831  *  @cmd: IOCTL command
5832  *  Description:
5833  *  Currently it supports phylink_mii_ioctl(...) and HW time stamping.
5834  */
5835 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5836 {
5837         struct stmmac_priv *priv = netdev_priv(dev);
5838         int ret = -EOPNOTSUPP;
5839
5840         if (!netif_running(dev))
5841                 return -EINVAL;
5842
5843         switch (cmd) {
5844         case SIOCGMIIPHY:
5845         case SIOCGMIIREG:
5846         case SIOCSMIIREG:
5847                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5848                 break;
5849         case SIOCSHWTSTAMP:
5850                 ret = stmmac_hwtstamp_set(dev, rq);
5851                 break;
5852         case SIOCGHWTSTAMP:
5853                 ret = stmmac_hwtstamp_get(dev, rq);
5854                 break;
5855         default:
5856                 break;
5857         }
5858
5859         return ret;
5860 }
5861
5862 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5863                                     void *cb_priv)
5864 {
5865         struct stmmac_priv *priv = cb_priv;
5866         int ret = -EOPNOTSUPP;
5867
5868         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5869                 return ret;
5870
5871         __stmmac_disable_all_queues(priv);
5872
5873         switch (type) {
5874         case TC_SETUP_CLSU32:
5875                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5876                 break;
5877         case TC_SETUP_CLSFLOWER:
5878                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5879                 break;
5880         default:
5881                 break;
5882         }
5883
5884         stmmac_enable_all_queues(priv);
5885         return ret;
5886 }
5887
5888 static LIST_HEAD(stmmac_block_cb_list);
5889
5890 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5891                            void *type_data)
5892 {
5893         struct stmmac_priv *priv = netdev_priv(ndev);
5894
5895         switch (type) {
5896         case TC_SETUP_BLOCK:
5897                 return flow_block_cb_setup_simple(type_data,
5898                                                   &stmmac_block_cb_list,
5899                                                   stmmac_setup_tc_block_cb,
5900                                                   priv, priv, true);
5901         case TC_SETUP_QDISC_CBS:
5902                 return stmmac_tc_setup_cbs(priv, priv, type_data);
5903         case TC_SETUP_QDISC_TAPRIO:
5904                 return stmmac_tc_setup_taprio(priv, priv, type_data);
5905         case TC_SETUP_QDISC_ETF:
5906                 return stmmac_tc_setup_etf(priv, priv, type_data);
5907         default:
5908                 return -EOPNOTSUPP;
5909         }
5910 }
5911
5912 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5913                                struct net_device *sb_dev)
5914 {
5915         int gso = skb_shinfo(skb)->gso_type;
5916
5917         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
5918                 /*
5919                  * There is no way to determine the number of TSO/USO
5920                  * capable queues. Let's always use queue 0, because if
5921                  * TSO/USO is supported then at least this one will be
5922                  * capable.
5923                  */
5924                 return 0;
5925         }
5926
5927         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5928 }
5929
5930 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5931 {
5932         struct stmmac_priv *priv = netdev_priv(ndev);
5933         int ret = 0;
5934
5935         ret = pm_runtime_get_sync(priv->device);
5936         if (ret < 0) {
5937                 pm_runtime_put_noidle(priv->device);
5938                 return ret;
5939         }
5940
5941         ret = eth_mac_addr(ndev, addr);
5942         if (ret)
5943                 goto set_mac_error;
5944
5945         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5946
5947 set_mac_error:
5948         pm_runtime_put(priv->device);
5949
5950         return ret;
5951 }
5952
5953 #ifdef CONFIG_DEBUG_FS
5954 static struct dentry *stmmac_fs_dir;
5955
5956 static void sysfs_display_ring(void *head, int size, int extend_desc,
5957                                struct seq_file *seq, dma_addr_t dma_phy_addr)
5958 {
5959         int i;
5960         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5961         struct dma_desc *p = (struct dma_desc *)head;
5962         dma_addr_t dma_addr;
5963
5964         for (i = 0; i < size; i++) {
5965                 if (extend_desc) {
5966                         dma_addr = dma_phy_addr + i * sizeof(*ep);
5967                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5968                                    i, &dma_addr,
5969                                    le32_to_cpu(ep->basic.des0),
5970                                    le32_to_cpu(ep->basic.des1),
5971                                    le32_to_cpu(ep->basic.des2),
5972                                    le32_to_cpu(ep->basic.des3));
5973                         ep++;
5974                 } else {
5975                         dma_addr = dma_phy_addr + i * sizeof(*p);
5976                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5977                                    i, &dma_addr,
5978                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5979                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5980                         p++;
5981                 }
5982                 seq_putc(seq, '\n');
5983         }
5984 }
5985
5986 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5987 {
5988         struct net_device *dev = seq->private;
5989         struct stmmac_priv *priv = netdev_priv(dev);
5990         u32 rx_count = priv->plat->rx_queues_to_use;
5991         u32 tx_count = priv->plat->tx_queues_to_use;
5992         u32 queue;
5993
5994         if ((dev->flags & IFF_UP) == 0)
5995                 return 0;
5996
5997         for (queue = 0; queue < rx_count; queue++) {
5998                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5999
6000                 seq_printf(seq, "RX Queue %d:\n", queue);
6001
6002                 if (priv->extend_desc) {
6003                         seq_printf(seq, "Extended descriptor ring:\n");
6004                         sysfs_display_ring((void *)rx_q->dma_erx,
6005                                            priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6006                 } else {
6007                         seq_printf(seq, "Descriptor ring:\n");
6008                         sysfs_display_ring((void *)rx_q->dma_rx,
6009                                            priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6010                 }
6011         }
6012
6013         for (queue = 0; queue < tx_count; queue++) {
6014                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6015
6016                 seq_printf(seq, "TX Queue %d:\n", queue);
6017
6018                 if (priv->extend_desc) {
6019                         seq_printf(seq, "Extended descriptor ring:\n");
6020                         sysfs_display_ring((void *)tx_q->dma_etx,
6021                                            priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6022                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6023                         seq_printf(seq, "Descriptor ring:\n");
6024                         sysfs_display_ring((void *)tx_q->dma_tx,
6025                                            priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6026                 }
6027         }
6028
6029         return 0;
6030 }
6031 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6032
6033 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6034 {
6035         struct net_device *dev = seq->private;
6036         struct stmmac_priv *priv = netdev_priv(dev);
6037
6038         if (!priv->hw_cap_support) {
6039                 seq_printf(seq, "DMA HW features not supported\n");
6040                 return 0;
6041         }
6042
6043         seq_printf(seq, "==============================\n");
6044         seq_printf(seq, "\tDMA HW features\n");
6045         seq_printf(seq, "==============================\n");
6046
6047         seq_printf(seq, "\t10/100 Mbps: %s\n",
6048                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6049         seq_printf(seq, "\t1000 Mbps: %s\n",
6050                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6051         seq_printf(seq, "\tHalf duplex: %s\n",
6052                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6053         seq_printf(seq, "\tHash Filter: %s\n",
6054                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6055         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6056                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6057         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6058                    (priv->dma_cap.pcs) ? "Y" : "N");
6059         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6060                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6061         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6062                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6063         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6064                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6065         seq_printf(seq, "\tRMON module: %s\n",
6066                    (priv->dma_cap.rmon) ? "Y" : "N");
6067         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6068                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6069         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6070                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6071         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6072                    (priv->dma_cap.eee) ? "Y" : "N");
6073         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6074         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6075                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6076         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6077                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6078                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6079         } else {
6080                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6081                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6082                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6083                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6084         }
6085         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6086                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6087         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6088                    priv->dma_cap.number_rx_channel);
6089         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6090                    priv->dma_cap.number_tx_channel);
6091         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6092                    priv->dma_cap.number_rx_queues);
6093         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6094                    priv->dma_cap.number_tx_queues);
6095         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6096                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6097         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6098         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6099         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6100         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6101         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6102                    priv->dma_cap.pps_out_num);
6103         seq_printf(seq, "\tSafety Features: %s\n",
6104                    priv->dma_cap.asp ? "Y" : "N");
6105         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6106                    priv->dma_cap.frpsel ? "Y" : "N");
6107         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6108                    priv->dma_cap.addr64);
6109         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6110                    priv->dma_cap.rssen ? "Y" : "N");
6111         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6112                    priv->dma_cap.vlhash ? "Y" : "N");
6113         seq_printf(seq, "\tSplit Header: %s\n",
6114                    priv->dma_cap.sphen ? "Y" : "N");
6115         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6116                    priv->dma_cap.vlins ? "Y" : "N");
6117         seq_printf(seq, "\tDouble VLAN: %s\n",
6118                    priv->dma_cap.dvlan ? "Y" : "N");
6119         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6120                    priv->dma_cap.l3l4fnum);
6121         seq_printf(seq, "\tARP Offloading: %s\n",
6122                    priv->dma_cap.arpoffsel ? "Y" : "N");
6123         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6124                    priv->dma_cap.estsel ? "Y" : "N");
6125         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6126                    priv->dma_cap.fpesel ? "Y" : "N");
6127         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6128                    priv->dma_cap.tbssel ? "Y" : "N");
6129         return 0;
6130 }
6131 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6132
6133 /* Use network device events to rename debugfs file entries.
6134  */
6135 static int stmmac_device_event(struct notifier_block *unused,
6136                                unsigned long event, void *ptr)
6137 {
6138         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6139         struct stmmac_priv *priv = netdev_priv(dev);
6140
6141         if (dev->netdev_ops != &stmmac_netdev_ops)
6142                 goto done;
6143
6144         switch (event) {
6145         case NETDEV_CHANGENAME:
6146                 if (priv->dbgfs_dir)
6147                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6148                                                          priv->dbgfs_dir,
6149                                                          stmmac_fs_dir,
6150                                                          dev->name);
6151                 break;
6152         }
6153 done:
6154         return NOTIFY_DONE;
6155 }
6156
6157 static struct notifier_block stmmac_notifier = {
6158         .notifier_call = stmmac_device_event,
6159 };
6160
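/* Create the per-netdev debugfs directory with two read-only files:
 * "descriptors_status" (a dump of the RX/TX descriptor rings) and "dma_cap"
 * (the decoded DMA HW feature register).  The parent stmmac_fs_dir is
 * created at module init; assuming the usual "stmmaceth" root name, the
 * files can be read with e.g.
 *   cat /sys/kernel/debug/stmmaceth/<ifname>/dma_cap
 */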
6161 static void stmmac_init_fs(struct net_device *dev)
6162 {
6163         struct stmmac_priv *priv = netdev_priv(dev);
6164
6165         rtnl_lock();
6166
6167         /* Create per netdev entries */
6168         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6169
6170         /* Entry to report DMA RX/TX rings */
6171         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6172                             &stmmac_rings_status_fops);
6173
6174         /* Entry to report the DMA HW features */
6175         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6176                             &stmmac_dma_cap_fops);
6177
6178         rtnl_unlock();
6179 }
6180
6181 static void stmmac_exit_fs(struct net_device *dev)
6182 {
6183         struct stmmac_priv *priv = netdev_priv(dev);
6184
6185         debugfs_remove_recursive(priv->dbgfs_dir);
6186 }
6187 #endif /* CONFIG_DEBUG_FS */
6188
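/* Bitwise little-endian CRC-32 (polynomial 0xEDB88320) over the VLAN ID
 * bits of @vid_le.  stmmac_vlan_update() below bit-reverses the inverted
 * CRC and keeps the top four bits (>> 28) as the index of the bit to set
 * in the VLAN hash filter.
 */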
6189 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6190 {
6191         unsigned char *data = (unsigned char *)&vid_le;
6192         unsigned char data_byte = 0;
6193         u32 crc = ~0x0;
6194         u32 temp = 0;
6195         int i, bits;
6196
6197         bits = get_bitmask_order(VLAN_VID_MASK);
6198         for (i = 0; i < bits; i++) {
6199                 if ((i % 8) == 0)
6200                         data_byte = data[i / 8];
6201
6202                 temp = ((crc & 1) ^ data_byte) & 1;
6203                 crc >>= 1;
6204                 data_byte >>= 1;
6205
6206                 if (temp)
6207                         crc ^= 0xedb88320;
6208         }
6209
6210         return crc;
6211 }
6212
6213 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6214 {
6215         u32 crc, hash = 0;
6216         __le16 pmatch = 0;
6217         int count = 0;
6218         u16 vid = 0;
6219
6220         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6221                 __le16 vid_le = cpu_to_le16(vid);
6222                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6223                 hash |= (1 << crc);
6224                 count++;
6225         }
6226
6227         if (!priv->dma_cap.vlhash) {
6228                 if (count > 2) /* VID = 0 always passes filter */
6229                         return -EOPNOTSUPP;
6230
6231                 pmatch = cpu_to_le16(vid);
6232                 hash = 0;
6233         }
6234
6235         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6236 }
6237
6238 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6239 {
6240         struct stmmac_priv *priv = netdev_priv(ndev);
6241         bool is_double = false;
6242         int ret;
6243
6244         if (be16_to_cpu(proto) == ETH_P_8021AD)
6245                 is_double = true;
6246
6247         set_bit(vid, priv->active_vlans);
6248         ret = stmmac_vlan_update(priv, is_double);
6249         if (ret) {
6250                 clear_bit(vid, priv->active_vlans);
6251                 return ret;
6252         }
6253
6254         if (priv->hw->num_vlan) {
6255                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6256                 if (ret)
6257                         return ret;
6258         }
6259
6260         return 0;
6261 }
6262
6263 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6264 {
6265         struct stmmac_priv *priv = netdev_priv(ndev);
6266         bool is_double = false;
6267         int ret;
6268
6269         ret = pm_runtime_get_sync(priv->device);
6270         if (ret < 0) {
6271                 pm_runtime_put_noidle(priv->device);
6272                 return ret;
6273         }
6274
6275         if (be16_to_cpu(proto) == ETH_P_8021AD)
6276                 is_double = true;
6277
6278         clear_bit(vid, priv->active_vlans);
6279
6280         if (priv->hw->num_vlan) {
6281                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6282                 if (ret)
6283                         goto del_vlan_error;
6284         }
6285
6286         ret = stmmac_vlan_update(priv, is_double);
6287
6288 del_vlan_error:
6289         pm_runtime_put(priv->device);
6290
6291         return ret;
6292 }
6293
6294 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6295 {
6296         struct stmmac_priv *priv = netdev_priv(dev);
6297
6298         switch (bpf->command) {
6299         case XDP_SETUP_PROG:
6300                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6301         case XDP_SETUP_XSK_POOL:
6302                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6303                                              bpf->xsk.queue_id);
6304         default:
6305                 return -EOPNOTSUPP;
6306         }
6307 }
6308
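/* .ndo_xdp_xmit: transmit a batch of XDP frames.  A TX queue is picked
 * based on the current CPU and the corresponding netdev queue lock is
 * taken, since the ring is shared with the regular xmit path.  Frames are
 * queued until stmmac_xdp_xmit_xdpf() reports STMMAC_XDP_CONSUMED (e.g. no
 * free descriptors); with XDP_XMIT_FLUSH the tail pointer is updated and
 * the TX coalescing timer re-armed.  Returns the number of frames queued.
 */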
6309 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6310                            struct xdp_frame **frames, u32 flags)
6311 {
6312         struct stmmac_priv *priv = netdev_priv(dev);
6313         int cpu = smp_processor_id();
6314         struct netdev_queue *nq;
6315         int i, nxmit = 0;
6316         int queue;
6317
6318         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6319                 return -ENETDOWN;
6320
6321         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6322                 return -EINVAL;
6323
6324         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6325         nq = netdev_get_tx_queue(priv->dev, queue);
6326
6327         __netif_tx_lock(nq, cpu);
6328         /* Avoids TX time-out as we are sharing with slow path */
6329         nq->trans_start = jiffies;
6330
6331         for (i = 0; i < num_frames; i++) {
6332                 int res;
6333
6334                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6335                 if (res == STMMAC_XDP_CONSUMED)
6336                         break;
6337
6338                 nxmit++;
6339         }
6340
6341         if (flags & XDP_XMIT_FLUSH) {
6342                 stmmac_flush_tx_descriptors(priv, queue);
6343                 stmmac_tx_timer_arm(priv, queue);
6344         }
6345
6346         __netif_tx_unlock(nq);
6347
6348         return nxmit;
6349 }
6350
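/* The helpers below tear down or bring up a single RX/TX queue at run time:
 * the channel interrupt is masked, the DMA engine stopped and the descriptor
 * resources freed (disable), or re-allocated, re-initialised and re-started
 * (enable).  They are non-static so that the XDP/XSK setup code can use
 * them, e.g. when attaching or detaching a buffer pool on a live interface.
 */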
6351 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6352 {
6353         struct stmmac_channel *ch = &priv->channel[queue];
6354         unsigned long flags;
6355
6356         spin_lock_irqsave(&ch->lock, flags);
6357         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6358         spin_unlock_irqrestore(&ch->lock, flags);
6359
6360         stmmac_stop_rx_dma(priv, queue);
6361         __free_dma_rx_desc_resources(priv, queue);
6362 }
6363
6364 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6365 {
6366         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6367         struct stmmac_channel *ch = &priv->channel[queue];
6368         unsigned long flags;
6369         u32 buf_size;
6370         int ret;
6371
6372         ret = __alloc_dma_rx_desc_resources(priv, queue);
6373         if (ret) {
6374                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6375                 return;
6376         }
6377
6378         ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6379         if (ret) {
6380                 __free_dma_rx_desc_resources(priv, queue);
6381                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6382                 return;
6383         }
6384
6385         stmmac_clear_rx_descriptors(priv, queue);
6386
6387         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6388                             rx_q->dma_rx_phy, rx_q->queue_index);
6389
6390         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6391                              sizeof(struct dma_desc));
6392         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6393                                rx_q->rx_tail_addr, rx_q->queue_index);
6394
6395         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6396                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6397                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6398                                       buf_size,
6399                                       rx_q->queue_index);
6400         } else {
6401                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6402                                       priv->dma_buf_sz,
6403                                       rx_q->queue_index);
6404         }
6405
6406         stmmac_start_rx_dma(priv, queue);
6407
6408         spin_lock_irqsave(&ch->lock, flags);
6409         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6410         spin_unlock_irqrestore(&ch->lock, flags);
6411 }
6412
6413 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6414 {
6415         struct stmmac_channel *ch = &priv->channel[queue];
6416         unsigned long flags;
6417
6418         spin_lock_irqsave(&ch->lock, flags);
6419         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6420         spin_unlock_irqrestore(&ch->lock, flags);
6421
6422         stmmac_stop_tx_dma(priv, queue);
6423         __free_dma_tx_desc_resources(priv, queue);
6424 }
6425
6426 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6427 {
6428         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6429         struct stmmac_channel *ch = &priv->channel[queue];
6430         unsigned long flags;
6431         int ret;
6432
6433         ret = __alloc_dma_tx_desc_resources(priv, queue);
6434         if (ret) {
6435                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6436                 return;
6437         }
6438
6439         ret = __init_dma_tx_desc_rings(priv, queue);
6440         if (ret) {
6441                 __free_dma_tx_desc_resources(priv, queue);
6442                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6443                 return;
6444         }
6445
6446         stmmac_clear_tx_descriptors(priv, queue);
6447
6448         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6449                             tx_q->dma_tx_phy, tx_q->queue_index);
6450
6451         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6452                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6453
6454         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6455         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6456                                tx_q->tx_tail_addr, tx_q->queue_index);
6457
6458         stmmac_start_tx_dma(priv, queue);
6459
6460         spin_lock_irqsave(&ch->lock, flags);
6461         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6462         spin_unlock_irqrestore(&ch->lock, flags);
6463 }
6464
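/* .ndo_xsk_wakeup: called by the AF_XDP framework when user space needs the
 * RX ring refilled or pending TX descriptors kicked.  Since the EQoS core
 * has no per-channel software interrupt, the combined rxtx NAPI is simply
 * scheduled (or marked missed if it is already running).
 */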
6465 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6466 {
6467         struct stmmac_priv *priv = netdev_priv(dev);
6468         struct stmmac_rx_queue *rx_q;
6469         struct stmmac_tx_queue *tx_q;
6470         struct stmmac_channel *ch;
6471
6472         if (test_bit(STMMAC_DOWN, &priv->state) ||
6473             !netif_carrier_ok(priv->dev))
6474                 return -ENETDOWN;
6475
6476         if (!stmmac_xdp_is_enabled(priv))
6477                 return -ENXIO;
6478
6479         if (queue >= priv->plat->rx_queues_to_use ||
6480             queue >= priv->plat->tx_queues_to_use)
6481                 return -EINVAL;
6482
6483         rx_q = &priv->rx_queue[queue];
6484         tx_q = &priv->tx_queue[queue];
6485         ch = &priv->channel[queue];
6486
6487         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6488                 return -ENXIO;
6489
6490         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6491                 /* EQoS does not have a per-DMA channel SW interrupt,
6492                  * so we schedule the RX NAPI straight away.
6493                  */
6494                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6495                         __napi_schedule(&ch->rxtx_napi);
6496         }
6497
6498         return 0;
6499 }
6500
6501 static const struct net_device_ops stmmac_netdev_ops = {
6502         .ndo_open = stmmac_open,
6503         .ndo_start_xmit = stmmac_xmit,
6504         .ndo_stop = stmmac_release,
6505         .ndo_change_mtu = stmmac_change_mtu,
6506         .ndo_fix_features = stmmac_fix_features,
6507         .ndo_set_features = stmmac_set_features,
6508         .ndo_set_rx_mode = stmmac_set_rx_mode,
6509         .ndo_tx_timeout = stmmac_tx_timeout,
6510         .ndo_eth_ioctl = stmmac_ioctl,
6511         .ndo_setup_tc = stmmac_setup_tc,
6512         .ndo_select_queue = stmmac_select_queue,
6513 #ifdef CONFIG_NET_POLL_CONTROLLER
6514         .ndo_poll_controller = stmmac_poll_controller,
6515 #endif
6516         .ndo_set_mac_address = stmmac_set_mac_address,
6517         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6518         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6519         .ndo_bpf = stmmac_bpf,
6520         .ndo_xdp_xmit = stmmac_xdp_xmit,
6521         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6522 };
6523
6524 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6525 {
6526         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6527                 return;
6528         if (test_bit(STMMAC_DOWN, &priv->state))
6529                 return;
6530
6531         netdev_err(priv->dev, "Reset adapter.\n");
6532
6533         rtnl_lock();
6534         netif_trans_update(priv->dev);
6535         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6536                 usleep_range(1000, 2000);
6537
6538         set_bit(STMMAC_DOWN, &priv->state);
6539         dev_close(priv->dev);
6540         dev_open(priv->dev, NULL);
6541         clear_bit(STMMAC_DOWN, &priv->state);
6542         clear_bit(STMMAC_RESETING, &priv->state);
6543         rtnl_unlock();
6544 }
6545
6546 static void stmmac_service_task(struct work_struct *work)
6547 {
6548         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6549                         service_task);
6550
6551         stmmac_reset_subtask(priv);
6552         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6553 }
6554
6555 /**
6556  *  stmmac_hw_init - Init the MAC device
6557  *  @priv: driver private structure
6558  *  Description: this function is to configure the MAC device according to
6559  *  some platform parameters or the HW capability register. It prepares the
6560  *  driver to use either ring or chain modes and to setup either enhanced or
6561  *  normal descriptors.
6562  */
6563 static int stmmac_hw_init(struct stmmac_priv *priv)
6564 {
6565         int ret;
6566
6567         /* dwmac-sun8i only works in chain mode */
6568         if (priv->plat->has_sun8i)
6569                 chain_mode = 1;
6570         priv->chain_mode = chain_mode;
6571
6572         /* Initialize HW Interface */
6573         ret = stmmac_hwif_init(priv);
6574         if (ret)
6575                 return ret;
6576
6577         /* Get the HW capabilities (GMAC cores newer than 3.50a) */
6578         priv->hw_cap_support = stmmac_get_hw_features(priv);
6579         if (priv->hw_cap_support) {
6580                 dev_info(priv->device, "DMA HW capability register supported\n");
6581
6582                 /* We can override some gmac/dma configuration fields
6583                  * passed through the platform (e.g. enh_desc, tx_coe)
6584                  * with the values from the HW capability register
6585                  * (if supported).
6586                  */
6587                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6588                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6589                                 !priv->plat->use_phy_wol;
6590                 priv->hw->pmt = priv->plat->pmt;
6591                 if (priv->dma_cap.hash_tb_sz) {
6592                         priv->hw->multicast_filter_bins =
6593                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6594                         priv->hw->mcast_bits_log2 =
6595                                         ilog2(priv->hw->multicast_filter_bins);
6596                 }
6597
6598                 /* TXCOE doesn't work in thresh DMA mode */
6599                 if (priv->plat->force_thresh_dma_mode)
6600                         priv->plat->tx_coe = 0;
6601                 else
6602                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6603
6604                 /* In the GMAC4 case, rx_coe comes from the HW cap register. */
6605                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6606
6607                 if (priv->dma_cap.rx_coe_type2)
6608                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6609                 else if (priv->dma_cap.rx_coe_type1)
6610                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6611
6612         } else {
6613                 dev_info(priv->device, "No HW DMA feature register supported\n");
6614         }
6615
6616         if (priv->plat->rx_coe) {
6617                 priv->hw->rx_csum = priv->plat->rx_coe;
6618                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6619                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6620                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6621         }
6622         if (priv->plat->tx_coe)
6623                 dev_info(priv->device, "TX Checksum insertion supported\n");
6624
6625         if (priv->plat->pmt) {
6626                 dev_info(priv->device, "Wake-Up On LAN supported\n");
6627                 device_set_wakeup_capable(priv->device, 1);
6628         }
6629
6630         if (priv->dma_cap.tsoen)
6631                 dev_info(priv->device, "TSO supported\n");
6632
6633         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6634         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6635
6636         /* Run HW quirks, if any */
6637         if (priv->hwif_quirks) {
6638                 ret = priv->hwif_quirks(priv);
6639                 if (ret)
6640                         return ret;
6641         }
6642
6643         /* Rx Watchdog is available in cores newer than 3.40.
6644          * In some cases, for example on buggy HW, this feature
6645          * has to be disabled, which can be done by passing the
6646          * riwt_off field from the platform.
6647          */
6648         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6649             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6650                 priv->use_riwt = 1;
6651                 dev_info(priv->device,
6652                          "Enable RX Mitigation via HW Watchdog Timer\n");
6653         }
6654
6655         return 0;
6656 }
6657
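/* Register the per-channel NAPI contexts: an RX NAPI and a TX NAPI for each
 * DMA channel, plus a combined rxtx NAPI for channels that have both an RX
 * and a TX queue (used, for instance, by the XSK wakeup path above).
 */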
6658 static void stmmac_napi_add(struct net_device *dev)
6659 {
6660         struct stmmac_priv *priv = netdev_priv(dev);
6661         u32 queue, maxq;
6662
6663         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6664
6665         for (queue = 0; queue < maxq; queue++) {
6666                 struct stmmac_channel *ch = &priv->channel[queue];
6667
6668                 ch->priv_data = priv;
6669                 ch->index = queue;
6670                 spin_lock_init(&ch->lock);
6671
6672                 if (queue < priv->plat->rx_queues_to_use) {
6673                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6674                                        NAPI_POLL_WEIGHT);
6675                 }
6676                 if (queue < priv->plat->tx_queues_to_use) {
6677                         netif_tx_napi_add(dev, &ch->tx_napi,
6678                                           stmmac_napi_poll_tx,
6679                                           NAPI_POLL_WEIGHT);
6680                 }
6681                 if (queue < priv->plat->rx_queues_to_use &&
6682                     queue < priv->plat->tx_queues_to_use) {
6683                         netif_napi_add(dev, &ch->rxtx_napi,
6684                                        stmmac_napi_poll_rxtx,
6685                                        NAPI_POLL_WEIGHT);
6686                 }
6687         }
6688 }
6689
6690 static void stmmac_napi_del(struct net_device *dev)
6691 {
6692         struct stmmac_priv *priv = netdev_priv(dev);
6693         u32 queue, maxq;
6694
6695         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6696
6697         for (queue = 0; queue < maxq; queue++) {
6698                 struct stmmac_channel *ch = &priv->channel[queue];
6699
6700                 if (queue < priv->plat->rx_queues_to_use)
6701                         netif_napi_del(&ch->rx_napi);
6702                 if (queue < priv->plat->tx_queues_to_use)
6703                         netif_napi_del(&ch->tx_napi);
6704                 if (queue < priv->plat->rx_queues_to_use &&
6705                     queue < priv->plat->tx_queues_to_use) {
6706                         netif_napi_del(&ch->rxtx_napi);
6707                 }
6708         }
6709 }
6710
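/* Re-configure the number of RX/TX queues: close the interface if it is
 * running, re-register the NAPI contexts for the new queue count and
 * re-open it.  stmmac_reinit_ringparam() below does the same for the
 * descriptor ring sizes.  Both are typically invoked from the ethtool
 * set_channels / set_ringparam handlers.
 */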
6711 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6712 {
6713         struct stmmac_priv *priv = netdev_priv(dev);
6714         int ret = 0;
6715
6716         if (netif_running(dev))
6717                 stmmac_release(dev);
6718
6719         stmmac_napi_del(dev);
6720
6721         priv->plat->rx_queues_to_use = rx_cnt;
6722         priv->plat->tx_queues_to_use = tx_cnt;
6723
6724         stmmac_napi_add(dev);
6725
6726         if (netif_running(dev))
6727                 ret = stmmac_open(dev);
6728
6729         return ret;
6730 }
6731
6732 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6733 {
6734         struct stmmac_priv *priv = netdev_priv(dev);
6735         int ret = 0;
6736
6737         if (netif_running(dev))
6738                 stmmac_release(dev);
6739
6740         priv->dma_rx_size = rx_size;
6741         priv->dma_tx_size = tx_size;
6742
6743         if (netif_running(dev))
6744                 ret = stmmac_open(dev);
6745
6746         return ret;
6747 }
6748
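/* FPE link-partner handshake worker: poll up to 20 times, 500 ms apart.
 * While the local station is capable (or entering ON) and the partner has
 * not reached FPE ON, a verify mPacket is (re)sent; once both sides report
 * ENTERING_ON, frame preemption is programmed in the hardware and both
 * states move to FPE_STATE_ON.
 */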
6749 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6750 static void stmmac_fpe_lp_task(struct work_struct *work)
6751 {
6752         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6753                                                 fpe_task);
6754         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6755         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6756         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6757         bool *hs_enable = &fpe_cfg->hs_enable;
6758         bool *enable = &fpe_cfg->enable;
6759         int retries = 20;
6760
6761         while (retries-- > 0) {
6762                 /* Bail out immediately if FPE handshake is OFF */
6763                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6764                         break;
6765
6766                 if (*lo_state == FPE_STATE_ENTERING_ON &&
6767                     *lp_state == FPE_STATE_ENTERING_ON) {
6768                         stmmac_fpe_configure(priv, priv->ioaddr,
6769                                              priv->plat->tx_queues_to_use,
6770                                              priv->plat->rx_queues_to_use,
6771                                              *enable);
6772
6773                         netdev_info(priv->dev, "configured FPE\n");
6774
6775                         *lo_state = FPE_STATE_ON;
6776                         *lp_state = FPE_STATE_ON;
6777                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6778                         break;
6779                 }
6780
6781                 if ((*lo_state == FPE_STATE_CAPABLE ||
6782                      *lo_state == FPE_STATE_ENTERING_ON) &&
6783                      *lp_state != FPE_STATE_ON) {
6784                         netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
6785                                     *lo_state, *lp_state);
6786                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6787                                                 MPACKET_VERIFY);
6788                 }
6789                 /* Sleep then retry */
6790                 msleep(500);
6791         }
6792
6793         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6794 }
6795
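/* Start or stop the FPE verification handshake: on enable, a verify mPacket
 * is sent to kick off the exchange completed by stmmac_fpe_lp_task(); on
 * disable, both the local and the link-partner state are reset to
 * FPE_STATE_OFF.
 */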
6796 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6797 {
6798         if (priv->plat->fpe_cfg->hs_enable != enable) {
6799                 if (enable) {
6800                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6801                                                 MPACKET_VERIFY);
6802                 } else {
6803                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6804                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6805                 }
6806
6807                 priv->plat->fpe_cfg->hs_enable = enable;
6808         }
6809 }
6810
6811 /**
6812  * stmmac_dvr_probe
6813  * @device: device pointer
6814  * @plat_dat: platform data pointer
6815  * @res: stmmac resource pointer
6816  * Description: this is the main probe function, used to
6817  * call alloc_etherdev and allocate the private structure.
6818  * Return:
6819  * 0 on success, otherwise a negative errno.
6820  */
6821 int stmmac_dvr_probe(struct device *device,
6822                      struct plat_stmmacenet_data *plat_dat,
6823                      struct stmmac_resources *res)
6824 {
6825         struct net_device *ndev = NULL;
6826         struct stmmac_priv *priv;
6827         u32 rxq;
6828         int i, ret = 0;
6829
6830         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6831                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6832         if (!ndev)
6833                 return -ENOMEM;
6834
6835         SET_NETDEV_DEV(ndev, device);
6836
6837         priv = netdev_priv(ndev);
6838         priv->device = device;
6839         priv->dev = ndev;
6840
6841         stmmac_set_ethtool_ops(ndev);
6842         priv->pause = pause;
6843         priv->plat = plat_dat;
6844         priv->ioaddr = res->addr;
6845         priv->dev->base_addr = (unsigned long)res->addr;
6846         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6847
6848         priv->dev->irq = res->irq;
6849         priv->wol_irq = res->wol_irq;
6850         priv->lpi_irq = res->lpi_irq;
6851         priv->sfty_ce_irq = res->sfty_ce_irq;
6852         priv->sfty_ue_irq = res->sfty_ue_irq;
6853         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6854                 priv->rx_irq[i] = res->rx_irq[i];
6855         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6856                 priv->tx_irq[i] = res->tx_irq[i];
6857
6858         if (!is_zero_ether_addr(res->mac))
6859                 eth_hw_addr_set(priv->dev, res->mac);
6860
6861         dev_set_drvdata(device, priv->dev);
6862
6863         /* Verify driver arguments */
6864         stmmac_verify_args();
6865
6866         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6867         if (!priv->af_xdp_zc_qps)
6868                 return -ENOMEM;
6869
6870         /* Allocate workqueue */
6871         priv->wq = create_singlethread_workqueue("stmmac_wq");
6872         if (!priv->wq) {
6873                 dev_err(priv->device, "failed to create workqueue\n");
6874                 return -ENOMEM;
6875         }
6876
6877         INIT_WORK(&priv->service_task, stmmac_service_task);
6878
6879         /* Initialize Link Partner FPE workqueue */
6880         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
6881
6882         /* Override with kernel parameters if supplied XXX CRS XXX
6883          * this needs to have multiple instances
6884          */
6885         if ((phyaddr >= 0) && (phyaddr <= 31))
6886                 priv->plat->phy_addr = phyaddr;
6887
6888         if (priv->plat->stmmac_rst) {
6889                 ret = reset_control_assert(priv->plat->stmmac_rst);
6890                 reset_control_deassert(priv->plat->stmmac_rst);
6891                 /* Some reset controllers have only a reset callback instead
6892                  * of the assert + deassert callbacks pair.
6893                  */
6894                 if (ret == -ENOTSUPP)
6895                         reset_control_reset(priv->plat->stmmac_rst);
6896         }
6897
6898         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
6899         if (ret == -ENOTSUPP)
6900                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
6901                         ERR_PTR(ret));
6902
6903         /* Init MAC and get the capabilities */
6904         ret = stmmac_hw_init(priv);
6905         if (ret)
6906                 goto error_hw_init;
6907
6908         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
6909          */
6910         if (priv->synopsys_id < DWMAC_CORE_5_20)
6911                 priv->plat->dma_cfg->dche = false;
6912
6913         stmmac_check_ether_addr(priv);
6914
6915         ndev->netdev_ops = &stmmac_netdev_ops;
6916
6917         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6918                             NETIF_F_RXCSUM;
6919
6920         ret = stmmac_tc_init(priv, priv);
6921         if (!ret) {
6922                 ndev->hw_features |= NETIF_F_HW_TC;
6923         }
6924
6925         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
6926                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
6927                 if (priv->plat->has_gmac4)
6928                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
6929                 priv->tso = true;
6930                 dev_info(priv->device, "TSO feature enabled\n");
6931         }
6932
6933         if (priv->dma_cap.sphen) {
6934                 ndev->hw_features |= NETIF_F_GRO;
6935                 priv->sph_cap = true;
6936                 priv->sph = priv->sph_cap;
6937                 dev_info(priv->device, "SPH feature enabled\n");
6938         }
6939
6940         /* The current IP register MAC_HW_Feature1[ADDR64] only defines
6941          * 32/40/64 bit widths, but some SoCs support other widths: e.g. the
6942          * i.MX8MP supports 34 bits, which maps to 40 bits in MAC_HW_Feature1[ADDR64].
6943          * So overwrite dma_cap.addr64 according to the real HW design.
6944          */
6945         if (priv->plat->addr64)
6946                 priv->dma_cap.addr64 = priv->plat->addr64;
6947
6948         if (priv->dma_cap.addr64) {
6949                 ret = dma_set_mask_and_coherent(device,
6950                                 DMA_BIT_MASK(priv->dma_cap.addr64));
6951                 if (!ret) {
6952                         dev_info(priv->device, "Using %d bits DMA width\n",
6953                                  priv->dma_cap.addr64);
6954
6955                         /*
6956                          * If more than 32 bits can be addressed, make sure to
6957                          * enable enhanced addressing mode.
6958                          */
6959                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
6960                                 priv->plat->dma_cfg->eame = true;
6961                 } else {
6962                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
6963                         if (ret) {
6964                                 dev_err(priv->device, "Failed to set DMA Mask\n");
6965                                 goto error_hw_init;
6966                         }
6967
6968                         priv->dma_cap.addr64 = 32;
6969                 }
6970         }
6971
6972         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
6973         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
6974 #ifdef STMMAC_VLAN_TAG_USED
6975         /* Both mac100 and gmac support receive VLAN tag detection */
6976         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
6977         if (priv->dma_cap.vlhash) {
6978                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6979                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
6980         }
6981         if (priv->dma_cap.vlins) {
6982                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
6983                 if (priv->dma_cap.dvlan)
6984                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
6985         }
6986 #endif
6987         priv->msg_enable = netif_msg_init(debug, default_msg_level);
6988
6989         /* Initialize RSS */
6990         rxq = priv->plat->rx_queues_to_use;
6991         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
6992         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
6993                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
6994
6995         if (priv->dma_cap.rssen && priv->plat->rss_en)
6996                 ndev->features |= NETIF_F_RXHASH;
6997
6998         /* MTU range: 46 - hw-specific max */
6999         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7000         if (priv->plat->has_xgmac)
7001                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7002         else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
7003                 ndev->max_mtu = JUMBO_LEN;
7004         else
7005                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7006         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7007          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7008          */
7009         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7010             (priv->plat->maxmtu >= ndev->min_mtu))
7011                 ndev->max_mtu = priv->plat->maxmtu;
7012         else if (priv->plat->maxmtu < ndev->min_mtu)
7013                 dev_warn(priv->device,
7014                          "%s: warning: maxmtu has an invalid value (%d)\n",
7015                          __func__, priv->plat->maxmtu);
7016
7017         if (flow_ctrl)
7018                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7019
7020         /* Setup channels NAPI */
7021         stmmac_napi_add(ndev);
7022
7023         mutex_init(&priv->lock);
7024
7025         /* If a specific clk_csr value is passed from the platform,
7026          * the CSR Clock Range selection cannot be changed at run time
7027          * and is fixed. Otherwise, the driver tries to set the MDC
7028          * clock dynamically according to the actual csr clock
7029          * input.
7030          */
7031         if (priv->plat->clk_csr >= 0)
7032                 priv->clk_csr = priv->plat->clk_csr;
7033         else
7034                 stmmac_clk_csr_set(priv);
7035
7036         stmmac_check_pcs_mode(priv);
7037
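             /* Hold a runtime PM reference while probing; it is dropped by the
              * pm_runtime_put() at the end of probe so that the clocks can be
              * gated again once the device is up.
              */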
7038         pm_runtime_get_noresume(device);
7039         pm_runtime_set_active(device);
7040         pm_runtime_enable(device);
7041
7042         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7043             priv->hw->pcs != STMMAC_PCS_RTBI) {
7044                 /* MDIO bus Registration */
7045                 ret = stmmac_mdio_register(ndev);
7046                 if (ret < 0) {
7047                         dev_err(priv->device,
7048                                 "%s: MDIO bus (id: %d) registration failed",
7049                                 __func__, priv->plat->bus_id);
7050                         goto error_mdio_register;
7051                 }
7052         }
7053
7054         if (priv->plat->speed_mode_2500)
7055                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7056
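             /* If the platform has an XPCS behind the MDIO bus, set it up now
              * that the bus has been registered.
              */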
7057         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7058                 ret = stmmac_xpcs_setup(priv->mii);
7059                 if (ret)
7060                         goto error_xpcs_setup;
7061         }
7062
7063         ret = stmmac_phy_setup(priv);
7064         if (ret) {
7065                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7066                 goto error_phy_setup;
7067         }
7068
7069         ret = register_netdev(ndev);
7070         if (ret) {
7071                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7072                         __func__, ret);
7073                 goto error_netdev_register;
7074         }
7075
7076         if (priv->plat->serdes_powerup) {
7077                 ret = priv->plat->serdes_powerup(ndev,
7078                                                  priv->plat->bsp_priv);
7079
7080                 if (ret < 0)
7081                         goto error_serdes_powerup;
7082         }
7083
7084 #ifdef CONFIG_DEBUG_FS
7085         stmmac_init_fs(ndev);
7086 #endif
7087
7088         /* Let pm_runtime_put() disable the clocks.
7089          * If CONFIG_PM is not enabled, the clocks will stay powered.
7090          */
7091         pm_runtime_put(device);
7092
7093         return ret;
7094
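     /* Error unwind: undo the steps above in reverse order */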
7095 error_serdes_powerup:
7096         unregister_netdev(ndev);
7097 error_netdev_register:
7098         phylink_destroy(priv->phylink);
7099 error_xpcs_setup:
7100 error_phy_setup:
7101         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7102             priv->hw->pcs != STMMAC_PCS_RTBI)
7103                 stmmac_mdio_unregister(ndev);
7104 error_mdio_register:
7105         stmmac_napi_del(ndev);
7106 error_hw_init:
7107         destroy_workqueue(priv->wq);
7108         bitmap_free(priv->af_xdp_zc_qps);
7109
7110         return ret;
7111 }
7112 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7113
7114 /**
7115  * stmmac_dvr_remove - remove the driver from the device
7116  * @dev: device pointer
7117  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7118  * changes the link status and releases the DMA descriptor rings.
7119  */
7120 int stmmac_dvr_remove(struct device *dev)
7121 {
7122         struct net_device *ndev = dev_get_drvdata(dev);
7123         struct stmmac_priv *priv = netdev_priv(ndev);
7124
7125         netdev_info(priv->dev, "%s: removing driver", __func__);
7126
7127         stmmac_stop_all_dma(priv);
7128         stmmac_mac_set(priv, priv->ioaddr, false);
7129         netif_carrier_off(ndev);
7130         unregister_netdev(ndev);
7131
7132         /* Serdes power-down needs to happen after the VLAN filters
7133          * are deleted, which is triggered by unregister_netdev().
7134          */
7135         if (priv->plat->serdes_powerdown)
7136                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7137
7138 #ifdef CONFIG_DEBUG_FS
7139         stmmac_exit_fs(ndev);
7140 #endif
7141         phylink_destroy(priv->phylink);
7142         if (priv->plat->stmmac_rst)
7143                 reset_control_assert(priv->plat->stmmac_rst);
7144         reset_control_assert(priv->plat->stmmac_ahb_rst);
7145         pm_runtime_put(dev);
7146         pm_runtime_disable(dev);
7147         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7148             priv->hw->pcs != STMMAC_PCS_RTBI)
7149                 stmmac_mdio_unregister(ndev);
7150         destroy_workqueue(priv->wq);
7151         mutex_destroy(&priv->lock);
7152         bitmap_free(priv->af_xdp_zc_qps);
7153
7154         return 0;
7155 }
7156 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7157
7158 /**
7159  * stmmac_suspend - suspend callback
7160  * @dev: device pointer
7161  * Description: this function suspends the device and is called by the
7162  * platform driver to stop the network queues, release the resources,
7163  * program the PMT register (for WoL), and clean up and release driver resources.
7164  */
7165 int stmmac_suspend(struct device *dev)
7166 {
7167         struct net_device *ndev = dev_get_drvdata(dev);
7168         struct stmmac_priv *priv = netdev_priv(ndev);
7169         u32 chan;
7170
7171         if (!ndev || !netif_running(ndev))
7172                 return 0;
7173
7174         mutex_lock(&priv->lock);
7175
7176         netif_device_detach(ndev);
7177
7178         stmmac_disable_all_queues(priv);
7179
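             /* Cancel the per-queue TX coalescing timers */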
7180         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7181                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7182
7183         if (priv->eee_enabled) {
7184                 priv->tx_path_in_lpi_mode = false;
7185                 del_timer_sync(&priv->eee_ctrl_timer);
7186         }
7187
7188         /* Stop TX/RX DMA */
7189         stmmac_stop_all_dma(priv);
7190
7191         if (priv->plat->serdes_powerdown)
7192                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7193
7194         /* Enable Power down mode by programming the PMT regs */
7195         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7196                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7197                 priv->irq_wake = 1;
7198         } else {
7199                 stmmac_mac_set(priv, priv->ioaddr, false);
7200                 pinctrl_pm_select_sleep_state(priv->device);
7201         }
7202
7203         mutex_unlock(&priv->lock);
7204
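             /* The phylink suspend helpers are called under the RTNL lock. When
              * the MAC provides Wake-on-LAN (PMT), tell phylink that the MAC
              * still needs to receive packets, so the link is kept up; otherwise
              * lower the link speed first if the PHY is the wake-up source.
              */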
7205         rtnl_lock();
7206         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7207                 phylink_suspend(priv->phylink, true);
7208         } else {
7209                 if (device_may_wakeup(priv->device))
7210                         phylink_speed_down(priv->phylink, false);
7211                 phylink_suspend(priv->phylink, false);
7212         }
7213         rtnl_unlock();
7214
7215         if (priv->dma_cap.fpesel) {
7216                 /* Disable FPE */
7217                 stmmac_fpe_configure(priv, priv->ioaddr,
7218                                      priv->plat->tx_queues_to_use,
7219                                      priv->plat->rx_queues_to_use, false);
7220
7221                 stmmac_fpe_handshake(priv, false);
7222                 stmmac_fpe_stop_wq(priv);
7223         }
7224
7225         priv->speed = SPEED_UNKNOWN;
7226         return 0;
7227 }
7228 EXPORT_SYMBOL_GPL(stmmac_suspend);
7229
7230 /**
7231  * stmmac_reset_queues_param - reset queue parameters
7232  * @priv: device pointer
7233  */
7234 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7235 {
7236         u32 rx_cnt = priv->plat->rx_queues_to_use;
7237         u32 tx_cnt = priv->plat->tx_queues_to_use;
7238         u32 queue;
7239
7240         for (queue = 0; queue < rx_cnt; queue++) {
7241                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7242
7243                 rx_q->cur_rx = 0;
7244                 rx_q->dirty_rx = 0;
7245         }
7246
7247         for (queue = 0; queue < tx_cnt; queue++) {
7248                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7249
7250                 tx_q->cur_tx = 0;
7251                 tx_q->dirty_tx = 0;
7252                 tx_q->mss = 0;
7253
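                     /* Also reset the queue's BQL (byte queue limit) state */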
7254                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7255         }
7256 }
7257
7258 /**
7259  * stmmac_resume - resume callback
7260  * @dev: device pointer
7261  * Description: on resume, this function is invoked to set up the DMA and CORE
7262  * in a usable state.
7263  */
7264 int stmmac_resume(struct device *dev)
7265 {
7266         struct net_device *ndev = dev_get_drvdata(dev);
7267         struct stmmac_priv *priv = netdev_priv(ndev);
7268         int ret;
7269
7270         if (!netif_running(ndev))
7271                 return 0;
7272
7273         /* The Power Down bit in the PMT register is cleared
7274          * automatically as soon as a magic packet or a Wake-up frame
7275          * is received. It is still better to clear this bit manually
7276          * because it can cause problems when resuming from other
7277          * devices (e.g. a serial console).
7278          */
7279         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7280                 mutex_lock(&priv->lock);
7281                 stmmac_pmt(priv, priv->hw, 0);
7282                 mutex_unlock(&priv->lock);
7283                 priv->irq_wake = 0;
7284         } else {
7285                 pinctrl_pm_select_default_state(priv->device);
7286                 /* reset the phy so that it's ready */
7287                 if (priv->mii)
7288                         stmmac_mdio_reset(priv->mii);
7289         }
7290
7291         if (priv->plat->serdes_powerup) {
7292                 ret = priv->plat->serdes_powerup(ndev,
7293                                                  priv->plat->bsp_priv);
7294
7295                 if (ret < 0)
7296                         return ret;
7297         }
7298
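             /* phylink_resume() and phylink_speed_up() run under the RTNL lock;
              * phylink_speed_up() restores the link speed that was lowered for
              * PHY-based Wake-on-LAN during suspend.
              */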
7299         rtnl_lock();
7300         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7301                 phylink_resume(priv->phylink);
7302         } else {
7303                 phylink_resume(priv->phylink);
7304                 if (device_may_wakeup(priv->device))
7305                         phylink_speed_up(priv->phylink);
7306         }
7307         rtnl_unlock();
7308
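             /* Re-initialize the hardware: reset the queue indices, drop stale
              * TX skbs, rebuild the descriptor rings and re-program the MAC and
              * DMA before re-enabling the queues.
              */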
7309         rtnl_lock();
7310         mutex_lock(&priv->lock);
7311
7312         stmmac_reset_queues_param(priv);
7313
7314         stmmac_free_tx_skbufs(priv);
7315         stmmac_clear_descriptors(priv);
7316
7317         stmmac_hw_setup(ndev, false);
7318         stmmac_init_coalesce(priv);
7319         stmmac_set_rx_mode(ndev);
7320
7321         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7322
7323         stmmac_enable_all_queues(priv);
7324
7325         mutex_unlock(&priv->lock);
7326         rtnl_unlock();
7327
7328         netif_device_attach(ndev);
7329
7330         return 0;
7331 }
7332 EXPORT_SYMBOL_GPL(stmmac_resume);
7333
7334 #ifndef MODULE
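     /* Parse the built-in "stmmaceth=" command line, e.g.:
      *      stmmaceth=debug:16,phyaddr:1,watchdog:4000
      * Each option maps to the module parameter of the same name.
      */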
7335 static int __init stmmac_cmdline_opt(char *str)
7336 {
7337         char *opt;
7338
7339         if (!str || !*str)
7340                 return -EINVAL;
7341         while ((opt = strsep(&str, ",")) != NULL) {
7342                 if (!strncmp(opt, "debug:", 6)) {
7343                         if (kstrtoint(opt + 6, 0, &debug))
7344                                 goto err;
7345                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7346                         if (kstrtoint(opt + 8, 0, &phyaddr))
7347                                 goto err;
7348                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7349                         if (kstrtoint(opt + 7, 0, &buf_sz))
7350                                 goto err;
7351                 } else if (!strncmp(opt, "tc:", 3)) {
7352                         if (kstrtoint(opt + 3, 0, &tc))
7353                                 goto err;
7354                 } else if (!strncmp(opt, "watchdog:", 9)) {
7355                         if (kstrtoint(opt + 9, 0, &watchdog))
7356                                 goto err;
7357                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7358                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7359                                 goto err;
7360                 } else if (!strncmp(opt, "pause:", 6)) {
7361                         if (kstrtoint(opt + 6, 0, &pause))
7362                                 goto err;
7363                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7364                         if (kstrtoint(opt + 10, 0, &eee_timer))
7365                                 goto err;
7366                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7367                         if (kstrtoint(opt + 11, 0, &chain_mode))
7368                                 goto err;
7369                 }
7370         }
7371         return 0;
7372
7373 err:
7374         pr_err("%s: ERROR broken module parameter conversion", __func__);
7375         return -EINVAL;
7376 }
7377
7378 __setup("stmmaceth=", stmmac_cmdline_opt);
7379 #endif /* MODULE */
7380
7381 static int __init stmmac_init(void)
7382 {
7383 #ifdef CONFIG_DEBUG_FS
7384         /* Create debugfs main directory if it doesn't exist yet */
7385         if (!stmmac_fs_dir)
7386                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7387         register_netdevice_notifier(&stmmac_notifier);
7388 #endif
7389
7390         return 0;
7391 }
7392
7393 static void __exit stmmac_exit(void)
7394 {
7395 #ifdef CONFIG_DEBUG_FS
7396         unregister_netdevice_notifier(&stmmac_notifier);
7397         debugfs_remove_recursive(stmmac_fs_dir);
7398 #endif
7399 }
7400
7401 module_init(stmmac_init)
7402 module_exit(stmmac_exit)
7403
7404 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7405 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7406 MODULE_LICENSE("GPL");