drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoids non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
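
/* Illustrative note (added for clarity, not from the original sources):
 * STMMAC_ALIGN() above rounds up to the L1 cache line size and then to a
 * 16-byte boundary. Assuming SMP_CACHE_BYTES == 64, STMMAC_ALIGN(100)
 * evaluates to ALIGN(ALIGN(100, 64), 16) == ALIGN(128, 16) == 128.
 */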
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_conf.dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_conf.dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allows the user to force the use of chain mode instead of ring mode.
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139                                           u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151         int ret = 0;
152
153         if (enabled) {
154                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155                 if (ret)
156                         return ret;
157                 ret = clk_prepare_enable(priv->plat->pclk);
158                 if (ret) {
159                         clk_disable_unprepare(priv->plat->stmmac_clk);
160                         return ret;
161                 }
162                 if (priv->plat->clks_config) {
163                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164                         if (ret) {
165                                 clk_disable_unprepare(priv->plat->stmmac_clk);
166                                 clk_disable_unprepare(priv->plat->pclk);
167                                 return ret;
168                         }
169                 }
170         } else {
171                 clk_disable_unprepare(priv->plat->stmmac_clk);
172                 clk_disable_unprepare(priv->plat->pclk);
173                 if (priv->plat->clks_config)
174                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175         }
176
177         return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
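
/* A minimal usage sketch (hypothetical caller, not taken from this file):
 * the bus clocks are enabled before touching MAC/DMA registers and disabled
 * again afterwards, relying on the error unwinding implemented above.
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	...access MAC/DMA registers...
 *	stmmac_bus_clks_config(priv, false);
 */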
180
181 /**
182  * stmmac_verify_args - verify the driver parameters.
183  * Description: it checks the driver parameters and sets a default in case of
184  * errors.
185  */
186 static void stmmac_verify_args(void)
187 {
188         if (unlikely(watchdog < 0))
189                 watchdog = TX_TIMEO;
190         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
191                 buf_sz = DEFAULT_BUFSIZE;
192         if (unlikely(flow_ctrl > 1))
193                 flow_ctrl = FLOW_AUTO;
194         else if (likely(flow_ctrl < 0))
195                 flow_ctrl = FLOW_OFF;
196         if (unlikely((pause < 0) || (pause > 0xffff)))
197                 pause = PAUSE_TIME;
198         if (eee_timer < 0)
199                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
200 }
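
/* Example of the clamping above (illustrative module-parameter values):
 * loading with buf_sz=100 (below DEFAULT_BUFSIZE) resets buf_sz to 1536,
 * and pause=0x1ffff (above 0xffff) resets pause to PAUSE_TIME.
 */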
201
202 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
203 {
204         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
205         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
206         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
207         u32 queue;
208
209         for (queue = 0; queue < maxq; queue++) {
210                 struct stmmac_channel *ch = &priv->channel[queue];
211
212                 if (stmmac_xdp_is_enabled(priv) &&
213                     test_bit(queue, priv->af_xdp_zc_qps)) {
214                         napi_disable(&ch->rxtx_napi);
215                         continue;
216                 }
217
218                 if (queue < rx_queues_cnt)
219                         napi_disable(&ch->rx_napi);
220                 if (queue < tx_queues_cnt)
221                         napi_disable(&ch->tx_napi);
222         }
223 }
224
225 /**
226  * stmmac_disable_all_queues - Disable all queues
227  * @priv: driver private structure
228  */
229 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232         struct stmmac_rx_queue *rx_q;
233         u32 queue;
234
235         /* synchronize_rcu() needed for pending XDP buffers to drain */
236         for (queue = 0; queue < rx_queues_cnt; queue++) {
237                 rx_q = &priv->dma_conf.rx_queue[queue];
238                 if (rx_q->xsk_pool) {
239                         synchronize_rcu();
240                         break;
241                 }
242         }
243
244         __stmmac_disable_all_queues(priv);
245 }
246
247 /**
248  * stmmac_enable_all_queues - Enable all queues
249  * @priv: driver private structure
250  */
251 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
252 {
253         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
254         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
255         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
256         u32 queue;
257
258         for (queue = 0; queue < maxq; queue++) {
259                 struct stmmac_channel *ch = &priv->channel[queue];
260
261                 if (stmmac_xdp_is_enabled(priv) &&
262                     test_bit(queue, priv->af_xdp_zc_qps)) {
263                         napi_enable(&ch->rxtx_napi);
264                         continue;
265                 }
266
267                 if (queue < rx_queues_cnt)
268                         napi_enable(&ch->rx_napi);
269                 if (queue < tx_queues_cnt)
270                         napi_enable(&ch->tx_napi);
271         }
272 }
273
274 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
275 {
276         if (!test_bit(STMMAC_DOWN, &priv->state) &&
277             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
278                 queue_work(priv->wq, &priv->service_task);
279 }
280
281 static void stmmac_global_err(struct stmmac_priv *priv)
282 {
283         netif_carrier_off(priv->dev);
284         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
285         stmmac_service_event_schedule(priv);
286 }
287
288 /**
289  * stmmac_clk_csr_set - dynamically set the MDC clock
290  * @priv: driver private structure
291  * Description: this is to dynamically set the MDC clock according to the csr
292  * clock input.
293  * Note:
294  *      If a specific clk_csr value is passed from the platform,
295  *      the CSR Clock Range selection cannot be changed at run-time
296  *      and is fixed (as reported in the driver documentation).
297  *      Otherwise the driver tries to set the MDC clock dynamically
298  *      according to the actual clock input.
299  */
300 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
301 {
302         u32 clk_rate;
303
304         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
305
306         /* The platform-provided default clk_csr is assumed valid
307          * for all cases except the ones handled below.
308          * For rates higher than the IEEE 802.3 specified frequency
309          * we cannot estimate the proper divider, because the
310          * frequency of clk_csr_i is not known. So we do not change
311          * the default divider.
312          */
313         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
314                 if (clk_rate < CSR_F_35M)
315                         priv->clk_csr = STMMAC_CSR_20_35M;
316                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
317                         priv->clk_csr = STMMAC_CSR_35_60M;
318                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
319                         priv->clk_csr = STMMAC_CSR_60_100M;
320                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
321                         priv->clk_csr = STMMAC_CSR_100_150M;
322                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
323                         priv->clk_csr = STMMAC_CSR_150_250M;
324                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
325                         priv->clk_csr = STMMAC_CSR_250_300M;
326         }
327
328         if (priv->plat->has_sun8i) {
329                 if (clk_rate > 160000000)
330                         priv->clk_csr = 0x03;
331                 else if (clk_rate > 80000000)
332                         priv->clk_csr = 0x02;
333                 else if (clk_rate > 40000000)
334                         priv->clk_csr = 0x01;
335                 else
336                         priv->clk_csr = 0;
337         }
338
339         if (priv->plat->has_xgmac) {
340                 if (clk_rate > 400000000)
341                         priv->clk_csr = 0x5;
342                 else if (clk_rate > 350000000)
343                         priv->clk_csr = 0x4;
344                 else if (clk_rate > 300000000)
345                         priv->clk_csr = 0x3;
346                 else if (clk_rate > 250000000)
347                         priv->clk_csr = 0x2;
348                 else if (clk_rate > 150000000)
349                         priv->clk_csr = 0x1;
350                 else
351                         priv->clk_csr = 0x0;
352         }
353 }
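
/* Worked example (hypothetical rates, no fixed clk_csr from the platform):
 * a clk_get_rate() of 75 MHz falls in the [CSR_F_60M, CSR_F_100M) range
 * above, so the MDC divider becomes STMMAC_CSR_60_100M; on an XGMAC the
 * same 75 MHz rate selects clk_csr = 0x0 instead.
 */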
354
355 static void print_pkt(unsigned char *buf, int len)
356 {
357         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
358         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
359 }
360
361 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
362 {
363         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
364         u32 avail;
365
366         if (tx_q->dirty_tx > tx_q->cur_tx)
367                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
368         else
369                 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
370
371         return avail;
372 }
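
/* Worked example of the ring accounting above (illustrative indices):
 * with dma_tx_size = 512, cur_tx = 20 and dirty_tx = 10,
 * avail = 512 - 20 + 10 - 1 = 501 free slots; with dirty_tx = 30 and
 * cur_tx = 20, avail = 30 - 20 - 1 = 9.
 */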
373
374 /**
375  * stmmac_rx_dirty - Get RX queue dirty
376  * @priv: driver private structure
377  * @queue: RX queue index
378  */
379 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
380 {
381         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
382         u32 dirty;
383
384         if (rx_q->dirty_rx <= rx_q->cur_rx)
385                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
386         else
387                 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
388
389         return dirty;
390 }
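
/* Likewise for RX (illustrative indices): with dma_rx_size = 512,
 * cur_rx = 5 and dirty_rx = 500, dirty = 512 - 500 + 5 = 17 descriptors
 * are waiting to be refilled.
 */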
391
392 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
393 {
394         int tx_lpi_timer;
395
396         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
397         priv->eee_sw_timer_en = en ? 0 : 1;
398         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
399         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
400 }
401
402 /**
403  * stmmac_enable_eee_mode - check and enter LPI mode
404  * @priv: driver private structure
405  * Description: this function checks that all TX queues are idle and, if so,
406  * enters LPI mode when EEE is enabled.
407  */
408 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
409 {
410         u32 tx_cnt = priv->plat->tx_queues_to_use;
411         u32 queue;
412
413         /* check if all TX queues have the work finished */
414         for (queue = 0; queue < tx_cnt; queue++) {
415                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
416
417                 if (tx_q->dirty_tx != tx_q->cur_tx)
418                         return -EBUSY; /* still unfinished work */
419         }
420
421         /* Check and enter LPI mode */
422         if (!priv->tx_path_in_lpi_mode)
423                 stmmac_set_eee_mode(priv, priv->hw,
424                                 priv->plat->en_tx_lpi_clockgating);
425         return 0;
426 }
427
428 /**
429  * stmmac_disable_eee_mode - disable and exit from LPI mode
430  * @priv: driver private structure
431  * Description: this function exits and disables EEE when the LPI state
432  * is active. It is called from the xmit path.
433  */
434 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
435 {
436         if (!priv->eee_sw_timer_en) {
437                 stmmac_lpi_entry_timer_config(priv, 0);
438                 return;
439         }
440
441         stmmac_reset_eee_mode(priv, priv->hw);
442         del_timer_sync(&priv->eee_ctrl_timer);
443         priv->tx_path_in_lpi_mode = false;
444 }
445
446 /**
447  * stmmac_eee_ctrl_timer - EEE TX SW timer.
448  * @t:  timer_list struct containing private info
449  * Description:
450  *  if there is no data transfer and if we are not in LPI state,
451  *  then the MAC transmitter can be moved to the LPI state.
452  */
453 static void stmmac_eee_ctrl_timer(struct timer_list *t)
454 {
455         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
456
457         if (stmmac_enable_eee_mode(priv))
458                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
459 }
460
461 /**
462  * stmmac_eee_init - init EEE
463  * @priv: driver private structure
464  * Description:
465  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
466  *  can also manage EEE, this function enables the LPI state and starts the
467  *  related timer.
468  */
469 bool stmmac_eee_init(struct stmmac_priv *priv)
470 {
471         int eee_tw_timer = priv->eee_tw_timer;
472
473         /* When using PCS we cannot deal with the PHY registers at this stage,
474          * so we do not support extra features like EEE.
475          */
476         if (priv->hw->pcs == STMMAC_PCS_TBI ||
477             priv->hw->pcs == STMMAC_PCS_RTBI)
478                 return false;
479
480         /* Check if MAC core supports the EEE feature. */
481         if (!priv->dma_cap.eee)
482                 return false;
483
484         mutex_lock(&priv->lock);
485
486         /* Check if it needs to be deactivated */
487         if (!priv->eee_active) {
488                 if (priv->eee_enabled) {
489                         netdev_dbg(priv->dev, "disable EEE\n");
490                         stmmac_lpi_entry_timer_config(priv, 0);
491                         del_timer_sync(&priv->eee_ctrl_timer);
492                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
493                         if (priv->hw->xpcs)
494                                 xpcs_config_eee(priv->hw->xpcs,
495                                                 priv->plat->mult_fact_100ns,
496                                                 false);
497                 }
498                 mutex_unlock(&priv->lock);
499                 return false;
500         }
501
502         if (priv->eee_active && !priv->eee_enabled) {
503                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
504                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
505                                      eee_tw_timer);
506                 if (priv->hw->xpcs)
507                         xpcs_config_eee(priv->hw->xpcs,
508                                         priv->plat->mult_fact_100ns,
509                                         true);
510         }
511
512         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
513                 del_timer_sync(&priv->eee_ctrl_timer);
514                 priv->tx_path_in_lpi_mode = false;
515                 stmmac_lpi_entry_timer_config(priv, 1);
516         } else {
517                 stmmac_lpi_entry_timer_config(priv, 0);
518                 mod_timer(&priv->eee_ctrl_timer,
519                           STMMAC_LPI_T(priv->tx_lpi_timer));
520         }
521
522         mutex_unlock(&priv->lock);
523         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
524         return true;
525 }
526
527 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
528  * @priv: driver private structure
529  * @p : descriptor pointer
530  * @skb : the socket buffer
531  * Description :
532  * This function reads the timestamp from the descriptor, passes it to the
533  * stack and also performs some sanity checks.
534  */
535 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
536                                    struct dma_desc *p, struct sk_buff *skb)
537 {
538         struct skb_shared_hwtstamps shhwtstamp;
539         bool found = false;
540         u64 ns = 0;
541
542         if (!priv->hwts_tx_en)
543                 return;
544
545         /* exit if skb doesn't support hw tstamp */
546         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
547                 return;
548
549         /* check tx tstamp status */
550         if (stmmac_get_tx_timestamp_status(priv, p)) {
551                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
552                 found = true;
553         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
554                 found = true;
555         }
556
557         if (found) {
558                 ns -= priv->plat->cdc_error_adj;
559
560                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
561                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
562
563                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
564                 /* pass tstamp to stack */
565                 skb_tstamp_tx(skb, &shhwtstamp);
566         }
567 }
568
569 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
570  * @priv: driver private structure
571  * @p : descriptor pointer
572  * @np : next descriptor pointer
573  * @skb : the socket buffer
574  * Description :
575  * This function reads the received packet's timestamp from the descriptor
576  * and passes it to the stack. It also performs some sanity checks.
577  */
578 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
579                                    struct dma_desc *np, struct sk_buff *skb)
580 {
581         struct skb_shared_hwtstamps *shhwtstamp = NULL;
582         struct dma_desc *desc = p;
583         u64 ns = 0;
584
585         if (!priv->hwts_rx_en)
586                 return;
587         /* For GMAC4, the valid timestamp is from CTX next desc. */
588         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
589                 desc = np;
590
591         /* Check if timestamp is available */
592         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
593                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
594
595                 ns -= priv->plat->cdc_error_adj;
596
597                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
598                 shhwtstamp = skb_hwtstamps(skb);
599                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
600                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
601         } else  {
602                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
603         }
604 }
605
606 /**
607  *  stmmac_hwtstamp_set - control hardware timestamping.
608  *  @dev: device pointer.
609  *  @ifr: An IOCTL-specific structure that can contain a pointer to
610  *  a proprietary structure used to pass information to the driver.
611  *  Description:
612  *  This function configures the MAC to enable/disable both outgoing (TX)
613  *  and incoming (RX) packet timestamping based on user input.
614  *  Return Value:
615  *  0 on success and an appropriate -ve integer on failure.
616  */
617 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
618 {
619         struct stmmac_priv *priv = netdev_priv(dev);
620         struct hwtstamp_config config;
621         u32 ptp_v2 = 0;
622         u32 tstamp_all = 0;
623         u32 ptp_over_ipv4_udp = 0;
624         u32 ptp_over_ipv6_udp = 0;
625         u32 ptp_over_ethernet = 0;
626         u32 snap_type_sel = 0;
627         u32 ts_master_en = 0;
628         u32 ts_event_en = 0;
629
630         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
631                 netdev_alert(priv->dev, "No support for HW time stamping\n");
632                 priv->hwts_tx_en = 0;
633                 priv->hwts_rx_en = 0;
634
635                 return -EOPNOTSUPP;
636         }
637
638         if (copy_from_user(&config, ifr->ifr_data,
639                            sizeof(config)))
640                 return -EFAULT;
641
642         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
643                    __func__, config.flags, config.tx_type, config.rx_filter);
644
645         if (config.tx_type != HWTSTAMP_TX_OFF &&
646             config.tx_type != HWTSTAMP_TX_ON)
647                 return -ERANGE;
648
649         if (priv->adv_ts) {
650                 switch (config.rx_filter) {
651                 case HWTSTAMP_FILTER_NONE:
652                         /* do not timestamp any incoming packets */
653                         config.rx_filter = HWTSTAMP_FILTER_NONE;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
657                         /* PTP v1, UDP, any kind of event packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
659                         /* 'xmac' hardware can support Sync, Pdelay_Req and
660                          * Pdelay_resp by setting bit14 and bits17/16 to 01.
661                          * This leaves Delay_Req timestamps out.
662                          * Enable all events *and* general purpose message
663                          * timestamping.
664                          */
665                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
666                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
667                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
668                         break;
669
670                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
671                         /* PTP v1, UDP, Sync packet */
672                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
673                         /* take time stamp for SYNC messages only */
674                         ts_event_en = PTP_TCR_TSEVNTENA;
675
676                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678                         break;
679
680                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
681                         /* PTP v1, UDP, Delay_req packet */
682                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
683                         /* take time stamp for Delay_Req messages only */
684                         ts_master_en = PTP_TCR_TSMSTRENA;
685                         ts_event_en = PTP_TCR_TSEVNTENA;
686
687                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689                         break;
690
691                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
692                         /* PTP v2, UDP, any kind of event packet */
693                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
694                         ptp_v2 = PTP_TCR_TSVER2ENA;
695                         /* take time stamp for all event messages */
696                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
697
698                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
699                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
700                         break;
701
702                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
703                         /* PTP v2, UDP, Sync packet */
704                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
705                         ptp_v2 = PTP_TCR_TSVER2ENA;
706                         /* take time stamp for SYNC messages only */
707                         ts_event_en = PTP_TCR_TSEVNTENA;
708
709                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711                         break;
712
713                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
714                         /* PTP v2, UDP, Delay_req packet */
715                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
716                         ptp_v2 = PTP_TCR_TSVER2ENA;
717                         /* take time stamp for Delay_Req messages only */
718                         ts_master_en = PTP_TCR_TSMSTRENA;
719                         ts_event_en = PTP_TCR_TSEVNTENA;
720
721                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
722                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
723                         break;
724
725                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
726                         /* PTP v2/802.1AS, any layer, any kind of event packet */
727                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
728                         ptp_v2 = PTP_TCR_TSVER2ENA;
729                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
730                         if (priv->synopsys_id < DWMAC_CORE_4_10)
731                                 ts_event_en = PTP_TCR_TSEVNTENA;
732                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
733                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
734                         ptp_over_ethernet = PTP_TCR_TSIPENA;
735                         break;
736
737                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
738                         /* PTP v2/802.1AS, any layer, Sync packet */
739                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
740                         ptp_v2 = PTP_TCR_TSVER2ENA;
741                         /* take time stamp for SYNC messages only */
742                         ts_event_en = PTP_TCR_TSEVNTENA;
743
744                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
745                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
746                         ptp_over_ethernet = PTP_TCR_TSIPENA;
747                         break;
748
749                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
750                         /* PTP v2/802.1AS, any layer, Delay_req packet */
751                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
752                         ptp_v2 = PTP_TCR_TSVER2ENA;
753                         /* take time stamp for Delay_Req messages only */
754                         ts_master_en = PTP_TCR_TSMSTRENA;
755                         ts_event_en = PTP_TCR_TSEVNTENA;
756
757                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
758                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
759                         ptp_over_ethernet = PTP_TCR_TSIPENA;
760                         break;
761
762                 case HWTSTAMP_FILTER_NTP_ALL:
763                 case HWTSTAMP_FILTER_ALL:
764                         /* time stamp any incoming packet */
765                         config.rx_filter = HWTSTAMP_FILTER_ALL;
766                         tstamp_all = PTP_TCR_TSENALL;
767                         break;
768
769                 default:
770                         return -ERANGE;
771                 }
772         } else {
773                 switch (config.rx_filter) {
774                 case HWTSTAMP_FILTER_NONE:
775                         config.rx_filter = HWTSTAMP_FILTER_NONE;
776                         break;
777                 default:
778                         /* PTP v1, UDP, any kind of event packet */
779                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
780                         break;
781                 }
782         }
783         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
784         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
785
786         priv->systime_flags = STMMAC_HWTS_ACTIVE;
787
788         if (priv->hwts_tx_en || priv->hwts_rx_en) {
789                 priv->systime_flags |= tstamp_all | ptp_v2 |
790                                        ptp_over_ethernet | ptp_over_ipv6_udp |
791                                        ptp_over_ipv4_udp | ts_event_en |
792                                        ts_master_en | snap_type_sel;
793         }
794
795         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
796
797         memcpy(&priv->tstamp_config, &config, sizeof(config));
798
799         return copy_to_user(ifr->ifr_data, &config,
800                             sizeof(config)) ? -EFAULT : 0;
801 }
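
/* For reference, a minimal user-space sketch of exercising this path via the
 * standard SIOCSHWTSTAMP ioctl (illustrative fragment only; the interface
 * name "eth0" and the chosen filter are assumptions, not taken from this
 * driver, and error handling for socket() is omitted):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int sockfd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(sockfd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *
 * The current settings can be read back the same way with SIOCGHWTSTAMP,
 * which lands in stmmac_hwtstamp_get() below.
 */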
802
803 /**
804  *  stmmac_hwtstamp_get - read hardware timestamping.
805  *  @dev: device pointer.
806  *  @ifr: An IOCTL-specific structure that can contain a pointer to
807  *  a proprietary structure used to pass information to the driver.
808  *  Description:
809  *  This function obtains the current hardware timestamping settings
810  *  as requested.
811  */
812 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
813 {
814         struct stmmac_priv *priv = netdev_priv(dev);
815         struct hwtstamp_config *config = &priv->tstamp_config;
816
817         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
818                 return -EOPNOTSUPP;
819
820         return copy_to_user(ifr->ifr_data, config,
821                             sizeof(*config)) ? -EFAULT : 0;
822 }
823
824 /**
825  * stmmac_init_tstamp_counter - init hardware timestamping counter
826  * @priv: driver private structure
827  * @systime_flags: timestamping flags
828  * Description:
829  * Initialize hardware counter for packet timestamping.
830  * This is valid as long as the interface is open and not suspended.
831  * It will be rerun after resuming from suspend, in which case the timestamping
832  * flags updated by stmmac_hwtstamp_set() also need to be restored.
833  */
834 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
835 {
836         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
837         struct timespec64 now;
838         u32 sec_inc = 0;
839         u64 temp = 0;
840
841         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
842                 return -EOPNOTSUPP;
843
844         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
845         priv->systime_flags = systime_flags;
846
847         /* program Sub Second Increment reg */
848         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
849                                            priv->plat->clk_ptp_rate,
850                                            xmac, &sec_inc);
851         temp = div_u64(1000000000ULL, sec_inc);
852
853         /* Store sub second increment for later use */
854         priv->sub_second_inc = sec_inc;
855
856         /* Calculate the default addend value. The formula is:
857          * addend = 2^32 / freq_div_ratio,
858          * where freq_div_ratio = 1e9 ns / sec_inc
859          * (sec_inc being the sub-second increment programmed above).
860          */
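        /* Worked example (illustrative numbers, not a required configuration):
         * with clk_ptp_rate = 62.5 MHz and sec_inc = 20 ns,
         * freq_div_ratio = 1e9 / 20 = 50e6 and
         * addend = 2^32 * 50e6 / 62.5e6 = 0xCCCCCCCC, i.e. the 32-bit
         * accumulator overflows on 4 out of every 5 clk_ptp cycles.
         */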
861         temp = (u64)(temp << 32);
862         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
863         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
864
865         /* initialize system time */
866         ktime_get_real_ts64(&now);
867
868         /* lower 32 bits of tv_sec are safe until y2106 */
869         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
870
871         return 0;
872 }
873 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
874
875 /**
876  * stmmac_init_ptp - init PTP
877  * @priv: driver private structure
878  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
879  * This is done by looking at the HW cap. register.
880  * This function also registers the ptp driver.
881  */
882 static int stmmac_init_ptp(struct stmmac_priv *priv)
883 {
884         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
885         int ret;
886
887         if (priv->plat->ptp_clk_freq_config)
888                 priv->plat->ptp_clk_freq_config(priv);
889
890         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
891         if (ret)
892                 return ret;
893
894         priv->adv_ts = 0;
895         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
896         if (xmac && priv->dma_cap.atime_stamp)
897                 priv->adv_ts = 1;
898         /* Dwmac 3.x core with extend_desc can support adv_ts */
899         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
900                 priv->adv_ts = 1;
901
902         if (priv->dma_cap.time_stamp)
903                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
904
905         if (priv->adv_ts)
906                 netdev_info(priv->dev,
907                             "IEEE 1588-2008 Advanced Timestamp supported\n");
908
909         priv->hwts_tx_en = 0;
910         priv->hwts_rx_en = 0;
911
912         return 0;
913 }
914
915 static void stmmac_release_ptp(struct stmmac_priv *priv)
916 {
917         clk_disable_unprepare(priv->plat->clk_ptp_ref);
918         stmmac_ptp_unregister(priv);
919 }
920
921 /**
922  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
923  *  @priv: driver private structure
924  *  @duplex: duplex passed to the next function
925  *  Description: It is used for configuring the flow control in all queues
926  */
927 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
928 {
929         u32 tx_cnt = priv->plat->tx_queues_to_use;
930
931         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
932                         priv->pause, tx_cnt);
933 }
934
935 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
936                                                  phy_interface_t interface)
937 {
938         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
939
940         if (!priv->hw->xpcs)
941                 return NULL;
942
943         return &priv->hw->xpcs->pcs;
944 }
945
946 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
947                               const struct phylink_link_state *state)
948 {
949         /* Nothing to do, xpcs_config() handles everything */
950 }
951
952 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
953 {
954         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
955         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
956         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
957         bool *hs_enable = &fpe_cfg->hs_enable;
958
959         if (is_up && *hs_enable) {
960                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
961         } else {
962                 *lo_state = FPE_STATE_OFF;
963                 *lp_state = FPE_STATE_OFF;
964         }
965 }
966
967 static void stmmac_mac_link_down(struct phylink_config *config,
968                                  unsigned int mode, phy_interface_t interface)
969 {
970         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
971
972         stmmac_mac_set(priv, priv->ioaddr, false);
973         priv->eee_active = false;
974         priv->tx_lpi_enabled = false;
975         priv->eee_enabled = stmmac_eee_init(priv);
976         stmmac_set_eee_pls(priv, priv->hw, false);
977
978         if (priv->dma_cap.fpesel)
979                 stmmac_fpe_link_state_handle(priv, false);
980 }
981
982 static void stmmac_mac_link_up(struct phylink_config *config,
983                                struct phy_device *phy,
984                                unsigned int mode, phy_interface_t interface,
985                                int speed, int duplex,
986                                bool tx_pause, bool rx_pause)
987 {
988         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989         u32 old_ctrl, ctrl;
990
991         old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
992         ctrl = old_ctrl & ~priv->hw->link.speed_mask;
993
994         if (interface == PHY_INTERFACE_MODE_USXGMII) {
995                 switch (speed) {
996                 case SPEED_10000:
997                         ctrl |= priv->hw->link.xgmii.speed10000;
998                         break;
999                 case SPEED_5000:
1000                         ctrl |= priv->hw->link.xgmii.speed5000;
1001                         break;
1002                 case SPEED_2500:
1003                         ctrl |= priv->hw->link.xgmii.speed2500;
1004                         break;
1005                 default:
1006                         return;
1007                 }
1008         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1009                 switch (speed) {
1010                 case SPEED_100000:
1011                         ctrl |= priv->hw->link.xlgmii.speed100000;
1012                         break;
1013                 case SPEED_50000:
1014                         ctrl |= priv->hw->link.xlgmii.speed50000;
1015                         break;
1016                 case SPEED_40000:
1017                         ctrl |= priv->hw->link.xlgmii.speed40000;
1018                         break;
1019                 case SPEED_25000:
1020                         ctrl |= priv->hw->link.xlgmii.speed25000;
1021                         break;
1022                 case SPEED_10000:
1023                         ctrl |= priv->hw->link.xgmii.speed10000;
1024                         break;
1025                 case SPEED_2500:
1026                         ctrl |= priv->hw->link.speed2500;
1027                         break;
1028                 case SPEED_1000:
1029                         ctrl |= priv->hw->link.speed1000;
1030                         break;
1031                 default:
1032                         return;
1033                 }
1034         } else {
1035                 switch (speed) {
1036                 case SPEED_2500:
1037                         ctrl |= priv->hw->link.speed2500;
1038                         break;
1039                 case SPEED_1000:
1040                         ctrl |= priv->hw->link.speed1000;
1041                         break;
1042                 case SPEED_100:
1043                         ctrl |= priv->hw->link.speed100;
1044                         break;
1045                 case SPEED_10:
1046                         ctrl |= priv->hw->link.speed10;
1047                         break;
1048                 default:
1049                         return;
1050                 }
1051         }
1052
1053         priv->speed = speed;
1054
1055         if (priv->plat->fix_mac_speed)
1056                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1057
1058         if (!duplex)
1059                 ctrl &= ~priv->hw->link.duplex;
1060         else
1061                 ctrl |= priv->hw->link.duplex;
1062
1063         /* Flow Control operation */
1064         if (rx_pause && tx_pause)
1065                 priv->flow_ctrl = FLOW_AUTO;
1066         else if (rx_pause && !tx_pause)
1067                 priv->flow_ctrl = FLOW_RX;
1068         else if (!rx_pause && tx_pause)
1069                 priv->flow_ctrl = FLOW_TX;
1070         else
1071                 priv->flow_ctrl = FLOW_OFF;
1072
1073         stmmac_mac_flow_ctrl(priv, duplex);
1074
1075         if (ctrl != old_ctrl)
1076                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1077
1078         stmmac_mac_set(priv, priv->ioaddr, true);
1079         if (phy && priv->dma_cap.eee) {
1080                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1081                 priv->eee_enabled = stmmac_eee_init(priv);
1082                 priv->tx_lpi_enabled = priv->eee_enabled;
1083                 stmmac_set_eee_pls(priv, priv->hw, true);
1084         }
1085
1086         if (priv->dma_cap.fpesel)
1087                 stmmac_fpe_link_state_handle(priv, true);
1088 }
1089
1090 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1091         .mac_select_pcs = stmmac_mac_select_pcs,
1092         .mac_config = stmmac_mac_config,
1093         .mac_link_down = stmmac_mac_link_down,
1094         .mac_link_up = stmmac_mac_link_up,
1095 };
1096
1097 /**
1098  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1099  * @priv: driver private structure
1100  * Description: this is to verify if the HW supports the PCS, i.e. the
1101  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1102  * configured for the TBI, RTBI, or SGMII PHY interface.
1103  */
1104 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1105 {
1106         int interface = priv->plat->interface;
1107
1108         if (priv->dma_cap.pcs) {
1109                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1110                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1111                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1112                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1113                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1114                         priv->hw->pcs = STMMAC_PCS_RGMII;
1115                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1116                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1117                         priv->hw->pcs = STMMAC_PCS_SGMII;
1118                 }
1119         }
1120 }
1121
1122 /**
1123  * stmmac_init_phy - PHY initialization
1124  * @dev: net device structure
1125  * Description: it initializes the driver's PHY state, and attaches the PHY
1126  * to the MAC driver.
1127  *  Return value:
1128  *  0 on success
1129  */
1130 static int stmmac_init_phy(struct net_device *dev)
1131 {
1132         struct stmmac_priv *priv = netdev_priv(dev);
1133         struct fwnode_handle *fwnode;
1134         int ret;
1135
1136         fwnode = of_fwnode_handle(priv->plat->phylink_node);
1137         if (!fwnode)
1138                 fwnode = dev_fwnode(priv->device);
1139
1140         if (fwnode)
1141                 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1142
1143         /* Some DT bindings do not set up the PHY handle. Let's try to
1144          * parse it manually.
1145          */
1146         if (!fwnode || ret) {
1147                 int addr = priv->plat->phy_addr;
1148                 struct phy_device *phydev;
1149
1150                 phydev = mdiobus_get_phy(priv->mii, addr);
1151                 if (!phydev) {
1152                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1153                         return -ENODEV;
1154                 }
1155
1156                 ret = phylink_connect_phy(priv->phylink, phydev);
1157         }
1158
1159         if (!priv->plat->pmt) {
1160                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1161
1162                 phylink_ethtool_get_wol(priv->phylink, &wol);
1163                 device_set_wakeup_capable(priv->device, !!wol.supported);
1164         }
1165
1166         return ret;
1167 }
1168
1169 static int stmmac_phy_setup(struct stmmac_priv *priv)
1170 {
1171         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1172         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1173         int max_speed = priv->plat->max_speed;
1174         int mode = priv->plat->phy_interface;
1175         struct phylink *phylink;
1176
1177         priv->phylink_config.dev = &priv->dev->dev;
1178         priv->phylink_config.type = PHYLINK_NETDEV;
1179         if (priv->plat->mdio_bus_data)
1180                 priv->phylink_config.ovr_an_inband =
1181                         mdio_bus_data->xpcs_an_inband;
1182
1183         if (!fwnode)
1184                 fwnode = dev_fwnode(priv->device);
1185
1186         /* Set the platform/firmware specified interface mode */
1187         __set_bit(mode, priv->phylink_config.supported_interfaces);
1188
1189         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1190         if (priv->hw->xpcs)
1191                 xpcs_get_interfaces(priv->hw->xpcs,
1192                                     priv->phylink_config.supported_interfaces);
1193
1194         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1195                 MAC_10 | MAC_100;
1196
1197         if (!max_speed || max_speed >= 1000)
1198                 priv->phylink_config.mac_capabilities |= MAC_1000;
1199
1200         if (priv->plat->has_gmac4) {
1201                 if (!max_speed || max_speed >= 2500)
1202                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1203         } else if (priv->plat->has_xgmac) {
1204                 if (!max_speed || max_speed >= 2500)
1205                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1206                 if (!max_speed || max_speed >= 5000)
1207                         priv->phylink_config.mac_capabilities |= MAC_5000FD;
1208                 if (!max_speed || max_speed >= 10000)
1209                         priv->phylink_config.mac_capabilities |= MAC_10000FD;
1210                 if (!max_speed || max_speed >= 25000)
1211                         priv->phylink_config.mac_capabilities |= MAC_25000FD;
1212                 if (!max_speed || max_speed >= 40000)
1213                         priv->phylink_config.mac_capabilities |= MAC_40000FD;
1214                 if (!max_speed || max_speed >= 50000)
1215                         priv->phylink_config.mac_capabilities |= MAC_50000FD;
1216                 if (!max_speed || max_speed >= 100000)
1217                         priv->phylink_config.mac_capabilities |= MAC_100000FD;
1218         }
1219
1220         /* Half-duplex can only work with a single queue */
1221         if (priv->plat->tx_queues_to_use > 1)
1222                 priv->phylink_config.mac_capabilities &=
1223                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1224         priv->phylink_config.mac_managed_pm = true;
1225
1226         phylink = phylink_create(&priv->phylink_config, fwnode,
1227                                  mode, &stmmac_phylink_mac_ops);
1228         if (IS_ERR(phylink))
1229                 return PTR_ERR(phylink);
1230
1231         priv->phylink = phylink;
1232         return 0;
1233 }
1234
1235 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1236                                     struct stmmac_dma_conf *dma_conf)
1237 {
1238         u32 rx_cnt = priv->plat->rx_queues_to_use;
1239         unsigned int desc_size;
1240         void *head_rx;
1241         u32 queue;
1242
1243         /* Display RX rings */
1244         for (queue = 0; queue < rx_cnt; queue++) {
1245                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1246
1247                 pr_info("\tRX Queue %u rings\n", queue);
1248
1249                 if (priv->extend_desc) {
1250                         head_rx = (void *)rx_q->dma_erx;
1251                         desc_size = sizeof(struct dma_extended_desc);
1252                 } else {
1253                         head_rx = (void *)rx_q->dma_rx;
1254                         desc_size = sizeof(struct dma_desc);
1255                 }
1256
1257                 /* Display RX ring */
1258                 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1259                                     rx_q->dma_rx_phy, desc_size);
1260         }
1261 }
1262
1263 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1264                                     struct stmmac_dma_conf *dma_conf)
1265 {
1266         u32 tx_cnt = priv->plat->tx_queues_to_use;
1267         unsigned int desc_size;
1268         void *head_tx;
1269         u32 queue;
1270
1271         /* Display TX rings */
1272         for (queue = 0; queue < tx_cnt; queue++) {
1273                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1274
1275                 pr_info("\tTX Queue %d rings\n", queue);
1276
1277                 if (priv->extend_desc) {
1278                         head_tx = (void *)tx_q->dma_etx;
1279                         desc_size = sizeof(struct dma_extended_desc);
1280                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1281                         head_tx = (void *)tx_q->dma_entx;
1282                         desc_size = sizeof(struct dma_edesc);
1283                 } else {
1284                         head_tx = (void *)tx_q->dma_tx;
1285                         desc_size = sizeof(struct dma_desc);
1286                 }
1287
1288                 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1289                                     tx_q->dma_tx_phy, desc_size);
1290         }
1291 }
1292
1293 static void stmmac_display_rings(struct stmmac_priv *priv,
1294                                  struct stmmac_dma_conf *dma_conf)
1295 {
1296         /* Display RX ring */
1297         stmmac_display_rx_rings(priv, dma_conf);
1298
1299         /* Display TX ring */
1300         stmmac_display_tx_rings(priv, dma_conf);
1301 }
1302
1303 static int stmmac_set_bfsize(int mtu, int bufsize)
1304 {
1305         int ret = bufsize;
1306
1307         if (mtu >= BUF_SIZE_8KiB)
1308                 ret = BUF_SIZE_16KiB;
1309         else if (mtu >= BUF_SIZE_4KiB)
1310                 ret = BUF_SIZE_8KiB;
1311         else if (mtu >= BUF_SIZE_2KiB)
1312                 ret = BUF_SIZE_4KiB;
1313         else if (mtu > DEFAULT_BUFSIZE)
1314                 ret = BUF_SIZE_2KiB;
1315         else
1316                 ret = DEFAULT_BUFSIZE;
1317
1318         return ret;
1319 }
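
/* Worked example of the bucketing above (illustrative MTUs): an MTU of 1500
 * keeps DEFAULT_BUFSIZE (1536 bytes), an MTU of 3000 selects BUF_SIZE_4KiB,
 * and anything at or above 8 KiB selects BUF_SIZE_16KiB.
 */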
1320
1321 /**
1322  * stmmac_clear_rx_descriptors - clear RX descriptors
1323  * @priv: driver private structure
1324  * @dma_conf: structure to take the dma data
1325  * @queue: RX queue index
1326  * Description: this function is called to clear the RX descriptors
1327  * in both the basic and the extended descriptor cases.
1328  */
1329 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1330                                         struct stmmac_dma_conf *dma_conf,
1331                                         u32 queue)
1332 {
1333         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1334         int i;
1335
1336         /* Clear the RX descriptors */
1337         for (i = 0; i < dma_conf->dma_rx_size; i++)
1338                 if (priv->extend_desc)
1339                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1340                                         priv->use_riwt, priv->mode,
1341                                         (i == dma_conf->dma_rx_size - 1),
1342                                         dma_conf->dma_buf_sz);
1343                 else
1344                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1345                                         priv->use_riwt, priv->mode,
1346                                         (i == dma_conf->dma_rx_size - 1),
1347                                         dma_conf->dma_buf_sz);
1348 }
1349
1350 /**
1351  * stmmac_clear_tx_descriptors - clear tx descriptors
1352  * @priv: driver private structure
1353  * @dma_conf: structure to take the dma data
1354  * @queue: TX queue index.
1355  * Description: this function is called to clear the TX descriptors
1356  * in both the basic and the extended descriptor cases.
1357  */
1358 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1359                                         struct stmmac_dma_conf *dma_conf,
1360                                         u32 queue)
1361 {
1362         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1363         int i;
1364
1365         /* Clear the TX descriptors */
1366         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1367                 int last = (i == (dma_conf->dma_tx_size - 1));
1368                 struct dma_desc *p;
1369
1370                 if (priv->extend_desc)
1371                         p = &tx_q->dma_etx[i].basic;
1372                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1373                         p = &tx_q->dma_entx[i].basic;
1374                 else
1375                         p = &tx_q->dma_tx[i];
1376
1377                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1378         }
1379 }
1380
1381 /**
1382  * stmmac_clear_descriptors - clear descriptors
1383  * @priv: driver private structure
1384  * @dma_conf: structure to take the dma data
1385  * Description: this function is called to clear the TX and RX descriptors
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1389                                      struct stmmac_dma_conf *dma_conf)
1390 {
1391         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1392         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1393         u32 queue;
1394
1395         /* Clear the RX descriptors */
1396         for (queue = 0; queue < rx_queue_cnt; queue++)
1397                 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1398
1399         /* Clear the TX descriptors */
1400         for (queue = 0; queue < tx_queue_cnt; queue++)
1401                 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1402 }
1403
1404 /**
1405  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1406  * @priv: driver private structure
1407  * @dma_conf: structure to take the dma data
1408  * @p: descriptor pointer
1409  * @i: descriptor index
1410  * @flags: gfp flag
1411  * @queue: RX queue index
1412  * Description: this function is called to allocate a receive buffer, perform
1413  * the DMA mapping and init the descriptor.
1414  */
1415 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1416                                   struct stmmac_dma_conf *dma_conf,
1417                                   struct dma_desc *p,
1418                                   int i, gfp_t flags, u32 queue)
1419 {
1420         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1421         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1422         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1423
1424         if (priv->dma_cap.addr64 <= 32)
1425                 gfp |= GFP_DMA32;
1426
1427         if (!buf->page) {
1428                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1429                 if (!buf->page)
1430                         return -ENOMEM;
1431                 buf->page_offset = stmmac_rx_offset(priv);
1432         }
1433
1434         if (priv->sph && !buf->sec_page) {
1435                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1436                 if (!buf->sec_page)
1437                         return -ENOMEM;
1438
1439                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1440                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1441         } else {
1442                 buf->sec_page = NULL;
1443                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1444         }
1445
1446         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1447
1448         stmmac_set_desc_addr(priv, p, buf->addr);
1449         if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1450                 stmmac_init_desc3(priv, p);
1451
1452         return 0;
1453 }
1454
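/* A rough sketch of what the init above leaves behind per RX descriptor,
 * assuming Split Header (priv->sph) is enabled:
 *
 *	buf->page      <- page_pool page whose DMA address (plus the RX
 *	                  offset) is programmed via stmmac_set_desc_addr();
 *	                  GFP_DMA32 is added when the DMA engine decodes
 *	                  32 address bits or fewer
 *	buf->sec_page  <- second page_pool page for the payload split,
 *	                  programmed via stmmac_set_desc_sec_addr()
 */
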
1455 /**
1456  * stmmac_free_rx_buffer - free RX dma buffers
1457  * @priv: private structure
1458  * @rx_q: RX queue
1459  * @i: buffer index.
1460  */
1461 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1462                                   struct stmmac_rx_queue *rx_q,
1463                                   int i)
1464 {
1465         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1466
1467         if (buf->page)
1468                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1469         buf->page = NULL;
1470
1471         if (buf->sec_page)
1472                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1473         buf->sec_page = NULL;
1474 }
1475
1476 /**
1477  * stmmac_free_tx_buffer - free TX dma buffers
1478  * @priv: private structure
1479  * @dma_conf: structure to take the dma data
1480  * @queue: TX queue index
1481  * @i: buffer index.
1482  */
1483 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1484                                   struct stmmac_dma_conf *dma_conf,
1485                                   u32 queue, int i)
1486 {
1487         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1488
1489         if (tx_q->tx_skbuff_dma[i].buf &&
1490             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1491                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1492                         dma_unmap_page(priv->device,
1493                                        tx_q->tx_skbuff_dma[i].buf,
1494                                        tx_q->tx_skbuff_dma[i].len,
1495                                        DMA_TO_DEVICE);
1496                 else
1497                         dma_unmap_single(priv->device,
1498                                          tx_q->tx_skbuff_dma[i].buf,
1499                                          tx_q->tx_skbuff_dma[i].len,
1500                                          DMA_TO_DEVICE);
1501         }
1502
1503         if (tx_q->xdpf[i] &&
1504             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1505              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1506                 xdp_return_frame(tx_q->xdpf[i]);
1507                 tx_q->xdpf[i] = NULL;
1508         }
1509
1510         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1511                 tx_q->xsk_frames_done++;
1512
1513         if (tx_q->tx_skbuff[i] &&
1514             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1515                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1516                 tx_q->tx_skbuff[i] = NULL;
1517         }
1518
1519         tx_q->tx_skbuff_dma[i].buf = 0;
1520         tx_q->tx_skbuff_dma[i].map_as_page = false;
1521 }
1522
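/* Roughly, the cleanup above dispatches on tx_skbuff_dma[i].buf_type:
 * buffers other than XDP_TX are DMA-unmapped (the RX page pool owns the
 * XDP_TX mapping), XDP_TX and XDP_NDO frames are released with
 * xdp_return_frame(), SKBs are freed with dev_kfree_skb_any(), and XSK_TX
 * slots are only counted in xsk_frames_done so the caller can complete
 * them later via xsk_tx_completed().
 */
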
1523 /**
1524  * dma_free_rx_skbufs - free RX dma buffers
1525  * @priv: private structure
1526  * @dma_conf: structure to take the dma data
1527  * @queue: RX queue index
1528  */
1529 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1530                                struct stmmac_dma_conf *dma_conf,
1531                                u32 queue)
1532 {
1533         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1534         int i;
1535
1536         for (i = 0; i < dma_conf->dma_rx_size; i++)
1537                 stmmac_free_rx_buffer(priv, rx_q, i);
1538 }
1539
1540 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1541                                    struct stmmac_dma_conf *dma_conf,
1542                                    u32 queue, gfp_t flags)
1543 {
1544         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1545         int i;
1546
1547         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1548                 struct dma_desc *p;
1549                 int ret;
1550
1551                 if (priv->extend_desc)
1552                         p = &((rx_q->dma_erx + i)->basic);
1553                 else
1554                         p = rx_q->dma_rx + i;
1555
1556                 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1557                                              queue);
1558                 if (ret)
1559                         return ret;
1560
1561                 rx_q->buf_alloc_num++;
1562         }
1563
1564         return 0;
1565 }
1566
1567 /**
1568  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1569  * @priv: private structure
1570  * @dma_conf: structure to take the dma data
1571  * @queue: RX queue index
1572  */
1573 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1574                                 struct stmmac_dma_conf *dma_conf,
1575                                 u32 queue)
1576 {
1577         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1578         int i;
1579
1580         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1581                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1582
1583                 if (!buf->xdp)
1584                         continue;
1585
1586                 xsk_buff_free(buf->xdp);
1587                 buf->xdp = NULL;
1588         }
1589 }
1590
1591 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1592                                       struct stmmac_dma_conf *dma_conf,
1593                                       u32 queue)
1594 {
1595         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1596         int i;
1597
1598         for (i = 0; i < dma_conf->dma_rx_size; i++) {
1599                 struct stmmac_rx_buffer *buf;
1600                 dma_addr_t dma_addr;
1601                 struct dma_desc *p;
1602
1603                 if (priv->extend_desc)
1604                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1605                 else
1606                         p = rx_q->dma_rx + i;
1607
1608                 buf = &rx_q->buf_pool[i];
1609
1610                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1611                 if (!buf->xdp)
1612                         return -ENOMEM;
1613
1614                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1615                 stmmac_set_desc_addr(priv, p, dma_addr);
1616                 rx_q->buf_alloc_num++;
1617         }
1618
1619         return 0;
1620 }
1621
1622 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1623 {
1624         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1625                 return NULL;
1626
1627         return xsk_get_pool_from_qid(priv->dev, queue);
1628 }
1629
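/* In other words, a queue only gets an XSK buffer pool here when an XDP
 * program is installed and that queue has been bound for AF_XDP zero-copy;
 * every other queue keeps using the regular page_pool path.
 */
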
1630 /**
1631  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1632  * @priv: driver private structure
1633  * @dma_conf: structure to take the dma data
1634  * @queue: RX queue index
1635  * @flags: gfp flag.
1636  * Description: this function initializes the DMA RX descriptors
1637  * and allocates the socket buffers. It supports the chained and ring
1638  * modes.
1639  */
1640 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1641                                     struct stmmac_dma_conf *dma_conf,
1642                                     u32 queue, gfp_t flags)
1643 {
1644         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1645         int ret;
1646
1647         netif_dbg(priv, probe, priv->dev,
1648                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1649                   (u32)rx_q->dma_rx_phy);
1650
1651         stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1652
1653         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1654
1655         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1656
1657         if (rx_q->xsk_pool) {
1658                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1659                                                    MEM_TYPE_XSK_BUFF_POOL,
1660                                                    NULL));
1661                 netdev_info(priv->dev,
1662                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1663                             rx_q->queue_index);
1664                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1665         } else {
1666                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1667                                                    MEM_TYPE_PAGE_POOL,
1668                                                    rx_q->page_pool));
1669                 netdev_info(priv->dev,
1670                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1671                             rx_q->queue_index);
1672         }
1673
1674         if (rx_q->xsk_pool) {
1675                 /* RX XDP ZC buffer pool may not be populated, e.g.
1676                  * xdpsock TX-only.
1677                  */
1678                 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1679         } else {
1680                 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1681                 if (ret < 0)
1682                         return -ENOMEM;
1683         }
1684
1685         /* Setup the chained descriptor addresses */
1686         if (priv->mode == STMMAC_CHAIN_MODE) {
1687                 if (priv->extend_desc)
1688                         stmmac_mode_init(priv, rx_q->dma_erx,
1689                                          rx_q->dma_rx_phy,
1690                                          dma_conf->dma_rx_size, 1);
1691                 else
1692                         stmmac_mode_init(priv, rx_q->dma_rx,
1693                                          rx_q->dma_rx_phy,
1694                                          dma_conf->dma_rx_size, 0);
1695         }
1696
1697         return 0;
1698 }
1699
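/* The xdp_rxq memory model registered above tells the XDP core how to
 * recycle RX buffers: MEM_TYPE_XSK_BUFF_POOL returns frames to the AF_XDP
 * pool, while MEM_TYPE_PAGE_POOL returns them to the page_pool.  This is
 * the general XDP convention rather than anything specific to this driver.
 */
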
1700 static int init_dma_rx_desc_rings(struct net_device *dev,
1701                                   struct stmmac_dma_conf *dma_conf,
1702                                   gfp_t flags)
1703 {
1704         struct stmmac_priv *priv = netdev_priv(dev);
1705         u32 rx_count = priv->plat->rx_queues_to_use;
1706         int queue;
1707         int ret;
1708
1709         /* RX INITIALIZATION */
1710         netif_dbg(priv, probe, priv->dev,
1711                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1712
1713         for (queue = 0; queue < rx_count; queue++) {
1714                 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1715                 if (ret)
1716                         goto err_init_rx_buffers;
1717         }
1718
1719         return 0;
1720
1721 err_init_rx_buffers:
1722         while (queue >= 0) {
1723                 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1724
1725                 if (rx_q->xsk_pool)
1726                         dma_free_rx_xskbufs(priv, dma_conf, queue);
1727                 else
1728                         dma_free_rx_skbufs(priv, dma_conf, queue);
1729
1730                 rx_q->buf_alloc_num = 0;
1731                 rx_q->xsk_pool = NULL;
1732
1733                 queue--;
1734         }
1735
1736         return ret;
1737 }
1738
1739 /**
1740  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1741  * @priv: driver private structure
1742  * @dma_conf: structure to take the dma data
1743  * @queue: TX queue index
1744  * Description: this function initializes the DMA TX descriptors
1745  * and the per-descriptor bookkeeping state. It supports the chained
1746  * and ring modes.
1747  */
1748 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1749                                     struct stmmac_dma_conf *dma_conf,
1750                                     u32 queue)
1751 {
1752         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1753         int i;
1754
1755         netif_dbg(priv, probe, priv->dev,
1756                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1757                   (u32)tx_q->dma_tx_phy);
1758
1759         /* Setup the chained descriptor addresses */
1760         if (priv->mode == STMMAC_CHAIN_MODE) {
1761                 if (priv->extend_desc)
1762                         stmmac_mode_init(priv, tx_q->dma_etx,
1763                                          tx_q->dma_tx_phy,
1764                                          dma_conf->dma_tx_size, 1);
1765                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1766                         stmmac_mode_init(priv, tx_q->dma_tx,
1767                                          tx_q->dma_tx_phy,
1768                                          dma_conf->dma_tx_size, 0);
1769         }
1770
1771         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1772
1773         for (i = 0; i < dma_conf->dma_tx_size; i++) {
1774                 struct dma_desc *p;
1775
1776                 if (priv->extend_desc)
1777                         p = &((tx_q->dma_etx + i)->basic);
1778                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1779                         p = &((tx_q->dma_entx + i)->basic);
1780                 else
1781                         p = tx_q->dma_tx + i;
1782
1783                 stmmac_clear_desc(priv, p);
1784
1785                 tx_q->tx_skbuff_dma[i].buf = 0;
1786                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1787                 tx_q->tx_skbuff_dma[i].len = 0;
1788                 tx_q->tx_skbuff_dma[i].last_segment = false;
1789                 tx_q->tx_skbuff[i] = NULL;
1790         }
1791
1792         return 0;
1793 }
1794
1795 static int init_dma_tx_desc_rings(struct net_device *dev,
1796                                   struct stmmac_dma_conf *dma_conf)
1797 {
1798         struct stmmac_priv *priv = netdev_priv(dev);
1799         u32 tx_queue_cnt;
1800         u32 queue;
1801
1802         tx_queue_cnt = priv->plat->tx_queues_to_use;
1803
1804         for (queue = 0; queue < tx_queue_cnt; queue++)
1805                 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1806
1807         return 0;
1808 }
1809
1810 /**
1811  * init_dma_desc_rings - init the RX/TX descriptor rings
1812  * @dev: net device structure
1813  * @dma_conf: structure to take the dma data
1814  * @flags: gfp flag.
1815  * Description: this function initializes the DMA RX/TX descriptors
1816  * and allocates the socket buffers. It supports the chained and ring
1817  * modes.
1818  */
1819 static int init_dma_desc_rings(struct net_device *dev,
1820                                struct stmmac_dma_conf *dma_conf,
1821                                gfp_t flags)
1822 {
1823         struct stmmac_priv *priv = netdev_priv(dev);
1824         int ret;
1825
1826         ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1827         if (ret)
1828                 return ret;
1829
1830         ret = init_dma_tx_desc_rings(dev, dma_conf);
1831
1832         stmmac_clear_descriptors(priv, dma_conf);
1833
1834         if (netif_msg_hw(priv))
1835                 stmmac_display_rings(priv, dma_conf);
1836
1837         return ret;
1838 }
1839
1840 /**
1841  * dma_free_tx_skbufs - free TX dma buffers
1842  * @priv: private structure
1843  * @dma_conf: structure to take the dma data
1844  * @queue: TX queue index
1845  */
1846 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1847                                struct stmmac_dma_conf *dma_conf,
1848                                u32 queue)
1849 {
1850         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1851         int i;
1852
1853         tx_q->xsk_frames_done = 0;
1854
1855         for (i = 0; i < dma_conf->dma_tx_size; i++)
1856                 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1857
1858         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1859                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1860                 tx_q->xsk_frames_done = 0;
1861                 tx_q->xsk_pool = NULL;
1862         }
1863 }
1864
1865 /**
1866  * stmmac_free_tx_skbufs - free TX skb buffers
1867  * @priv: private structure
1868  */
1869 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1870 {
1871         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1872         u32 queue;
1873
1874         for (queue = 0; queue < tx_queue_cnt; queue++)
1875                 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1876 }
1877
1878 /**
1879  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1880  * @priv: private structure
1881  * @dma_conf: structure to take the dma data
1882  * @queue: RX queue index
1883  */
1884 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1885                                          struct stmmac_dma_conf *dma_conf,
1886                                          u32 queue)
1887 {
1888         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1889
1890         /* Release the DMA RX socket buffers */
1891         if (rx_q->xsk_pool)
1892                 dma_free_rx_xskbufs(priv, dma_conf, queue);
1893         else
1894                 dma_free_rx_skbufs(priv, dma_conf, queue);
1895
1896         rx_q->buf_alloc_num = 0;
1897         rx_q->xsk_pool = NULL;
1898
1899         /* Free DMA regions of consistent memory previously allocated */
1900         if (!priv->extend_desc)
1901                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1902                                   sizeof(struct dma_desc),
1903                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1904         else
1905                 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1906                                   sizeof(struct dma_extended_desc),
1907                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1908
1909         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1910                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1911
1912         kfree(rx_q->buf_pool);
1913         if (rx_q->page_pool)
1914                 page_pool_destroy(rx_q->page_pool);
1915 }
1916
1917 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1918                                        struct stmmac_dma_conf *dma_conf)
1919 {
1920         u32 rx_count = priv->plat->rx_queues_to_use;
1921         u32 queue;
1922
1923         /* Free RX queue resources */
1924         for (queue = 0; queue < rx_count; queue++)
1925                 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1926 }
1927
1928 /**
1929  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1930  * @priv: private structure
1931  * @dma_conf: structure to take the dma data
1932  * @queue: TX queue index
1933  */
1934 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1935                                          struct stmmac_dma_conf *dma_conf,
1936                                          u32 queue)
1937 {
1938         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1939         size_t size;
1940         void *addr;
1941
1942         /* Release the DMA TX socket buffers */
1943         dma_free_tx_skbufs(priv, dma_conf, queue);
1944
1945         if (priv->extend_desc) {
1946                 size = sizeof(struct dma_extended_desc);
1947                 addr = tx_q->dma_etx;
1948         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1949                 size = sizeof(struct dma_edesc);
1950                 addr = tx_q->dma_entx;
1951         } else {
1952                 size = sizeof(struct dma_desc);
1953                 addr = tx_q->dma_tx;
1954         }
1955
1956         size *= dma_conf->dma_tx_size;
1957
1958         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1959
1960         kfree(tx_q->tx_skbuff_dma);
1961         kfree(tx_q->tx_skbuff);
1962 }
1963
1964 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1965                                        struct stmmac_dma_conf *dma_conf)
1966 {
1967         u32 tx_count = priv->plat->tx_queues_to_use;
1968         u32 queue;
1969
1970         /* Free TX queue resources */
1971         for (queue = 0; queue < tx_count; queue++)
1972                 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1973 }
1974
1975 /**
1976  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1977  * @priv: private structure
1978  * @dma_conf: structure to take the dma data
1979  * @queue: RX queue index
1980  * Description: according to which descriptor type is used (extended or
1981  * basic), this function allocates the RX resources for a single queue:
1982  * the page pool, the buffer pool and the coherent memory for the DMA
1983  * descriptor ring, and registers the queue's XDP RxQ info.
1984  */
1985 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1986                                          struct stmmac_dma_conf *dma_conf,
1987                                          u32 queue)
1988 {
1989         struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1990         struct stmmac_channel *ch = &priv->channel[queue];
1991         bool xdp_prog = stmmac_xdp_is_enabled(priv);
1992         struct page_pool_params pp_params = { 0 };
1993         unsigned int num_pages;
1994         unsigned int napi_id;
1995         int ret;
1996
1997         rx_q->queue_index = queue;
1998         rx_q->priv_data = priv;
1999
2000         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2001         pp_params.pool_size = dma_conf->dma_rx_size;
2002         num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2003         pp_params.order = ilog2(num_pages);
2004         pp_params.nid = dev_to_node(priv->device);
2005         pp_params.dev = priv->device;
2006         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2007         pp_params.offset = stmmac_rx_offset(priv);
2008         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2009
2010         rx_q->page_pool = page_pool_create(&pp_params);
2011         if (IS_ERR(rx_q->page_pool)) {
2012                 ret = PTR_ERR(rx_q->page_pool);
2013                 rx_q->page_pool = NULL;
2014                 return ret;
2015         }
2016
2017         rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2018                                  sizeof(*rx_q->buf_pool),
2019                                  GFP_KERNEL);
2020         if (!rx_q->buf_pool)
2021                 return -ENOMEM;
2022
2023         if (priv->extend_desc) {
2024                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2025                                                    dma_conf->dma_rx_size *
2026                                                    sizeof(struct dma_extended_desc),
2027                                                    &rx_q->dma_rx_phy,
2028                                                    GFP_KERNEL);
2029                 if (!rx_q->dma_erx)
2030                         return -ENOMEM;
2031
2032         } else {
2033                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2034                                                   dma_conf->dma_rx_size *
2035                                                   sizeof(struct dma_desc),
2036                                                   &rx_q->dma_rx_phy,
2037                                                   GFP_KERNEL);
2038                 if (!rx_q->dma_rx)
2039                         return -ENOMEM;
2040         }
2041
2042         if (stmmac_xdp_is_enabled(priv) &&
2043             test_bit(queue, priv->af_xdp_zc_qps))
2044                 napi_id = ch->rxtx_napi.napi_id;
2045         else
2046                 napi_id = ch->rx_napi.napi_id;
2047
2048         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2049                                rx_q->queue_index,
2050                                napi_id);
2051         if (ret) {
2052                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2053                 return -EINVAL;
2054         }
2055
2056         return 0;
2057 }
2058
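/* To make the page_pool sizing above concrete: with 4 KiB pages and an
 * 8 KiB dma_buf_sz, num_pages is 2 and pp_params.order becomes 1, so each
 * pool entry is an order-1 (8 KiB) compound page; with a buffer size at or
 * below PAGE_SIZE the order stays 0.  The pool then holds dma_rx_size such
 * entries for this ring.
 */
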
2059 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2060                                        struct stmmac_dma_conf *dma_conf)
2061 {
2062         u32 rx_count = priv->plat->rx_queues_to_use;
2063         u32 queue;
2064         int ret;
2065
2066         /* RX queues buffers and DMA */
2067         for (queue = 0; queue < rx_count; queue++) {
2068                 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2069                 if (ret)
2070                         goto err_dma;
2071         }
2072
2073         return 0;
2074
2075 err_dma:
2076         free_dma_rx_desc_resources(priv, dma_conf);
2077
2078         return ret;
2079 }
2080
2081 /**
2082  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2083  * @priv: private structure
2084  * @dma_conf: structure to take the dma data
2085  * @queue: TX queue index
2086  * Description: according to which descriptor type is used (extended, TBS
2087  * enhanced or basic), this function allocates the TX resources for a single
2088  * queue: the tx_skbuff/tx_skbuff_dma bookkeeping arrays and the coherent
2089  * memory for the DMA descriptor ring.
2090  */
2091 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2092                                          struct stmmac_dma_conf *dma_conf,
2093                                          u32 queue)
2094 {
2095         struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2096         size_t size;
2097         void *addr;
2098
2099         tx_q->queue_index = queue;
2100         tx_q->priv_data = priv;
2101
2102         tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2103                                       sizeof(*tx_q->tx_skbuff_dma),
2104                                       GFP_KERNEL);
2105         if (!tx_q->tx_skbuff_dma)
2106                 return -ENOMEM;
2107
2108         tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2109                                   sizeof(struct sk_buff *),
2110                                   GFP_KERNEL);
2111         if (!tx_q->tx_skbuff)
2112                 return -ENOMEM;
2113
2114         if (priv->extend_desc)
2115                 size = sizeof(struct dma_extended_desc);
2116         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2117                 size = sizeof(struct dma_edesc);
2118         else
2119                 size = sizeof(struct dma_desc);
2120
2121         size *= dma_conf->dma_tx_size;
2122
2123         addr = dma_alloc_coherent(priv->device, size,
2124                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2125         if (!addr)
2126                 return -ENOMEM;
2127
2128         if (priv->extend_desc)
2129                 tx_q->dma_etx = addr;
2130         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2131                 tx_q->dma_entx = addr;
2132         else
2133                 tx_q->dma_tx = addr;
2134
2135         return 0;
2136 }
2137
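/* The descriptor element size chosen above mirrors the TX datapath: extended
 * descriptors when priv->extend_desc is set, the larger dma_edesc layout when
 * TBS (time based scheduling) is available on the queue, and the basic
 * dma_desc otherwise.  A single coherent allocation of size * dma_tx_size
 * backs the whole ring.
 */
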
2138 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2139                                        struct stmmac_dma_conf *dma_conf)
2140 {
2141         u32 tx_count = priv->plat->tx_queues_to_use;
2142         u32 queue;
2143         int ret;
2144
2145         /* TX queues buffers and DMA */
2146         for (queue = 0; queue < tx_count; queue++) {
2147                 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2148                 if (ret)
2149                         goto err_dma;
2150         }
2151
2152         return 0;
2153
2154 err_dma:
2155         free_dma_tx_desc_resources(priv, dma_conf);
2156         return ret;
2157 }
2158
2159 /**
2160  * alloc_dma_desc_resources - alloc TX/RX resources.
2161  * @priv: private structure
2162  * @dma_conf: structure to take the dma data
2163  * Description: according to which descriptor type is used (extended or
2164  * basic), this function allocates the resources for the TX and RX paths.
2165  * On the reception side, for example, it sets up the page pool used to
2166  * pre-allocate the RX buffers and allow a zero-copy mechanism.
2167  */
2168 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2169                                     struct stmmac_dma_conf *dma_conf)
2170 {
2171         /* RX Allocation */
2172         int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2173
2174         if (ret)
2175                 return ret;
2176
2177         ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2178
2179         return ret;
2180 }
2181
2182 /**
2183  * free_dma_desc_resources - free dma desc resources
2184  * @priv: private structure
2185  * @dma_conf: structure to take the dma data
2186  */
2187 static void free_dma_desc_resources(struct stmmac_priv *priv,
2188                                     struct stmmac_dma_conf *dma_conf)
2189 {
2190         /* Release the DMA TX socket buffers */
2191         free_dma_tx_desc_resources(priv, dma_conf);
2192
2193         /* Release the DMA RX socket buffers later
2194          * to ensure all pending XDP_TX buffers are returned.
2195          */
2196         free_dma_rx_desc_resources(priv, dma_conf);
2197 }
2198
2199 /**
2200  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2201  *  @priv: driver private structure
2202  *  Description: It is used for enabling the rx queues in the MAC
2203  */
2204 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2205 {
2206         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2207         int queue;
2208         u8 mode;
2209
2210         for (queue = 0; queue < rx_queues_count; queue++) {
2211                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2212                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2213         }
2214 }
2215
2216 /**
2217  * stmmac_start_rx_dma - start RX DMA channel
2218  * @priv: driver private structure
2219  * @chan: RX channel index
2220  * Description:
2221  * This starts a RX DMA channel
2222  */
2223 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2224 {
2225         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2226         stmmac_start_rx(priv, priv->ioaddr, chan);
2227 }
2228
2229 /**
2230  * stmmac_start_tx_dma - start TX DMA channel
2231  * @priv: driver private structure
2232  * @chan: TX channel index
2233  * Description:
2234  * This starts a TX DMA channel
2235  */
2236 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2237 {
2238         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2239         stmmac_start_tx(priv, priv->ioaddr, chan);
2240 }
2241
2242 /**
2243  * stmmac_stop_rx_dma - stop RX DMA channel
2244  * @priv: driver private structure
2245  * @chan: RX channel index
2246  * Description:
2247  * This stops a RX DMA channel
2248  */
2249 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2250 {
2251         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2252         stmmac_stop_rx(priv, priv->ioaddr, chan);
2253 }
2254
2255 /**
2256  * stmmac_stop_tx_dma - stop TX DMA channel
2257  * @priv: driver private structure
2258  * @chan: TX channel index
2259  * Description:
2260  * This stops a TX DMA channel
2261  */
2262 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2263 {
2264         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2265         stmmac_stop_tx(priv, priv->ioaddr, chan);
2266 }
2267
2268 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2269 {
2270         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2271         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2272         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2273         u32 chan;
2274
2275         for (chan = 0; chan < dma_csr_ch; chan++) {
2276                 struct stmmac_channel *ch = &priv->channel[chan];
2277                 unsigned long flags;
2278
2279                 spin_lock_irqsave(&ch->lock, flags);
2280                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2281                 spin_unlock_irqrestore(&ch->lock, flags);
2282         }
2283 }
2284
2285 /**
2286  * stmmac_start_all_dma - start all RX and TX DMA channels
2287  * @priv: driver private structure
2288  * Description:
2289  * This starts all the RX and TX DMA channels
2290  */
2291 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2292 {
2293         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2294         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2295         u32 chan = 0;
2296
2297         for (chan = 0; chan < rx_channels_count; chan++)
2298                 stmmac_start_rx_dma(priv, chan);
2299
2300         for (chan = 0; chan < tx_channels_count; chan++)
2301                 stmmac_start_tx_dma(priv, chan);
2302 }
2303
2304 /**
2305  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2306  * @priv: driver private structure
2307  * Description:
2308  * This stops the RX and TX DMA channels
2309  */
2310 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2311 {
2312         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2313         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2314         u32 chan = 0;
2315
2316         for (chan = 0; chan < rx_channels_count; chan++)
2317                 stmmac_stop_rx_dma(priv, chan);
2318
2319         for (chan = 0; chan < tx_channels_count; chan++)
2320                 stmmac_stop_tx_dma(priv, chan);
2321 }
2322
2323 /**
2324  *  stmmac_dma_operation_mode - HW DMA operation mode
2325  *  @priv: driver private structure
2326  *  Description: it is used for configuring the DMA operation mode register in
2327  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2328  */
2329 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2330 {
2331         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2332         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2333         int rxfifosz = priv->plat->rx_fifo_size;
2334         int txfifosz = priv->plat->tx_fifo_size;
2335         u32 txmode = 0;
2336         u32 rxmode = 0;
2337         u32 chan = 0;
2338         u8 qmode = 0;
2339
2340         if (rxfifosz == 0)
2341                 rxfifosz = priv->dma_cap.rx_fifo_size;
2342         if (txfifosz == 0)
2343                 txfifosz = priv->dma_cap.tx_fifo_size;
2344
2345         /* Adjust for real per queue fifo size */
2346         rxfifosz /= rx_channels_count;
2347         txfifosz /= tx_channels_count;
2348
2349         if (priv->plat->force_thresh_dma_mode) {
2350                 txmode = tc;
2351                 rxmode = tc;
2352         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2353                 /*
2354                  * In case of GMAC, SF mode can be enabled
2355                  * to perform the TX COE in HW. This depends on:
2356                  * 1) TX COE being actually supported;
2357                  * 2) there being no buggy Jumbo frame support
2358                  *    that requires not inserting the csum in the TDES.
2359                  */
2360                 txmode = SF_DMA_MODE;
2361                 rxmode = SF_DMA_MODE;
2362                 priv->xstats.threshold = SF_DMA_MODE;
2363         } else {
2364                 txmode = tc;
2365                 rxmode = SF_DMA_MODE;
2366         }
2367
2368         /* configure all channels */
2369         for (chan = 0; chan < rx_channels_count; chan++) {
2370                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2371                 u32 buf_size;
2372
2373                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2374
2375                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2376                                 rxfifosz, qmode);
2377
2378                 if (rx_q->xsk_pool) {
2379                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2380                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2381                                               buf_size,
2382                                               chan);
2383                 } else {
2384                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2385                                               priv->dma_conf.dma_buf_sz,
2386                                               chan);
2387                 }
2388         }
2389
2390         for (chan = 0; chan < tx_channels_count; chan++) {
2391                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2392
2393                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2394                                 txfifosz, qmode);
2395         }
2396 }
2397
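/* As a rough example of the FIFO split above: a platform advertising a
 * 16 KiB RX FIFO with four RX queues in use ends up programming each
 * channel with a 4 KiB share, and the same division is applied on the TX
 * side.  The numbers are only illustrative; the real sizes come from
 * priv->plat or the HW capability register.
 */
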
2398 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2399 {
2400         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2401         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2402         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2403         unsigned int entry = tx_q->cur_tx;
2404         struct dma_desc *tx_desc = NULL;
2405         struct xdp_desc xdp_desc;
2406         bool work_done = true;
2407
2408         /* Avoids TX time-out as we are sharing with slow path */
2409         txq_trans_cond_update(nq);
2410
2411         budget = min(budget, stmmac_tx_avail(priv, queue));
2412
2413         while (budget-- > 0) {
2414                 dma_addr_t dma_addr;
2415                 bool set_ic;
2416
2417                 /* The ring is shared with the slow path, so stop XSK TX desc
2418                  * submission when the available TX ring space drops below the threshold.
2419                  */
2420                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2421                     !netif_carrier_ok(priv->dev)) {
2422                         work_done = false;
2423                         break;
2424                 }
2425
2426                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2427                         break;
2428
2429                 if (likely(priv->extend_desc))
2430                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2431                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2432                         tx_desc = &tx_q->dma_entx[entry].basic;
2433                 else
2434                         tx_desc = tx_q->dma_tx + entry;
2435
2436                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2437                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2438
2439                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2440
2441                 /* To return the XDP buffer to the XSK pool, we simply call
2442                  * xsk_tx_completed(), so we don't need to fill up
2443                  * 'buf' and 'xdpf'.
2444                  */
2445                 tx_q->tx_skbuff_dma[entry].buf = 0;
2446                 tx_q->xdpf[entry] = NULL;
2447
2448                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2449                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2450                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2451                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2452
2453                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2454
2455                 tx_q->tx_count_frames++;
2456
2457                 if (!priv->tx_coal_frames[queue])
2458                         set_ic = false;
2459                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2460                         set_ic = true;
2461                 else
2462                         set_ic = false;
2463
2464                 if (set_ic) {
2465                         tx_q->tx_count_frames = 0;
2466                         stmmac_set_tx_ic(priv, tx_desc);
2467                         priv->xstats.tx_set_ic_bit++;
2468                 }
2469
2470                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2471                                        true, priv->mode, true, true,
2472                                        xdp_desc.len);
2473
2474                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2475
2476                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2477                 entry = tx_q->cur_tx;
2478         }
2479
2480         if (tx_desc) {
2481                 stmmac_flush_tx_descriptors(priv, queue);
2482                 xsk_tx_release(pool);
2483         }
2484
2485         /* Return true only if both of these conditions are met:
2486          *  a) TX budget is still available
2487          *  b) work_done is true, i.e. the XSK TX desc peek found no more
2488          *     pending XSK TX frames for transmission
2489          */
2490         return !!budget && work_done;
2491 }
2492
2493 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2494 {
2495         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2496                 tc += 64;
2497
2498                 if (priv->plat->force_thresh_dma_mode)
2499                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2500                 else
2501                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2502                                                       chan);
2503
2504                 priv->xstats.threshold = tc;
2505         }
2506 }
2507
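/* The bump above raises the threshold in 64-word steps for as long as the
 * current value is still <= 256; once it passes 256, or the DMA is already
 * running in Store-And-Forward mode, subsequent calls make no change.
 */
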
2508 /**
2509  * stmmac_tx_clean - to manage the transmission completion
2510  * @priv: driver private structure
2511  * @budget: napi budget limiting this functions packet handling
2512  * @queue: TX queue index
2513  * Description: it reclaims the transmit resources after transmission completes.
2514  */
2515 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2516 {
2517         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2518         unsigned int bytes_compl = 0, pkts_compl = 0;
2519         unsigned int entry, xmits = 0, count = 0;
2520
2521         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2522
2523         priv->xstats.tx_clean++;
2524
2525         tx_q->xsk_frames_done = 0;
2526
2527         entry = tx_q->dirty_tx;
2528
2529         /* Try to clean all completed TX frames in one shot */
2530         while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2531                 struct xdp_frame *xdpf;
2532                 struct sk_buff *skb;
2533                 struct dma_desc *p;
2534                 int status;
2535
2536                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2537                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2538                         xdpf = tx_q->xdpf[entry];
2539                         skb = NULL;
2540                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2541                         xdpf = NULL;
2542                         skb = tx_q->tx_skbuff[entry];
2543                 } else {
2544                         xdpf = NULL;
2545                         skb = NULL;
2546                 }
2547
2548                 if (priv->extend_desc)
2549                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2550                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2551                         p = &tx_q->dma_entx[entry].basic;
2552                 else
2553                         p = tx_q->dma_tx + entry;
2554
2555                 status = stmmac_tx_status(priv, &priv->dev->stats,
2556                                 &priv->xstats, p, priv->ioaddr);
2557                 /* Check if the descriptor is owned by the DMA */
2558                 if (unlikely(status & tx_dma_own))
2559                         break;
2560
2561                 count++;
2562
2563                 /* Make sure descriptor fields are read after reading
2564                  * the own bit.
2565                  */
2566                 dma_rmb();
2567
2568                 /* Just consider the last segment and ...*/
2569                 if (likely(!(status & tx_not_ls))) {
2570                         /* ... verify the status error condition */
2571                         if (unlikely(status & tx_err)) {
2572                                 priv->dev->stats.tx_errors++;
2573                                 if (unlikely(status & tx_err_bump_tc))
2574                                         stmmac_bump_dma_threshold(priv, queue);
2575                         } else {
2576                                 priv->dev->stats.tx_packets++;
2577                                 priv->xstats.tx_pkt_n++;
2578                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2579                         }
2580                         if (skb)
2581                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2582                 }
2583
2584                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2585                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2586                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2587                                 dma_unmap_page(priv->device,
2588                                                tx_q->tx_skbuff_dma[entry].buf,
2589                                                tx_q->tx_skbuff_dma[entry].len,
2590                                                DMA_TO_DEVICE);
2591                         else
2592                                 dma_unmap_single(priv->device,
2593                                                  tx_q->tx_skbuff_dma[entry].buf,
2594                                                  tx_q->tx_skbuff_dma[entry].len,
2595                                                  DMA_TO_DEVICE);
2596                         tx_q->tx_skbuff_dma[entry].buf = 0;
2597                         tx_q->tx_skbuff_dma[entry].len = 0;
2598                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2599                 }
2600
2601                 stmmac_clean_desc3(priv, tx_q, p);
2602
2603                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2604                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2605
2606                 if (xdpf &&
2607                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2608                         xdp_return_frame_rx_napi(xdpf);
2609                         tx_q->xdpf[entry] = NULL;
2610                 }
2611
2612                 if (xdpf &&
2613                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2614                         xdp_return_frame(xdpf);
2615                         tx_q->xdpf[entry] = NULL;
2616                 }
2617
2618                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2619                         tx_q->xsk_frames_done++;
2620
2621                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2622                         if (likely(skb)) {
2623                                 pkts_compl++;
2624                                 bytes_compl += skb->len;
2625                                 dev_consume_skb_any(skb);
2626                                 tx_q->tx_skbuff[entry] = NULL;
2627                         }
2628                 }
2629
2630                 stmmac_release_tx_desc(priv, p, priv->mode);
2631
2632                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2633         }
2634         tx_q->dirty_tx = entry;
2635
2636         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2637                                   pkts_compl, bytes_compl);
2638
2639         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2640                                                                 queue))) &&
2641             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2642
2643                 netif_dbg(priv, tx_done, priv->dev,
2644                           "%s: restart transmit\n", __func__);
2645                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2646         }
2647
2648         if (tx_q->xsk_pool) {
2649                 bool work_done;
2650
2651                 if (tx_q->xsk_frames_done)
2652                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2653
2654                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2655                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2656
2657                 /* For XSK TX, we try to send as many as possible.
2658                  * If XSK work done (XSK TX desc empty and budget still
2659                  * available), return "budget - 1" to reenable TX IRQ.
2660                  * Else, return "budget" to make NAPI continue polling.
2661                  */
2662                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2663                                                STMMAC_XSK_TX_BUDGET_MAX);
2664                 if (work_done)
2665                         xmits = budget - 1;
2666                 else
2667                         xmits = budget;
2668         }
2669
2670         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2671             priv->eee_sw_timer_en) {
2672                 if (stmmac_enable_eee_mode(priv))
2673                         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2674         }
2675
2676         /* We still have pending packets, let's call for a new scheduling */
2677         if (tx_q->dirty_tx != tx_q->cur_tx)
2678                 hrtimer_start(&tx_q->txtimer,
2679                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2680                               HRTIMER_MODE_REL);
2681
2682         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2683
2684         /* Combine decisions from TX clean and XSK TX */
2685         return max(count, xmits);
2686 }
2687
2688 /**
2689  * stmmac_tx_err - to manage the tx error
2690  * @priv: driver private structure
2691  * @chan: channel index
2692  * Description: it cleans the descriptors and restarts the transmission
2693  * in case of transmission errors.
2694  */
2695 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2696 {
2697         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2698
2699         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2700
2701         stmmac_stop_tx_dma(priv, chan);
2702         dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2703         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2704         stmmac_reset_tx_queue(priv, chan);
2705         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2706                             tx_q->dma_tx_phy, chan);
2707         stmmac_start_tx_dma(priv, chan);
2708
2709         priv->dev->stats.tx_errors++;
2710         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2711 }
2712
2713 /**
2714  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2715  *  @priv: driver private structure
2716  *  @txmode: TX operating mode
2717  *  @rxmode: RX operating mode
2718  *  @chan: channel index
2719  *  Description: it is used for configuring the DMA operation mode at
2720  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2721  *  mode.
2722  */
2723 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2724                                           u32 rxmode, u32 chan)
2725 {
2726         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2727         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2728         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2729         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2730         int rxfifosz = priv->plat->rx_fifo_size;
2731         int txfifosz = priv->plat->tx_fifo_size;
2732
2733         if (rxfifosz == 0)
2734                 rxfifosz = priv->dma_cap.rx_fifo_size;
2735         if (txfifosz == 0)
2736                 txfifosz = priv->dma_cap.tx_fifo_size;
2737
2738         /* Adjust for real per queue fifo size */
2739         rxfifosz /= rx_channels_count;
2740         txfifosz /= tx_channels_count;
2741
2742         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2743         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2744 }
2745
2746 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2747 {
2748         int ret;
2749
2750         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2751                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2752         if (ret && (ret != -EINVAL)) {
2753                 stmmac_global_err(priv);
2754                 return true;
2755         }
2756
2757         return false;
2758 }
2759
2760 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2761 {
2762         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2763                                                  &priv->xstats, chan, dir);
2764         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2765         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2766         struct stmmac_channel *ch = &priv->channel[chan];
2767         struct napi_struct *rx_napi;
2768         struct napi_struct *tx_napi;
2769         unsigned long flags;
2770
2771         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2772         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2773
2774         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2775                 if (napi_schedule_prep(rx_napi)) {
2776                         spin_lock_irqsave(&ch->lock, flags);
2777                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2778                         spin_unlock_irqrestore(&ch->lock, flags);
2779                         __napi_schedule(rx_napi);
2780                 }
2781         }
2782
2783         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2784                 if (napi_schedule_prep(tx_napi)) {
2785                         spin_lock_irqsave(&ch->lock, flags);
2786                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2787                         spin_unlock_irqrestore(&ch->lock, flags);
2788                         __napi_schedule(tx_napi);
2789                 }
2790         }
2791
2792         return status;
2793 }
2794
2795 /**
2796  * stmmac_dma_interrupt - DMA ISR
2797  * @priv: driver private structure
2798  * Description: this is the DMA ISR. It is called by the main ISR.
2799  * It calls the dwmac DMA routine and schedules the NAPI poll method when
2800  * there is work to be done.
2801  */
2802 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2803 {
2804         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2805         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2806         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2807                                 tx_channel_count : rx_channel_count;
2808         u32 chan;
2809         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2810
2811         /* Make sure we never check beyond our status buffer. */
2812         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2813                 channels_to_check = ARRAY_SIZE(status);
2814
2815         for (chan = 0; chan < channels_to_check; chan++)
2816                 status[chan] = stmmac_napi_check(priv, chan,
2817                                                  DMA_DIR_RXTX);
2818
2819         for (chan = 0; chan < tx_channel_count; chan++) {
2820                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2821                         /* Try to bump up the dma threshold on this failure */
2822                         stmmac_bump_dma_threshold(priv, chan);
2823                 } else if (unlikely(status[chan] == tx_hard_error)) {
2824                         stmmac_tx_err(priv, chan);
2825                 }
2826         }
2827 }
2828
2829 /**
2830  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2831  * @priv: driver private structure
2832  * Description: this masks the MMC irq because the counters are managed in SW.
2833  */
2834 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2835 {
2836         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2837                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2838
2839         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2840
2841         if (priv->dma_cap.rmon) {
2842                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2843                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2844         } else
2845                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2846 }
2847
2848 /**
2849  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2850  * @priv: driver private structure
2851  * Description:
2852  *  new GMAC chip generations have a dedicated register to indicate the
2853  *  presence of the optional features/functions.
2854  *  This can also be used to override the values passed through the
2855  *  platform, which are necessary for the old MAC10/100 and GMAC chips.
2856  */
2857 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2858 {
2859         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2860 }
2861
2862 /**
2863  * stmmac_check_ether_addr - check if the MAC addr is valid
2864  * @priv: driver private structure
2865  * Description:
2866  * verify that the MAC address is valid; if it is not, read the address
2867  * from the HW or, failing that, generate a random MAC address
2868  */
2869 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2870 {
2871         u8 addr[ETH_ALEN];
2872
2873         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2874                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2875                 if (is_valid_ether_addr(addr))
2876                         eth_hw_addr_set(priv->dev, addr);
2877                 else
2878                         eth_hw_addr_random(priv->dev);
2879                 dev_info(priv->device, "device MAC address %pM\n",
2880                          priv->dev->dev_addr);
2881         }
2882 }
2883
2884 /**
2885  * stmmac_init_dma_engine - DMA init.
2886  * @priv: driver private structure
2887  * Description:
2888  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2889  * Some DMA parameters can be passed from the platform;
2890  * if they are not passed, a default is used for the MAC or GMAC.
2891  */
2892 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2893 {
2894         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2895         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2896         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2897         struct stmmac_rx_queue *rx_q;
2898         struct stmmac_tx_queue *tx_q;
2899         u32 chan = 0;
2900         int atds = 0;
2901         int ret = 0;
2902
2903         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2904                 dev_err(priv->device, "Invalid DMA configuration\n");
2905                 return -EINVAL;
2906         }
2907
2908         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2909                 atds = 1;
2910
2911         ret = stmmac_reset(priv, priv->ioaddr);
2912         if (ret) {
2913                 dev_err(priv->device, "Failed to reset the dma\n");
2914                 return ret;
2915         }
2916
2917         /* DMA Configuration */
2918         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2919
2920         if (priv->plat->axi)
2921                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2922
2923         /* DMA CSR Channel configuration */
2924         for (chan = 0; chan < dma_csr_ch; chan++) {
2925                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2926                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2927         }
2928
2929         /* DMA RX Channel Configuration */
2930         for (chan = 0; chan < rx_channels_count; chan++) {
2931                 rx_q = &priv->dma_conf.rx_queue[chan];
2932
2933                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2934                                     rx_q->dma_rx_phy, chan);
2935
2936                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2937                                      (rx_q->buf_alloc_num *
2938                                       sizeof(struct dma_desc));
2939                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2940                                        rx_q->rx_tail_addr, chan);
2941         }
2942
2943         /* DMA TX Channel Configuration */
2944         for (chan = 0; chan < tx_channels_count; chan++) {
2945                 tx_q = &priv->dma_conf.tx_queue[chan];
2946
2947                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2948                                     tx_q->dma_tx_phy, chan);
2949
2950                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2951                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2952                                        tx_q->tx_tail_addr, chan);
2953         }
2954
2955         return ret;
2956 }
2957
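/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: start the per-queue hrtimer using the configured TX coalesce
 * timer value.
 */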
2958 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2959 {
2960         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2961
2962         hrtimer_start(&tx_q->txtimer,
2963                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2964                       HRTIMER_MODE_REL);
2965 }
2966
2967 /**
2968  * stmmac_tx_timer - mitigation sw timer for tx.
2969  * @t: pointer to the hrtimer embedded in the TX queue
2970  * Description:
2971  * This is the timer handler that schedules the TX NAPI poll to run stmmac_tx_clean.
2972  */
2973 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2974 {
2975         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2976         struct stmmac_priv *priv = tx_q->priv_data;
2977         struct stmmac_channel *ch;
2978         struct napi_struct *napi;
2979
2980         ch = &priv->channel[tx_q->queue_index];
2981         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2982
2983         if (likely(napi_schedule_prep(napi))) {
2984                 unsigned long flags;
2985
2986                 spin_lock_irqsave(&ch->lock, flags);
2987                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2988                 spin_unlock_irqrestore(&ch->lock, flags);
2989                 __napi_schedule(napi);
2990         }
2991
2992         return HRTIMER_NORESTART;
2993 }
2994
2995 /**
2996  * stmmac_init_coalesce - init mitigation options.
2997  * @priv: driver private structure
2998  * Description:
2999  * This initializes the coalesce parameters: i.e. timer rate,
3000  * timer handler and default threshold used for enabling the
3001  * interrupt on completion bit.
3002  */
3003 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3004 {
3005         u32 tx_channel_count = priv->plat->tx_queues_to_use;
3006         u32 rx_channel_count = priv->plat->rx_queues_to_use;
3007         u32 chan;
3008
3009         for (chan = 0; chan < tx_channel_count; chan++) {
3010                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3011
3012                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3013                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3014
3015                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3016                 tx_q->txtimer.function = stmmac_tx_timer;
3017         }
3018
3019         for (chan = 0; chan < rx_channel_count; chan++)
3020                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3021 }
3022
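/**
 * stmmac_set_rings_length - set the TX/RX descriptor ring lengths
 * @priv: driver private structure
 * Description: program the TX and RX ring lengths into the DMA for every
 * channel in use.
 */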
3023 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3024 {
3025         u32 rx_channels_count = priv->plat->rx_queues_to_use;
3026         u32 tx_channels_count = priv->plat->tx_queues_to_use;
3027         u32 chan;
3028
3029         /* set TX ring length */
3030         for (chan = 0; chan < tx_channels_count; chan++)
3031                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3032                                        (priv->dma_conf.dma_tx_size - 1), chan);
3033
3034         /* set RX ring length */
3035         for (chan = 0; chan < rx_channels_count; chan++)
3036                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3037                                        (priv->dma_conf.dma_rx_size - 1), chan);
3038 }
3039
3040 /**
3041  *  stmmac_set_tx_queue_weight - Set TX queue weight
3042  *  @priv: driver private structure
3043  *  Description: It is used for setting the TX queues' weights
3044  */
3045 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3046 {
3047         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3048         u32 weight;
3049         u32 queue;
3050
3051         for (queue = 0; queue < tx_queues_count; queue++) {
3052                 weight = priv->plat->tx_queues_cfg[queue].weight;
3053                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3054         }
3055 }
3056
3057 /**
3058  *  stmmac_configure_cbs - Configure CBS in TX queue
3059  *  @priv: driver private structure
3060  *  Description: It is used for configuring CBS in AVB TX queues
3061  */
3062 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3063 {
3064         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3065         u32 mode_to_use;
3066         u32 queue;
3067
3068         /* queue 0 is reserved for legacy traffic */
3069         for (queue = 1; queue < tx_queues_count; queue++) {
3070                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3071                 if (mode_to_use == MTL_QUEUE_DCB)
3072                         continue;
3073
3074                 stmmac_config_cbs(priv, priv->hw,
3075                                 priv->plat->tx_queues_cfg[queue].send_slope,
3076                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3077                                 priv->plat->tx_queues_cfg[queue].high_credit,
3078                                 priv->plat->tx_queues_cfg[queue].low_credit,
3079                                 queue);
3080         }
3081 }
3082
3083 /**
3084  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3085  *  @priv: driver private structure
3086  *  Description: It is used for mapping RX queues to RX dma channels
3087  */
3088 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3089 {
3090         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3091         u32 queue;
3092         u32 chan;
3093
3094         for (queue = 0; queue < rx_queues_count; queue++) {
3095                 chan = priv->plat->rx_queues_cfg[queue].chan;
3096                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3097         }
3098 }
3099
3100 /**
3101  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3102  *  @priv: driver private structure
3103  *  Description: It is used for configuring the RX Queue Priority
3104  */
3105 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3106 {
3107         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3108         u32 queue;
3109         u32 prio;
3110
3111         for (queue = 0; queue < rx_queues_count; queue++) {
3112                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3113                         continue;
3114
3115                 prio = priv->plat->rx_queues_cfg[queue].prio;
3116                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3117         }
3118 }
3119
3120 /**
3121  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3122  *  @priv: driver private structure
3123  *  Description: It is used for configuring the TX Queue Priority
3124  */
3125 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3126 {
3127         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3128         u32 queue;
3129         u32 prio;
3130
3131         for (queue = 0; queue < tx_queues_count; queue++) {
3132                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3133                         continue;
3134
3135                 prio = priv->plat->tx_queues_cfg[queue].prio;
3136                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3137         }
3138 }
3139
3140 /**
3141  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3142  *  @priv: driver private structure
3143  *  Description: It is used for configuring the RX queue routing
3144  */
3145 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3146 {
3147         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3148         u32 queue;
3149         u8 packet;
3150
3151         for (queue = 0; queue < rx_queues_count; queue++) {
3152                 /* no specific packet type routing specified for the queue */
3153                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3154                         continue;
3155
3156                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3157                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3158         }
3159 }
3160
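/**
 * stmmac_mac_config_rss - configure Receive Side Scaling
 * @priv: driver private structure
 * Description: if the HW capability or the platform does not allow RSS,
 * simply mark it as disabled; otherwise enable it when NETIF_F_RXHASH is
 * set and program the RSS configuration into the MAC.
 */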
3161 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3162 {
3163         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3164                 priv->rss.enable = false;
3165                 return;
3166         }
3167
3168         if (priv->dev->features & NETIF_F_RXHASH)
3169                 priv->rss.enable = true;
3170         else
3171                 priv->rss.enable = false;
3172
3173         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3174                              priv->plat->rx_queues_to_use);
3175 }
3176
3177 /**
3178  *  stmmac_mtl_configuration - Configure MTL
3179  *  @priv: driver private structure
3180  *  Description: It is used for configuring the MTL
3181  */
3182 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3183 {
3184         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3185         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3186
3187         if (tx_queues_count > 1)
3188                 stmmac_set_tx_queue_weight(priv);
3189
3190         /* Configure MTL RX algorithms */
3191         if (rx_queues_count > 1)
3192                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3193                                 priv->plat->rx_sched_algorithm);
3194
3195         /* Configure MTL TX algorithms */
3196         if (tx_queues_count > 1)
3197                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3198                                 priv->plat->tx_sched_algorithm);
3199
3200         /* Configure CBS in AVB TX queues */
3201         if (tx_queues_count > 1)
3202                 stmmac_configure_cbs(priv);
3203
3204         /* Map RX MTL to DMA channels */
3205         stmmac_rx_queue_dma_chan_map(priv);
3206
3207         /* Enable MAC RX Queues */
3208         stmmac_mac_enable_rx_queues(priv);
3209
3210         /* Set RX priorities */
3211         if (rx_queues_count > 1)
3212                 stmmac_mac_config_rx_queues_prio(priv);
3213
3214         /* Set TX priorities */
3215         if (tx_queues_count > 1)
3216                 stmmac_mac_config_tx_queues_prio(priv);
3217
3218         /* Set RX routing */
3219         if (rx_queues_count > 1)
3220                 stmmac_mac_config_rx_queues_routing(priv);
3221
3222         /* Receive Side Scaling */
3223         if (rx_queues_count > 1)
3224                 stmmac_mac_config_rss(priv);
3225 }
3226
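/**
 * stmmac_safety_feat_configuration - configure the HW safety features
 * @priv: driver private structure
 * Description: enable the safety features when the Automotive Safety
 * Package (asp) capability is present, using the platform configuration.
 */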
3227 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3228 {
3229         if (priv->dma_cap.asp) {
3230                 netdev_info(priv->dev, "Enabling Safety Features\n");
3231                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3232                                           priv->plat->safety_feat_cfg);
3233         } else {
3234                 netdev_info(priv->dev, "No Safety Features support found\n");
3235         }
3236 }
3237
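/**
 * stmmac_fpe_start_wq - create the Frame Preemption (FPE) workqueue
 * @priv: driver private structure
 * Description: create the single-threaded workqueue used to run the FPE
 * handshake task. Returns 0 on success or -ENOMEM on failure.
 */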
3238 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3239 {
3240         char *name;
3241
3242         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3243         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3244
3245         name = priv->wq_name;
3246         sprintf(name, "%s-fpe", priv->dev->name);
3247
3248         priv->fpe_wq = create_singlethread_workqueue(name);
3249         if (!priv->fpe_wq) {
3250                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3251
3252                 return -ENOMEM;
3253         }
3254         netdev_info(priv->dev, "FPE workqueue start");
3255
3256         return 0;
3257 }
3258
3259 /**
3260  * stmmac_hw_setup - setup mac in a usable state.
3261  *  @dev : pointer to the device structure.
3262  *  @ptp_register: register PTP if set
3263  *  Description:
3264  *  this is the main function to setup the HW in a usable state: the
3265  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3266  *  checksum features, timers) and the DMA is ready to start receiving
3267  *  and transmitting.
3268  *  Return value:
3269  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3270  *  file on failure.
3271  */
3272 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3273 {
3274         struct stmmac_priv *priv = netdev_priv(dev);
3275         u32 rx_cnt = priv->plat->rx_queues_to_use;
3276         u32 tx_cnt = priv->plat->tx_queues_to_use;
3277         bool sph_en;
3278         u32 chan;
3279         int ret;
3280
3281         /* DMA initialization and SW reset */
3282         ret = stmmac_init_dma_engine(priv);
3283         if (ret < 0) {
3284                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3285                            __func__);
3286                 return ret;
3287         }
3288
3289         /* Copy the MAC addr into the HW  */
3290         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3291
3292         /* PS and related bits will be programmed according to the speed */
3293         if (priv->hw->pcs) {
3294                 int speed = priv->plat->mac_port_sel_speed;
3295
3296                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3297                     (speed == SPEED_1000)) {
3298                         priv->hw->ps = speed;
3299                 } else {
3300                         dev_warn(priv->device, "invalid port speed\n");
3301                         priv->hw->ps = 0;
3302                 }
3303         }
3304
3305         /* Initialize the MAC Core */
3306         stmmac_core_init(priv, priv->hw, dev);
3307
3308         /* Initialize MTL */
3309         stmmac_mtl_configuration(priv);
3310
3311         /* Initialize Safety Features */
3312         stmmac_safety_feat_configuration(priv);
3313
3314         ret = stmmac_rx_ipc(priv, priv->hw);
3315         if (!ret) {
3316                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3317                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3318                 priv->hw->rx_csum = 0;
3319         }
3320
3321         /* Enable the MAC Rx/Tx */
3322         stmmac_mac_set(priv, priv->ioaddr, true);
3323
3324         /* Set the HW DMA mode and the COE */
3325         stmmac_dma_operation_mode(priv);
3326
3327         stmmac_mmc_setup(priv);
3328
3329         if (ptp_register) {
3330                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3331                 if (ret < 0)
3332                         netdev_warn(priv->dev,
3333                                     "failed to enable PTP reference clock: %pe\n",
3334                                     ERR_PTR(ret));
3335         }
3336
3337         ret = stmmac_init_ptp(priv);
3338         if (ret == -EOPNOTSUPP)
3339                 netdev_info(priv->dev, "PTP not supported by HW\n");
3340         else if (ret)
3341                 netdev_warn(priv->dev, "PTP init failed\n");
3342         else if (ptp_register)
3343                 stmmac_ptp_register(priv);
3344
3345         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3346
3347         /* Convert the timer from msec to usec */
3348         if (!priv->tx_lpi_timer)
3349                 priv->tx_lpi_timer = eee_timer * 1000;
3350
3351         if (priv->use_riwt) {
3352                 u32 queue;
3353
3354                 for (queue = 0; queue < rx_cnt; queue++) {
3355                         if (!priv->rx_riwt[queue])
3356                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3357
3358                         stmmac_rx_watchdog(priv, priv->ioaddr,
3359                                            priv->rx_riwt[queue], queue);
3360                 }
3361         }
3362
3363         if (priv->hw->pcs)
3364                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3365
3366         /* set TX and RX rings length */
3367         stmmac_set_rings_length(priv);
3368
3369         /* Enable TSO */
3370         if (priv->tso) {
3371                 for (chan = 0; chan < tx_cnt; chan++) {
3372                         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3373
3374                         /* TSO and TBS cannot co-exist */
3375                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3376                                 continue;
3377
3378                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3379                 }
3380         }
3381
3382         /* Enable Split Header */
3383         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3384         for (chan = 0; chan < rx_cnt; chan++)
3385                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3386
3387
3388         /* VLAN Tag Insertion */
3389         if (priv->dma_cap.vlins)
3390                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3391
3392         /* TBS */
3393         for (chan = 0; chan < tx_cnt; chan++) {
3394                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3395                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3396
3397                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3398         }
3399
3400         /* Configure real RX and TX queues */
3401         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3402         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3403
3404         /* Start the ball rolling... */
3405         stmmac_start_all_dma(priv);
3406
3407         if (priv->dma_cap.fpesel) {
3408                 stmmac_fpe_start_wq(priv);
3409
3410                 if (priv->plat->fpe_cfg->enable)
3411                         stmmac_fpe_handshake(priv, true);
3412         }
3413
3414         return 0;
3415 }
3416
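/**
 * stmmac_hw_teardown - undo the clock setup done by stmmac_hw_setup
 * @dev: device pointer
 * Description: disable the PTP reference clock.
 */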
3417 static void stmmac_hw_teardown(struct net_device *dev)
3418 {
3419         struct stmmac_priv *priv = netdev_priv(dev);
3420
3421         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3422 }
3423
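/**
 * stmmac_free_irq - free the IRQ lines requested by the driver
 * @dev: device pointer
 * @irq_err: how far the IRQ request sequence got before failing
 * @irq_idx: index of the per-channel IRQ at which the request failed (when applicable)
 * Description: release the IRQ lines in reverse order of allocation, so
 * that only the lines that were actually requested are freed.
 */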
3424 static void stmmac_free_irq(struct net_device *dev,
3425                             enum request_irq_err irq_err, int irq_idx)
3426 {
3427         struct stmmac_priv *priv = netdev_priv(dev);
3428         int j;
3429
3430         switch (irq_err) {
3431         case REQ_IRQ_ERR_ALL:
3432                 irq_idx = priv->plat->tx_queues_to_use;
3433                 fallthrough;
3434         case REQ_IRQ_ERR_TX:
3435                 for (j = irq_idx - 1; j >= 0; j--) {
3436                         if (priv->tx_irq[j] > 0) {
3437                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3438                                 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3439                         }
3440                 }
3441                 irq_idx = priv->plat->rx_queues_to_use;
3442                 fallthrough;
3443         case REQ_IRQ_ERR_RX:
3444                 for (j = irq_idx - 1; j >= 0; j--) {
3445                         if (priv->rx_irq[j] > 0) {
3446                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3447                                 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3448                         }
3449                 }
3450
3451                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3452                         free_irq(priv->sfty_ue_irq, dev);
3453                 fallthrough;
3454         case REQ_IRQ_ERR_SFTY_UE:
3455                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3456                         free_irq(priv->sfty_ce_irq, dev);
3457                 fallthrough;
3458         case REQ_IRQ_ERR_SFTY_CE:
3459                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3460                         free_irq(priv->lpi_irq, dev);
3461                 fallthrough;
3462         case REQ_IRQ_ERR_LPI:
3463                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3464                         free_irq(priv->wol_irq, dev);
3465                 fallthrough;
3466         case REQ_IRQ_ERR_WOL:
3467                 free_irq(dev->irq, dev);
3468                 fallthrough;
3469         case REQ_IRQ_ERR_MAC:
3470         case REQ_IRQ_ERR_NO:
3471                 /* If MAC IRQ request error, no more IRQ to free */
3472                 break;
3473         }
3474 }
3475
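/**
 * stmmac_request_irq_multi_msi - request the MSI interrupt vectors
 * @dev: device pointer
 * Description: request the common MAC vector, the optional WoL/LPI/safety
 * vectors and one vector per RX/TX channel, spreading the per-channel
 * vectors across the online CPUs through IRQ affinity hints.
 */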
3476 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3477 {
3478         struct stmmac_priv *priv = netdev_priv(dev);
3479         enum request_irq_err irq_err;
3480         cpumask_t cpu_mask;
3481         int irq_idx = 0;
3482         char *int_name;
3483         int ret;
3484         int i;
3485
3486         /* For common interrupt */
3487         int_name = priv->int_name_mac;
3488         sprintf(int_name, "%s:%s", dev->name, "mac");
3489         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3490                           0, int_name, dev);
3491         if (unlikely(ret < 0)) {
3492                 netdev_err(priv->dev,
3493                            "%s: alloc mac MSI %d (error: %d)\n",
3494                            __func__, dev->irq, ret);
3495                 irq_err = REQ_IRQ_ERR_MAC;
3496                 goto irq_error;
3497         }
3498
3499         /* Request the Wake IRQ in case another line
3500          * is used for WoL
3501          */
3502         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3503                 int_name = priv->int_name_wol;
3504                 sprintf(int_name, "%s:%s", dev->name, "wol");
3505                 ret = request_irq(priv->wol_irq,
3506                                   stmmac_mac_interrupt,
3507                                   0, int_name, dev);
3508                 if (unlikely(ret < 0)) {
3509                         netdev_err(priv->dev,
3510                                    "%s: alloc wol MSI %d (error: %d)\n",
3511                                    __func__, priv->wol_irq, ret);
3512                         irq_err = REQ_IRQ_ERR_WOL;
3513                         goto irq_error;
3514                 }
3515         }
3516
3517         /* Request the LPI IRQ in case another line
3518          * is used for LPI
3519          */
3520         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3521                 int_name = priv->int_name_lpi;
3522                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3523                 ret = request_irq(priv->lpi_irq,
3524                                   stmmac_mac_interrupt,
3525                                   0, int_name, dev);
3526                 if (unlikely(ret < 0)) {
3527                         netdev_err(priv->dev,
3528                                    "%s: alloc lpi MSI %d (error: %d)\n",
3529                                    __func__, priv->lpi_irq, ret);
3530                         irq_err = REQ_IRQ_ERR_LPI;
3531                         goto irq_error;
3532                 }
3533         }
3534
3535         /* Request the Safety Feature Correctable Error line in
3536          * case another line is used
3537          */
3538         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3539                 int_name = priv->int_name_sfty_ce;
3540                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3541                 ret = request_irq(priv->sfty_ce_irq,
3542                                   stmmac_safety_interrupt,
3543                                   0, int_name, dev);
3544                 if (unlikely(ret < 0)) {
3545                         netdev_err(priv->dev,
3546                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3547                                    __func__, priv->sfty_ce_irq, ret);
3548                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3549                         goto irq_error;
3550                 }
3551         }
3552
3553         /* Request the Safety Feature Uncorrectable Error line in
3554          * case another line is used
3555          */
3556         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3557                 int_name = priv->int_name_sfty_ue;
3558                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3559                 ret = request_irq(priv->sfty_ue_irq,
3560                                   stmmac_safety_interrupt,
3561                                   0, int_name, dev);
3562                 if (unlikely(ret < 0)) {
3563                         netdev_err(priv->dev,
3564                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3565                                    __func__, priv->sfty_ue_irq, ret);
3566                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3567                         goto irq_error;
3568                 }
3569         }
3570
3571         /* Request Rx MSI irq */
3572         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3573                 if (i >= MTL_MAX_RX_QUEUES)
3574                         break;
3575                 if (priv->rx_irq[i] == 0)
3576                         continue;
3577
3578                 int_name = priv->int_name_rx_irq[i];
3579                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3580                 ret = request_irq(priv->rx_irq[i],
3581                                   stmmac_msi_intr_rx,
3582                                   0, int_name, &priv->dma_conf.rx_queue[i]);
3583                 if (unlikely(ret < 0)) {
3584                         netdev_err(priv->dev,
3585                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3586                                    __func__, i, priv->rx_irq[i], ret);
3587                         irq_err = REQ_IRQ_ERR_RX;
3588                         irq_idx = i;
3589                         goto irq_error;
3590                 }
3591                 cpumask_clear(&cpu_mask);
3592                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3593                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3594         }
3595
3596         /* Request Tx MSI irq */
3597         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3598                 if (i >= MTL_MAX_TX_QUEUES)
3599                         break;
3600                 if (priv->tx_irq[i] == 0)
3601                         continue;
3602
3603                 int_name = priv->int_name_tx_irq[i];
3604                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3605                 ret = request_irq(priv->tx_irq[i],
3606                                   stmmac_msi_intr_tx,
3607                                   0, int_name, &priv->dma_conf.tx_queue[i]);
3608                 if (unlikely(ret < 0)) {
3609                         netdev_err(priv->dev,
3610                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3611                                    __func__, i, priv->tx_irq[i], ret);
3612                         irq_err = REQ_IRQ_ERR_TX;
3613                         irq_idx = i;
3614                         goto irq_error;
3615                 }
3616                 cpumask_clear(&cpu_mask);
3617                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3618                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3619         }
3620
3621         return 0;
3622
3623 irq_error:
3624         stmmac_free_irq(dev, irq_err, irq_idx);
3625         return ret;
3626 }
3627
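/**
 * stmmac_request_irq_single - request the shared IRQ line(s)
 * @dev: device pointer
 * Description: request the single shared IRQ line plus the optional
 * dedicated WoL and LPI lines, used when per-channel MSI vectors are not
 * available.
 */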
3628 static int stmmac_request_irq_single(struct net_device *dev)
3629 {
3630         struct stmmac_priv *priv = netdev_priv(dev);
3631         enum request_irq_err irq_err;
3632         int ret;
3633
3634         ret = request_irq(dev->irq, stmmac_interrupt,
3635                           IRQF_SHARED, dev->name, dev);
3636         if (unlikely(ret < 0)) {
3637                 netdev_err(priv->dev,
3638                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3639                            __func__, dev->irq, ret);
3640                 irq_err = REQ_IRQ_ERR_MAC;
3641                 goto irq_error;
3642         }
3643
3644         /* Request the Wake IRQ in case another line
3645          * is used for WoL
3646          */
3647         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3648                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3649                                   IRQF_SHARED, dev->name, dev);
3650                 if (unlikely(ret < 0)) {
3651                         netdev_err(priv->dev,
3652                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3653                                    __func__, priv->wol_irq, ret);
3654                         irq_err = REQ_IRQ_ERR_WOL;
3655                         goto irq_error;
3656                 }
3657         }
3658
3659         /* Request the LPI IRQ in case another line is used for LPI */
3660         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3661                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3662                                   IRQF_SHARED, dev->name, dev);
3663                 if (unlikely(ret < 0)) {
3664                         netdev_err(priv->dev,
3665                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3666                                    __func__, priv->lpi_irq, ret);
3667                         irq_err = REQ_IRQ_ERR_LPI;
3668                         goto irq_error;
3669                 }
3670         }
3671
3672         return 0;
3673
3674 irq_error:
3675         stmmac_free_irq(dev, irq_err, 0);
3676         return ret;
3677 }
3678
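/**
 * stmmac_request_irq - request the driver IRQ lines
 * @dev: device pointer
 * Description: request either the per-channel MSI vectors or the single
 * shared IRQ line, depending on the platform configuration.
 */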
3679 static int stmmac_request_irq(struct net_device *dev)
3680 {
3681         struct stmmac_priv *priv = netdev_priv(dev);
3682         int ret;
3683
3684         /* Request the IRQ lines */
3685         if (priv->plat->multi_msi_en)
3686                 ret = stmmac_request_irq_multi_msi(dev);
3687         else
3688                 ret = stmmac_request_irq_single(dev);
3689
3690         return ret;
3691 }
3692
3693 /**
3694  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3695  *  @priv: driver private structure
3696  *  @mtu: MTU to setup the dma queue and buf with
3697  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3698  *  Allocate the Tx/Rx DMA queues and initialize them.
3699  *  Return value:
3700  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3701  */
3702 static struct stmmac_dma_conf *
3703 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3704 {
3705         struct stmmac_dma_conf *dma_conf;
3706         int chan, bfsize, ret;
3707
3708         dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3709         if (!dma_conf) {
3710                 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3711                            __func__);
3712                 return ERR_PTR(-ENOMEM);
3713         }
3714
3715         bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3716         if (bfsize < 0)
3717                 bfsize = 0;
3718
3719         if (bfsize < BUF_SIZE_16KiB)
3720                 bfsize = stmmac_set_bfsize(mtu, 0);
3721
3722         dma_conf->dma_buf_sz = bfsize;
3723         /* Choose the tx/rx size from the one already defined in the
3724          * priv struct (if defined).
3725          */
3726         dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3727         dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3728
3729         if (!dma_conf->dma_tx_size)
3730                 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3731         if (!dma_conf->dma_rx_size)
3732                 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3733
3734         /* Earlier check for TBS */
3735         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3736                 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3737                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3738
3739                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3740                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3741         }
3742
3743         ret = alloc_dma_desc_resources(priv, dma_conf);
3744         if (ret < 0) {
3745                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3746                            __func__);
3747                 goto alloc_error;
3748         }
3749
3750         ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3751         if (ret < 0) {
3752                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3753                            __func__);
3754                 goto init_error;
3755         }
3756
3757         return dma_conf;
3758
3759 init_error:
3760         free_dma_desc_resources(priv, dma_conf);
3761 alloc_error:
3762         kfree(dma_conf);
3763         return ERR_PTR(ret);
3764 }
3765
3766 /**
3767  *  __stmmac_open - open entry point of the driver
3768  *  @dev : pointer to the device structure.
3769  *  @dma_conf :  structure to take the dma data
3770  *  Description:
3771  *  This function is the open entry point of the driver.
3772  *  Return value:
3773  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3774  *  file on failure.
3775  */
3776 static int __stmmac_open(struct net_device *dev,
3777                          struct stmmac_dma_conf *dma_conf)
3778 {
3779         struct stmmac_priv *priv = netdev_priv(dev);
3780         int mode = priv->plat->phy_interface;
3781         u32 chan;
3782         int ret;
3783
3784         ret = pm_runtime_resume_and_get(priv->device);
3785         if (ret < 0)
3786                 return ret;
3787
3788         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3789             priv->hw->pcs != STMMAC_PCS_RTBI &&
3790             (!priv->hw->xpcs ||
3791              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3792                 ret = stmmac_init_phy(dev);
3793                 if (ret) {
3794                         netdev_err(priv->dev,
3795                                    "%s: Cannot attach to PHY (error: %d)\n",
3796                                    __func__, ret);
3797                         goto init_phy_error;
3798                 }
3799         }
3800
3801         /* Extra statistics */
3802         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3803         priv->xstats.threshold = tc;
3804
3805         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3806
3807         buf_sz = dma_conf->dma_buf_sz;
3808         memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3809
3810         stmmac_reset_queues_param(priv);
3811
3812         if (priv->plat->serdes_powerup) {
3813                 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3814                 if (ret < 0) {
3815                         netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3816                                    __func__);
3817                         goto init_error;
3818                 }
3819         }
3820
3821         ret = stmmac_hw_setup(dev, true);
3822         if (ret < 0) {
3823                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3824                 goto init_error;
3825         }
3826
3827         stmmac_init_coalesce(priv);
3828
3829         phylink_start(priv->phylink);
3830         /* We may have called phylink_speed_down before */
3831         phylink_speed_up(priv->phylink);
3832
3833         ret = stmmac_request_irq(dev);
3834         if (ret)
3835                 goto irq_error;
3836
3837         stmmac_enable_all_queues(priv);
3838         netif_tx_start_all_queues(priv->dev);
3839         stmmac_enable_all_dma_irq(priv);
3840
3841         return 0;
3842
3843 irq_error:
3844         phylink_stop(priv->phylink);
3845
3846         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3847                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3848
3849         stmmac_hw_teardown(dev);
3850 init_error:
3851         free_dma_desc_resources(priv, &priv->dma_conf);
3852         phylink_disconnect_phy(priv->phylink);
3853 init_phy_error:
3854         pm_runtime_put(priv->device);
3855         return ret;
3856 }
3857
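/**
 * stmmac_open - open entry point of the driver
 * @dev: device pointer
 * Description: allocate a DMA configuration for the current MTU and bring
 * the interface up through __stmmac_open().
 */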
3858 static int stmmac_open(struct net_device *dev)
3859 {
3860         struct stmmac_priv *priv = netdev_priv(dev);
3861         struct stmmac_dma_conf *dma_conf;
3862         int ret;
3863
3864         dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3865         if (IS_ERR(dma_conf))
3866                 return PTR_ERR(dma_conf);
3867
3868         ret = __stmmac_open(dev, dma_conf);
3869         kfree(dma_conf);
3870         return ret;
3871 }
3872
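/**
 * stmmac_fpe_stop_wq - stop and destroy the FPE workqueue
 * @priv: driver private structure
 * Description: mark the FPE task as being removed and destroy the
 * workqueue created by stmmac_fpe_start_wq().
 */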
3873 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3874 {
3875         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3876
3877         if (priv->fpe_wq)
3878                 destroy_workqueue(priv->fpe_wq);
3879
3880         netdev_info(priv->dev, "FPE workqueue stop");
3881 }
3882
3883 /**
3884  *  stmmac_release - close entry point of the driver
3885  *  @dev : device pointer.
3886  *  Description:
3887  *  This is the stop entry point of the driver.
3888  */
3889 static int stmmac_release(struct net_device *dev)
3890 {
3891         struct stmmac_priv *priv = netdev_priv(dev);
3892         u32 chan;
3893
3894         if (device_may_wakeup(priv->device))
3895                 phylink_speed_down(priv->phylink, false);
3896         /* Stop and disconnect the PHY */
3897         phylink_stop(priv->phylink);
3898         phylink_disconnect_phy(priv->phylink);
3899
3900         stmmac_disable_all_queues(priv);
3901
3902         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3903                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3904
3905         netif_tx_disable(dev);
3906
3907         /* Free the IRQ lines */
3908         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3909
3910         if (priv->eee_enabled) {
3911                 priv->tx_path_in_lpi_mode = false;
3912                 del_timer_sync(&priv->eee_ctrl_timer);
3913         }
3914
3915         /* Stop TX/RX DMA and clear the descriptors */
3916         stmmac_stop_all_dma(priv);
3917
3918         /* Release and free the Rx/Tx resources */
3919         free_dma_desc_resources(priv, &priv->dma_conf);
3920
3921         /* Disable the MAC Rx/Tx */
3922         stmmac_mac_set(priv, priv->ioaddr, false);
3923
3924         /* Powerdown Serdes if there is */
3925         if (priv->plat->serdes_powerdown)
3926                 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
3927
3928         netif_carrier_off(dev);
3929
3930         stmmac_release_ptp(priv);
3931
3932         pm_runtime_put(priv->device);
3933
3934         if (priv->dma_cap.fpesel)
3935                 stmmac_fpe_stop_wq(priv);
3936
3937         return 0;
3938 }
3939
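/**
 * stmmac_vlan_insert - request HW VLAN tag insertion for an skb
 * @priv: driver private structure
 * @skb: socket buffer carrying the VLAN tag
 * @tx_q: TX queue to take the descriptor from
 * Description: program a descriptor so the HW inserts the VLAN tag carried
 * by the skb. Returns true when a descriptor has been consumed for the tag,
 * false otherwise.
 */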
3940 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3941                                struct stmmac_tx_queue *tx_q)
3942 {
3943         u16 tag = 0x0, inner_tag = 0x0;
3944         u32 inner_type = 0x0;
3945         struct dma_desc *p;
3946
3947         if (!priv->dma_cap.vlins)
3948                 return false;
3949         if (!skb_vlan_tag_present(skb))
3950                 return false;
3951         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3952                 inner_tag = skb_vlan_tag_get(skb);
3953                 inner_type = STMMAC_VLAN_INSERT;
3954         }
3955
3956         tag = skb_vlan_tag_get(skb);
3957
3958         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3959                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3960         else
3961                 p = &tx_q->dma_tx[tx_q->cur_tx];
3962
3963         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3964                 return false;
3965
3966         stmmac_set_tx_owner(priv, p);
3967         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
3968         return true;
3969 }
3970
3971 /**
3972  *  stmmac_tso_allocator - allocate and fill TX descriptors for a TSO payload
3973  *  @priv: driver private structure
3974  *  @des: buffer start address
3975  *  @total_len: total length to fill in descriptors
3976  *  @last_segment: condition for the last descriptor
3977  *  @queue: TX queue index
3978  *  Description:
3979  *  This function fills descriptor and request new descriptors according to
3980  *  buffer length to fill
3981  */
3982 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3983                                  int total_len, bool last_segment, u32 queue)
3984 {
3985         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3986         struct dma_desc *desc;
3987         u32 buff_size;
3988         int tmp_len;
3989
3990         tmp_len = total_len;
3991
3992         while (tmp_len > 0) {
3993                 dma_addr_t curr_addr;
3994
3995                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3996                                                 priv->dma_conf.dma_tx_size);
3997                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3998
3999                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4000                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4001                 else
4002                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4003
4004                 curr_addr = des + (total_len - tmp_len);
4005                 if (priv->dma_cap.addr64 <= 32)
4006                         desc->des0 = cpu_to_le32(curr_addr);
4007                 else
4008                         stmmac_set_desc_addr(priv, desc, curr_addr);
4009
4010                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4011                             TSO_MAX_BUFF_SIZE : tmp_len;
4012
4013                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4014                                 0, 1,
4015                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4016                                 0, 0);
4017
4018                 tmp_len -= TSO_MAX_BUFF_SIZE;
4019         }
4020 }
4021
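/**
 * stmmac_flush_tx_descriptors - update the TX tail pointer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: issue a write barrier so the descriptor contents (including
 * the own bit) are visible to the HW, then move the TX tail pointer so the
 * DMA engine processes the newly prepared descriptors.
 */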
4022 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4023 {
4024         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4025         int desc_size;
4026
4027         if (likely(priv->extend_desc))
4028                 desc_size = sizeof(struct dma_extended_desc);
4029         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4030                 desc_size = sizeof(struct dma_edesc);
4031         else
4032                 desc_size = sizeof(struct dma_desc);
4033
4034         /* The own bit must be the last thing set when preparing the
4035          * descriptor, and a barrier is needed to make sure that
4036          * everything is coherent before granting ownership to the DMA engine.
4037          */
4038         wmb();
4039
4040         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4041         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4042 }
4043
4044 /**
4045  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4046  *  @skb : the socket buffer
4047  *  @dev : device pointer
4048  *  Description: this is the transmit function that is called on TSO frames
4049  *  (support available on GMAC4 and newer chips).
4050  *  The diagram below shows the ring programming in case of TSO frames:
4051  *
4052  *  First Descriptor
4053  *   --------
4054  *   | DES0 |---> buffer1 = L2/L3/L4 header
4055  *   | DES1 |---> TCP Payload (can continue on next descr...)
4056  *   | DES2 |---> buffer 1 and 2 len
4057  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4058  *   --------
4059  *      |
4060  *     ...
4061  *      |
4062  *   --------
4063  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4064  *   | DES1 | --|
4065  *   | DES2 | --> buffer 1 and 2 len
4066  *   | DES3 |
4067  *   --------
4068  *
4069  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
4070  */
4071 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4072 {
4073         struct dma_desc *desc, *first, *mss_desc = NULL;
4074         struct stmmac_priv *priv = netdev_priv(dev);
4075         int nfrags = skb_shinfo(skb)->nr_frags;
4076         u32 queue = skb_get_queue_mapping(skb);
4077         unsigned int first_entry, tx_packets;
4078         int tmp_pay_len = 0, first_tx;
4079         struct stmmac_tx_queue *tx_q;
4080         bool has_vlan, set_ic;
4081         u8 proto_hdr_len, hdr;
4082         u32 pay_len, mss;
4083         dma_addr_t des;
4084         int i;
4085
4086         tx_q = &priv->dma_conf.tx_queue[queue];
4087         first_tx = tx_q->cur_tx;
4088
4089         /* Compute header lengths */
4090         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4091                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4092                 hdr = sizeof(struct udphdr);
4093         } else {
4094                 proto_hdr_len = skb_tcp_all_headers(skb);
4095                 hdr = tcp_hdrlen(skb);
4096         }
4097
4098         /* Desc availability based on threshold should be safe enough */
4099         if (unlikely(stmmac_tx_avail(priv, queue) <
4100                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4101                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4102                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4103                                                                 queue));
4104                         /* This is a hard error, log it. */
4105                         netdev_err(priv->dev,
4106                                    "%s: Tx Ring full when queue awake\n",
4107                                    __func__);
4108                 }
4109                 return NETDEV_TX_BUSY;
4110         }
4111
4112         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4113
4114         mss = skb_shinfo(skb)->gso_size;
4115
4116         /* set new MSS value if needed */
4117         if (mss != tx_q->mss) {
4118                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4119                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4120                 else
4121                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4122
4123                 stmmac_set_mss(priv, mss_desc, mss);
4124                 tx_q->mss = mss;
4125                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4126                                                 priv->dma_conf.dma_tx_size);
4127                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4128         }
4129
4130         if (netif_msg_tx_queued(priv)) {
4131                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4132                         __func__, hdr, proto_hdr_len, pay_len, mss);
4133                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4134                         skb->data_len);
4135         }
4136
4137         /* Check if VLAN can be inserted by HW */
4138         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4139
4140         first_entry = tx_q->cur_tx;
4141         WARN_ON(tx_q->tx_skbuff[first_entry]);
4142
4143         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4144                 desc = &tx_q->dma_entx[first_entry].basic;
4145         else
4146                 desc = &tx_q->dma_tx[first_entry];
4147         first = desc;
4148
4149         if (has_vlan)
4150                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4151
4152         /* first descriptor: fill Headers on Buf1 */
4153         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4154                              DMA_TO_DEVICE);
4155         if (dma_mapping_error(priv->device, des))
4156                 goto dma_map_err;
4157
4158         tx_q->tx_skbuff_dma[first_entry].buf = des;
4159         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4160         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4161         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4162
4163         if (priv->dma_cap.addr64 <= 32) {
4164                 first->des0 = cpu_to_le32(des);
4165
4166                 /* Fill start of payload in buff2 of first descriptor */
4167                 if (pay_len)
4168                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4169
4170                 /* If needed take extra descriptors to fill the remaining payload */
4171                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4172         } else {
4173                 stmmac_set_desc_addr(priv, first, des);
4174                 tmp_pay_len = pay_len;
4175                 des += proto_hdr_len;
4176                 pay_len = 0;
4177         }
4178
4179         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4180
4181         /* Prepare fragments */
4182         for (i = 0; i < nfrags; i++) {
4183                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4184
4185                 des = skb_frag_dma_map(priv->device, frag, 0,
4186                                        skb_frag_size(frag),
4187                                        DMA_TO_DEVICE);
4188                 if (dma_mapping_error(priv->device, des))
4189                         goto dma_map_err;
4190
4191                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4192                                      (i == nfrags - 1), queue);
4193
4194                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4195                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4196                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4197                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4198         }
4199
4200         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4201
4202         /* Only the last descriptor gets to point to the skb. */
4203         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4204         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4205
4206         /* Manage tx mitigation */
4207         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4208         tx_q->tx_count_frames += tx_packets;
4209
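        /* Decide whether this frame should raise a TX completion interrupt:
         * always when a HW timestamp is requested, never when frame
         * coalescing is disabled (the TX timer handles cleanup), otherwise
         * whenever the accumulated frame count crosses a multiple of
         * tx_coal_frames.
         */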
4210         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4211                 set_ic = true;
4212         else if (!priv->tx_coal_frames[queue])
4213                 set_ic = false;
4214         else if (tx_packets > priv->tx_coal_frames[queue])
4215                 set_ic = true;
4216         else if ((tx_q->tx_count_frames %
4217                   priv->tx_coal_frames[queue]) < tx_packets)
4218                 set_ic = true;
4219         else
4220                 set_ic = false;
4221
4222         if (set_ic) {
4223                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4224                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4225                 else
4226                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4227
4228                 tx_q->tx_count_frames = 0;
4229                 stmmac_set_tx_ic(priv, desc);
4230                 priv->xstats.tx_set_ic_bit++;
4231         }
4232
4233         /* We've used all descriptors we need for this skb, however,
4234          * advance cur_tx so that it references a fresh descriptor.
4235          * ndo_start_xmit will fill this descriptor the next time it's
4236          * called and stmmac_tx_clean may clean up to this descriptor.
4237          */
4238         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4239
4240         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4241                 netif_dbg(priv, hw, priv->dev, "%s: stopping transmit queue\n",
4242                           __func__);
4243                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4244         }
4245
4246         dev->stats.tx_bytes += skb->len;
4247         priv->xstats.tx_tso_frames++;
4248         priv->xstats.tx_tso_nfrags += nfrags;
4249
4250         if (priv->sarc_type)
4251                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4252
4253         skb_tx_timestamp(skb);
4254
4255         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4256                      priv->hwts_tx_en)) {
4257                 /* declare that device is doing timestamping */
4258                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4259                 stmmac_enable_tx_timestamp(priv, first);
4260         }
4261
4262         /* Complete the first descriptor before granting the DMA */
4263         stmmac_prepare_tso_tx_desc(priv, first, 1,
4264                         proto_hdr_len,
4265                         pay_len,
4266                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4267                         hdr / 4, (skb->len - proto_hdr_len));
4268
4269         /* If context desc is used to change MSS */
4270         if (mss_desc) {
4271                 /* Make sure that first descriptor has been completely
4272                  * written, including its own bit. This is because MSS is
4273                  * actually before first descriptor, so we need to make
4274                  * sure that MSS's own bit is the last thing written.
4275                  */
4276                 dma_wmb();
4277                 stmmac_set_tx_owner(priv, mss_desc);
4278         }
4279
4280         if (netif_msg_pktdata(priv)) {
4281                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4282                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4283                         tx_q->cur_tx, first, nfrags);
4284                 pr_info(">>> frame to be transmitted: ");
4285                 print_pkt(skb->data, skb_headlen(skb));
4286         }
4287
4288         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4289
4290         stmmac_flush_tx_descriptors(priv, queue);
4291         stmmac_tx_timer_arm(priv, queue);
4292
4293         return NETDEV_TX_OK;
4294
4295 dma_map_err:
4296         dev_err(priv->device, "Tx dma map failed\n");
4297         dev_kfree_skb(skb);
4298         priv->dev->stats.tx_dropped++;
4299         return NETDEV_TX_OK;
4300 }
4301
4302 /**
4303  *  stmmac_xmit - Tx entry point of the driver
4304  *  @skb : the socket buffer
4305  *  @dev : device pointer
4306  *  Description : this is the tx entry point of the driver.
4307  *  It programs the chain or the ring and supports oversized frames
4308  *  and SG feature.
4309  */
4310 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4311 {
4312         unsigned int first_entry, tx_packets, enh_desc;
4313         struct stmmac_priv *priv = netdev_priv(dev);
4314         unsigned int nopaged_len = skb_headlen(skb);
4315         int i, csum_insertion = 0, is_jumbo = 0;
4316         u32 queue = skb_get_queue_mapping(skb);
4317         int nfrags = skb_shinfo(skb)->nr_frags;
4318         int gso = skb_shinfo(skb)->gso_type;
4319         struct dma_edesc *tbs_desc = NULL;
4320         struct dma_desc *desc, *first;
4321         struct stmmac_tx_queue *tx_q;
4322         bool has_vlan, set_ic;
4323         int entry, first_tx;
4324         dma_addr_t des;
4325
4326         tx_q = &priv->dma_conf.tx_queue[queue];
4327         first_tx = tx_q->cur_tx;
4328
4329         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4330                 stmmac_disable_eee_mode(priv);
4331
4332         /* Manage oversized TCP frames for GMAC4 device */
4333         if (skb_is_gso(skb) && priv->tso) {
4334                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4335                         return stmmac_tso_xmit(skb, dev);
4336                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4337                         return stmmac_tso_xmit(skb, dev);
4338         }
4339
4340         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4341                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4342                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4343                                                                 queue));
4344                         /* This is a hard error, log it. */
4345                         netdev_err(priv->dev,
4346                                    "%s: Tx Ring full when queue awake\n",
4347                                    __func__);
4348                 }
4349                 return NETDEV_TX_BUSY;
4350         }
4351
4352         /* Check if VLAN can be inserted by HW */
4353         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4354
4355         entry = tx_q->cur_tx;
4356         first_entry = entry;
4357         WARN_ON(tx_q->tx_skbuff[first_entry]);
4358
4359         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4360
4361         if (likely(priv->extend_desc))
4362                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4363         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4364                 desc = &tx_q->dma_entx[entry].basic;
4365         else
4366                 desc = tx_q->dma_tx + entry;
4367
4368         first = desc;
4369
4370         if (has_vlan)
4371                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4372
4373         enh_desc = priv->plat->enh_desc;
4374         /* To program the descriptors according to the size of the frame */
4375         if (enh_desc)
4376                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4377
4378         if (unlikely(is_jumbo)) {
4379                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4380                 if (unlikely(entry < 0) && (entry != -EINVAL))
4381                         goto dma_map_err;
4382         }
4383
4384         for (i = 0; i < nfrags; i++) {
4385                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4386                 int len = skb_frag_size(frag);
4387                 bool last_segment = (i == (nfrags - 1));
4388
4389                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4390                 WARN_ON(tx_q->tx_skbuff[entry]);
4391
4392                 if (likely(priv->extend_desc))
4393                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4394                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4395                         desc = &tx_q->dma_entx[entry].basic;
4396                 else
4397                         desc = tx_q->dma_tx + entry;
4398
4399                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4400                                        DMA_TO_DEVICE);
4401                 if (dma_mapping_error(priv->device, des))
4402                         goto dma_map_err; /* should reuse desc w/o issues */
4403
4404                 tx_q->tx_skbuff_dma[entry].buf = des;
4405
4406                 stmmac_set_desc_addr(priv, desc, des);
4407
4408                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4409                 tx_q->tx_skbuff_dma[entry].len = len;
4410                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4411                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4412
4413                 /* Prepare the descriptor and set the own bit too */
4414                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4415                                 priv->mode, 1, last_segment, skb->len);
4416         }
4417
4418         /* Only the last descriptor gets to point to the skb. */
4419         tx_q->tx_skbuff[entry] = skb;
4420         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4421
4422         /* According to the coalesce parameter the IC bit for the latest
4423          * segment is reset and the timer re-started to clean the tx status.
4424          * This approach takes care of the fragments: desc is the first
4425          * element in case of no SG.
4426          */
4427         tx_packets = (entry + 1) - first_tx;
4428         tx_q->tx_count_frames += tx_packets;
4429
4430         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4431                 set_ic = true;
4432         else if (!priv->tx_coal_frames[queue])
4433                 set_ic = false;
4434         else if (tx_packets > priv->tx_coal_frames[queue])
4435                 set_ic = true;
4436         else if ((tx_q->tx_count_frames %
4437                   priv->tx_coal_frames[queue]) < tx_packets)
4438                 set_ic = true;
4439         else
4440                 set_ic = false;
4441
4442         if (set_ic) {
4443                 if (likely(priv->extend_desc))
4444                         desc = &tx_q->dma_etx[entry].basic;
4445                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4446                         desc = &tx_q->dma_entx[entry].basic;
4447                 else
4448                         desc = &tx_q->dma_tx[entry];
4449
4450                 tx_q->tx_count_frames = 0;
4451                 stmmac_set_tx_ic(priv, desc);
4452                 priv->xstats.tx_set_ic_bit++;
4453         }
4454
4455         /* We've used all descriptors we need for this skb, however,
4456          * advance cur_tx so that it references a fresh descriptor.
4457          * ndo_start_xmit will fill this descriptor the next time it's
4458          * called and stmmac_tx_clean may clean up to this descriptor.
4459          */
4460         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4461         tx_q->cur_tx = entry;
4462
4463         if (netif_msg_pktdata(priv)) {
4464                 netdev_dbg(priv->dev,
4465                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4466                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4467                            entry, first, nfrags);
4468
4469                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4470                 print_pkt(skb->data, skb->len);
4471         }
4472
4473         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4474                 netif_dbg(priv, hw, priv->dev, "%s: stopping transmit queue\n",
4475                           __func__);
4476                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4477         }
4478
4479         dev->stats.tx_bytes += skb->len;
4480
4481         if (priv->sarc_type)
4482                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4483
4484         skb_tx_timestamp(skb);
4485
4486         /* Ready to fill the first descriptor and set the OWN bit w/o any
4487          * problems because all the descriptors are actually ready to be
4488          * passed to the DMA engine.
4489          */
4490         if (likely(!is_jumbo)) {
4491                 bool last_segment = (nfrags == 0);
4492
4493                 des = dma_map_single(priv->device, skb->data,
4494                                      nopaged_len, DMA_TO_DEVICE);
4495                 if (dma_mapping_error(priv->device, des))
4496                         goto dma_map_err;
4497
4498                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4499                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4500                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4501
4502                 stmmac_set_desc_addr(priv, first, des);
4503
4504                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4505                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4506
4507                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4508                              priv->hwts_tx_en)) {
4509                         /* declare that device is doing timestamping */
4510                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4511                         stmmac_enable_tx_timestamp(priv, first);
4512                 }
4513
4514                 /* Prepare the first descriptor setting the OWN bit too */
4515                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4516                                 csum_insertion, priv->mode, 0, last_segment,
4517                                 skb->len);
4518         }
4519
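        /* Time-Based Scheduling: program the frame's launch time, taken
         * from skb->tstamp, into the enhanced descriptor.
         */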
4520         if (tx_q->tbs & STMMAC_TBS_EN) {
4521                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4522
4523                 tbs_desc = &tx_q->dma_entx[first_entry];
4524                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4525         }
4526
4527         stmmac_set_tx_owner(priv, first);
4528
4529         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4530
4531         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4532
4533         stmmac_flush_tx_descriptors(priv, queue);
4534         stmmac_tx_timer_arm(priv, queue);
4535
4536         return NETDEV_TX_OK;
4537
4538 dma_map_err:
4539         netdev_err(priv->dev, "Tx DMA map failed\n");
4540         dev_kfree_skb(skb);
4541         priv->dev->stats.tx_dropped++;
4542         return NETDEV_TX_OK;
4543 }
4544
4545 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4546 {
4547         struct vlan_ethhdr *veth;
4548         __be16 vlan_proto;
4549         u16 vlanid;
4550
4551         veth = (struct vlan_ethhdr *)skb->data;
4552         vlan_proto = veth->h_vlan_proto;
4553
4554         if ((vlan_proto == htons(ETH_P_8021Q) &&
4555              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4556             (vlan_proto == htons(ETH_P_8021AD) &&
4557              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4558                 /* pop the vlan tag */
4559                 vlanid = ntohs(veth->h_vlan_TCI);
4560                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4561                 skb_pull(skb, VLAN_HLEN);
4562                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4563         }
4564 }
4565
4566 /**
4567  * stmmac_rx_refill - refill used skb preallocated buffers
4568  * @priv: driver private structure
4569  * @queue: RX queue index
4570  * Description : this is to reallocate the skb for the reception process
4571  * that is based on zero-copy.
4572  */
4573 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4574 {
4575         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4576         int dirty = stmmac_rx_dirty(priv, queue);
4577         unsigned int entry = rx_q->dirty_rx;
4578         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4579
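        /* Hardware limited to 32-bit DMA addressing must get its RX pages
         * from the DMA32 zone.
         */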
4580         if (priv->dma_cap.addr64 <= 32)
4581                 gfp |= GFP_DMA32;
4582
4583         while (dirty-- > 0) {
4584                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4585                 struct dma_desc *p;
4586                 bool use_rx_wd;
4587
4588                 if (priv->extend_desc)
4589                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4590                 else
4591                         p = rx_q->dma_rx + entry;
4592
4593                 if (!buf->page) {
4594                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4595                         if (!buf->page)
4596                                 break;
4597                 }
4598
4599                 if (priv->sph && !buf->sec_page) {
4600                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4601                         if (!buf->sec_page)
4602                                 break;
4603
4604                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4605                 }
4606
4607                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4608
4609                 stmmac_set_desc_addr(priv, p, buf->addr);
4610                 if (priv->sph)
4611                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4612                 else
4613                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4614                 stmmac_refill_desc3(priv, rx_q, p);
4615
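                /* Decide whether this descriptor should raise a completion
                 * interrupt or leave it to the RX watchdog (RIWT), based on
                 * the rx_coal_frames setting for this queue.
                 */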
4616                 rx_q->rx_count_frames++;
4617                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4618                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4619                         rx_q->rx_count_frames = 0;
4620
4621                 use_rx_wd = !priv->rx_coal_frames[queue];
4622                 use_rx_wd |= rx_q->rx_count_frames > 0;
4623                 if (!priv->use_riwt)
4624                         use_rx_wd = false;
4625
4626                 dma_wmb();
4627                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4628
4629                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4630         }
4631         rx_q->dirty_rx = entry;
4632         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4633                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4634         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4635 }
4636
4637 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4638                                        struct dma_desc *p,
4639                                        int status, unsigned int len)
4640 {
4641         unsigned int plen = 0, hlen = 0;
4642         int coe = priv->hw->rx_csum;
4643
4644         /* Not first descriptor, buffer is always zero */
4645         if (priv->sph && len)
4646                 return 0;
4647
4648         /* First descriptor, get split header length */
4649         stmmac_get_rx_header_len(priv, p, &hlen);
4650         if (priv->sph && hlen) {
4651                 priv->xstats.rx_split_hdr_pkt_n++;
4652                 return hlen;
4653         }
4654
4655         /* First descriptor, not last descriptor and not split header */
4656         if (status & rx_not_ls)
4657                 return priv->dma_conf.dma_buf_sz;
4658
4659         plen = stmmac_get_rx_frame_len(priv, p, coe);
4660
4661         /* First descriptor and last descriptor and not split header */
4662         return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4663 }
4664
4665 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4666                                        struct dma_desc *p,
4667                                        int status, unsigned int len)
4668 {
4669         int coe = priv->hw->rx_csum;
4670         unsigned int plen = 0;
4671
4672         /* Not split header, buffer is not available */
4673         if (!priv->sph)
4674                 return 0;
4675
4676         /* Not last descriptor */
4677         if (status & rx_not_ls)
4678                 return priv->dma_conf.dma_buf_sz;
4679
4680         plen = stmmac_get_rx_frame_len(priv, p, coe);
4681
4682         /* Last descriptor */
4683         return plen - len;
4684 }
4685
4686 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4687                                 struct xdp_frame *xdpf, bool dma_map)
4688 {
4689         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4690         unsigned int entry = tx_q->cur_tx;
4691         struct dma_desc *tx_desc;
4692         dma_addr_t dma_addr;
4693         bool set_ic;
4694
4695         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4696                 return STMMAC_XDP_CONSUMED;
4697
4698         if (likely(priv->extend_desc))
4699                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4700         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4701                 tx_desc = &tx_q->dma_entx[entry].basic;
4702         else
4703                 tx_desc = tx_q->dma_tx + entry;
4704
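        /* Frames coming from ndo_xdp_xmit (dma_map == true) need a fresh
         * DMA mapping; XDP_TX frames already live in the RX page_pool and
         * only need a sync for the device.
         */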
4705         if (dma_map) {
4706                 dma_addr = dma_map_single(priv->device, xdpf->data,
4707                                           xdpf->len, DMA_TO_DEVICE);
4708                 if (dma_mapping_error(priv->device, dma_addr))
4709                         return STMMAC_XDP_CONSUMED;
4710
4711                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4712         } else {
4713                 struct page *page = virt_to_page(xdpf->data);
4714
4715                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4716                            xdpf->headroom;
4717                 dma_sync_single_for_device(priv->device, dma_addr,
4718                                            xdpf->len, DMA_BIDIRECTIONAL);
4719
4720                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4721         }
4722
4723         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4724         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4725         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4726         tx_q->tx_skbuff_dma[entry].last_segment = true;
4727         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4728
4729         tx_q->xdpf[entry] = xdpf;
4730
4731         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4732
4733         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4734                                true, priv->mode, true, true,
4735                                xdpf->len);
4736
4737         tx_q->tx_count_frames++;
4738
4739         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4740                 set_ic = true;
4741         else
4742                 set_ic = false;
4743
4744         if (set_ic) {
4745                 tx_q->tx_count_frames = 0;
4746                 stmmac_set_tx_ic(priv, tx_desc);
4747                 priv->xstats.tx_set_ic_bit++;
4748         }
4749
4750         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4751
4752         entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4753         tx_q->cur_tx = entry;
4754
4755         return STMMAC_XDP_TX;
4756 }
4757
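/* Map the current CPU to one of the configured TX queues so that XDP
 * transmission shares the rings with the regular stack.
 */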
4758 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4759                                    int cpu)
4760 {
4761         int index = cpu;
4762
4763         if (unlikely(index < 0))
4764                 index = 0;
4765
4766         while (index >= priv->plat->tx_queues_to_use)
4767                 index -= priv->plat->tx_queues_to_use;
4768
4769         return index;
4770 }
4771
4772 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4773                                 struct xdp_buff *xdp)
4774 {
4775         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4776         int cpu = smp_processor_id();
4777         struct netdev_queue *nq;
4778         int queue;
4779         int res;
4780
4781         if (unlikely(!xdpf))
4782                 return STMMAC_XDP_CONSUMED;
4783
4784         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4785         nq = netdev_get_tx_queue(priv->dev, queue);
4786
4787         __netif_tx_lock(nq, cpu);
4788         /* Avoids TX time-out as we are sharing with slow path */
4789         txq_trans_cond_update(nq);
4790
4791         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4792         if (res == STMMAC_XDP_TX)
4793                 stmmac_flush_tx_descriptors(priv, queue);
4794
4795         __netif_tx_unlock(nq);
4796
4797         return res;
4798 }
4799
4800 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4801                                  struct bpf_prog *prog,
4802                                  struct xdp_buff *xdp)
4803 {
4804         u32 act;
4805         int res;
4806
4807         act = bpf_prog_run_xdp(prog, xdp);
4808         switch (act) {
4809         case XDP_PASS:
4810                 res = STMMAC_XDP_PASS;
4811                 break;
4812         case XDP_TX:
4813                 res = stmmac_xdp_xmit_back(priv, xdp);
4814                 break;
4815         case XDP_REDIRECT:
4816                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4817                         res = STMMAC_XDP_CONSUMED;
4818                 else
4819                         res = STMMAC_XDP_REDIRECT;
4820                 break;
4821         default:
4822                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4823                 fallthrough;
4824         case XDP_ABORTED:
4825                 trace_xdp_exception(priv->dev, prog, act);
4826                 fallthrough;
4827         case XDP_DROP:
4828                 res = STMMAC_XDP_CONSUMED;
4829                 break;
4830         }
4831
4832         return res;
4833 }
4834
4835 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4836                                            struct xdp_buff *xdp)
4837 {
4838         struct bpf_prog *prog;
4839         int res;
4840
4841         prog = READ_ONCE(priv->xdp_prog);
4842         if (!prog) {
4843                 res = STMMAC_XDP_PASS;
4844                 goto out;
4845         }
4846
4847         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4848 out:
4849         return ERR_PTR(-res);
4850 }
4851
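/* Per-poll XDP RX housekeeping: arm the TX coalescing timer when frames
 * were queued via XDP_TX and flush pending XDP_REDIRECTs.
 */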
4852 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4853                                    int xdp_status)
4854 {
4855         int cpu = smp_processor_id();
4856         int queue;
4857
4858         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4859
4860         if (xdp_status & STMMAC_XDP_TX)
4861                 stmmac_tx_timer_arm(priv, queue);
4862
4863         if (xdp_status & STMMAC_XDP_REDIRECT)
4864                 xdp_do_flush();
4865 }
4866
4867 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4868                                                struct xdp_buff *xdp)
4869 {
4870         unsigned int metasize = xdp->data - xdp->data_meta;
4871         unsigned int datasize = xdp->data_end - xdp->data;
4872         struct sk_buff *skb;
4873
4874         skb = __napi_alloc_skb(&ch->rxtx_napi,
4875                                xdp->data_end - xdp->data_hard_start,
4876                                GFP_ATOMIC | __GFP_NOWARN);
4877         if (unlikely(!skb))
4878                 return NULL;
4879
4880         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4881         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4882         if (metasize)
4883                 skb_metadata_set(skb, metasize);
4884
4885         return skb;
4886 }
4887
4888 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4889                                    struct dma_desc *p, struct dma_desc *np,
4890                                    struct xdp_buff *xdp)
4891 {
4892         struct stmmac_channel *ch = &priv->channel[queue];
4893         unsigned int len = xdp->data_end - xdp->data;
4894         enum pkt_hash_types hash_type;
4895         int coe = priv->hw->rx_csum;
4896         struct sk_buff *skb;
4897         u32 hash;
4898
4899         skb = stmmac_construct_skb_zc(ch, xdp);
4900         if (!skb) {
4901                 priv->dev->stats.rx_dropped++;
4902                 return;
4903         }
4904
4905         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4906         stmmac_rx_vlan(priv->dev, skb);
4907         skb->protocol = eth_type_trans(skb, priv->dev);
4908
4909         if (unlikely(!coe))
4910                 skb_checksum_none_assert(skb);
4911         else
4912                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4913
4914         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4915                 skb_set_hash(skb, hash, hash_type);
4916
4917         skb_record_rx_queue(skb, queue);
4918         napi_gro_receive(&ch->rxtx_napi, skb);
4919
4920         priv->dev->stats.rx_packets++;
4921         priv->dev->stats.rx_bytes += len;
4922 }
4923
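/* Refill the zero-copy RX ring with buffers from the XSK pool.  Returns
 * false if the pool ran out of buffers, so the caller can report the
 * failure and request a wakeup.
 */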
4924 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4925 {
4926         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4927         unsigned int entry = rx_q->dirty_rx;
4928         struct dma_desc *rx_desc = NULL;
4929         bool ret = true;
4930
4931         budget = min(budget, stmmac_rx_dirty(priv, queue));
4932
4933         while (budget-- > 0 && entry != rx_q->cur_rx) {
4934                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4935                 dma_addr_t dma_addr;
4936                 bool use_rx_wd;
4937
4938                 if (!buf->xdp) {
4939                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4940                         if (!buf->xdp) {
4941                                 ret = false;
4942                                 break;
4943                         }
4944                 }
4945
4946                 if (priv->extend_desc)
4947                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4948                 else
4949                         rx_desc = rx_q->dma_rx + entry;
4950
4951                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4952                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4953                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4954                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4955
4956                 rx_q->rx_count_frames++;
4957                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4958                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4959                         rx_q->rx_count_frames = 0;
4960
4961                 use_rx_wd = !priv->rx_coal_frames[queue];
4962                 use_rx_wd |= rx_q->rx_count_frames > 0;
4963                 if (!priv->use_riwt)
4964                         use_rx_wd = false;
4965
4966                 dma_wmb();
4967                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4968
4969                 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4970         }
4971
4972         if (rx_desc) {
4973                 rx_q->dirty_rx = entry;
4974                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4975                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
4976                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4977         }
4978
4979         return ret;
4980 }
4981
4982 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4983 {
4984         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4985         unsigned int count = 0, error = 0, len = 0;
4986         int dirty = stmmac_rx_dirty(priv, queue);
4987         unsigned int next_entry = rx_q->cur_rx;
4988         unsigned int desc_size;
4989         struct bpf_prog *prog;
4990         bool failure = false;
4991         int xdp_status = 0;
4992         int status = 0;
4993
4994         if (netif_msg_rx_status(priv)) {
4995                 void *rx_head;
4996
4997                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4998                 if (priv->extend_desc) {
4999                         rx_head = (void *)rx_q->dma_erx;
5000                         desc_size = sizeof(struct dma_extended_desc);
5001                 } else {
5002                         rx_head = (void *)rx_q->dma_rx;
5003                         desc_size = sizeof(struct dma_desc);
5004                 }
5005
5006                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5007                                     rx_q->dma_rx_phy, desc_size);
5008         }
5009         while (count < limit) {
5010                 struct stmmac_rx_buffer *buf;
5011                 unsigned int buf1_len = 0;
5012                 struct dma_desc *np, *p;
5013                 int entry;
5014                 int res;
5015
5016                 if (!count && rx_q->state_saved) {
5017                         error = rx_q->state.error;
5018                         len = rx_q->state.len;
5019                 } else {
5020                         rx_q->state_saved = false;
5021                         error = 0;
5022                         len = 0;
5023                 }
5024
5025                 if (count >= limit)
5026                         break;
5027
5028 read_again:
5029                 buf1_len = 0;
5030                 entry = next_entry;
5031                 buf = &rx_q->buf_pool[entry];
5032
5033                 if (dirty >= STMMAC_RX_FILL_BATCH) {
5034                         failure = failure ||
5035                                   !stmmac_rx_refill_zc(priv, queue, dirty);
5036                         dirty = 0;
5037                 }
5038
5039                 if (priv->extend_desc)
5040                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5041                 else
5042                         p = rx_q->dma_rx + entry;
5043
5044                 /* read the status of the incoming frame */
5045                 status = stmmac_rx_status(priv, &priv->dev->stats,
5046                                           &priv->xstats, p);
5047                 /* check if managed by the DMA otherwise go ahead */
5048                 if (unlikely(status & dma_own))
5049                         break;
5050
5051                 /* Prefetch the next RX descriptor */
5052                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5053                                                 priv->dma_conf.dma_rx_size);
5054                 next_entry = rx_q->cur_rx;
5055
5056                 if (priv->extend_desc)
5057                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5058                 else
5059                         np = rx_q->dma_rx + next_entry;
5060
5061                 prefetch(np);
5062
5063                 /* Ensure a valid XSK buffer before proceeding */
5064                 if (!buf->xdp)
5065                         break;
5066
5067                 if (priv->extend_desc)
5068                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5069                                                   &priv->xstats,
5070                                                   rx_q->dma_erx + entry);
5071                 if (unlikely(status == discard_frame)) {
5072                         xsk_buff_free(buf->xdp);
5073                         buf->xdp = NULL;
5074                         dirty++;
5075                         error = 1;
5076                         if (!priv->hwts_rx_en)
5077                                 priv->dev->stats.rx_errors++;
5078                 }
5079
5080                 if (unlikely(error && (status & rx_not_ls)))
5081                         goto read_again;
5082                 if (unlikely(error)) {
5083                         count++;
5084                         continue;
5085                 }
5086
5087                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5088                 if (likely(status & rx_not_ls)) {
5089                         xsk_buff_free(buf->xdp);
5090                         buf->xdp = NULL;
5091                         dirty++;
5092                         count++;
5093                         goto read_again;
5094                 }
5095
5096                 /* XDP ZC frames only support the primary buffer for now */
5097                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5098                 len += buf1_len;
5099
5100                 /* ACS is disabled; strip manually. */
5101                 if (likely(!(status & rx_not_ls))) {
5102                         buf1_len -= ETH_FCS_LEN;
5103                         len -= ETH_FCS_LEN;
5104                 }
5105
5106                 /* RX buffer is good and fits into an XSK pool buffer */
5107                 buf->xdp->data_end = buf->xdp->data + buf1_len;
5108                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5109
5110                 prog = READ_ONCE(priv->xdp_prog);
5111                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5112
5113                 switch (res) {
5114                 case STMMAC_XDP_PASS:
5115                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5116                         xsk_buff_free(buf->xdp);
5117                         break;
5118                 case STMMAC_XDP_CONSUMED:
5119                         xsk_buff_free(buf->xdp);
5120                         priv->dev->stats.rx_dropped++;
5121                         break;
5122                 case STMMAC_XDP_TX:
5123                 case STMMAC_XDP_REDIRECT:
5124                         xdp_status |= res;
5125                         break;
5126                 }
5127
5128                 buf->xdp = NULL;
5129                 dirty++;
5130                 count++;
5131         }
5132
5133         if (status & rx_not_ls) {
5134                 rx_q->state_saved = true;
5135                 rx_q->state.error = error;
5136                 rx_q->state.len = len;
5137         }
5138
5139         stmmac_finalize_xdp_rx(priv, xdp_status);
5140
5141         priv->xstats.rx_pkt_n += count;
5142         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5143
5144         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5145                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5146                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5147                 else
5148                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5149
5150                 return (int)count;
5151         }
5152
5153         return failure ? limit : (int)count;
5154 }
5155
5156 /**
5157  * stmmac_rx - manage the receive process
5158  * @priv: driver private structure
5159  * @limit: napi budget
5160  * @queue: RX queue index.
5161  * Description : this is the function called by the napi poll method.
5162  * It gets all the frames inside the ring.
5163  */
5164 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5165 {
5166         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5167         struct stmmac_channel *ch = &priv->channel[queue];
5168         unsigned int count = 0, error = 0, len = 0;
5169         int status = 0, coe = priv->hw->rx_csum;
5170         unsigned int next_entry = rx_q->cur_rx;
5171         enum dma_data_direction dma_dir;
5172         unsigned int desc_size;
5173         struct sk_buff *skb = NULL;
5174         struct xdp_buff xdp;
5175         int xdp_status = 0;
5176         int buf_sz;
5177
5178         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
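        /* Round buf_sz up to whole pages so the xdp_buff frame size covers
         * the full page_pool page backing each RX buffer.
         */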
5179         buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5180
5181         if (netif_msg_rx_status(priv)) {
5182                 void *rx_head;
5183
5184                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5185                 if (priv->extend_desc) {
5186                         rx_head = (void *)rx_q->dma_erx;
5187                         desc_size = sizeof(struct dma_extended_desc);
5188                 } else {
5189                         rx_head = (void *)rx_q->dma_rx;
5190                         desc_size = sizeof(struct dma_desc);
5191                 }
5192
5193                 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5194                                     rx_q->dma_rx_phy, desc_size);
5195         }
5196         while (count < limit) {
5197                 unsigned int buf1_len = 0, buf2_len = 0;
5198                 enum pkt_hash_types hash_type;
5199                 struct stmmac_rx_buffer *buf;
5200                 struct dma_desc *np, *p;
5201                 int entry;
5202                 u32 hash;
5203
5204                 if (!count && rx_q->state_saved) {
5205                         skb = rx_q->state.skb;
5206                         error = rx_q->state.error;
5207                         len = rx_q->state.len;
5208                 } else {
5209                         rx_q->state_saved = false;
5210                         skb = NULL;
5211                         error = 0;
5212                         len = 0;
5213                 }
5214
5215                 if (count >= limit)
5216                         break;
5217
5218 read_again:
5219                 buf1_len = 0;
5220                 buf2_len = 0;
5221                 entry = next_entry;
5222                 buf = &rx_q->buf_pool[entry];
5223
5224                 if (priv->extend_desc)
5225                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5226                 else
5227                         p = rx_q->dma_rx + entry;
5228
5229                 /* read the status of the incoming frame */
5230                 status = stmmac_rx_status(priv, &priv->dev->stats,
5231                                 &priv->xstats, p);
5232                 /* check if managed by the DMA otherwise go ahead */
5233                 if (unlikely(status & dma_own))
5234                         break;
5235
5236                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5237                                                 priv->dma_conf.dma_rx_size);
5238                 next_entry = rx_q->cur_rx;
5239
5240                 if (priv->extend_desc)
5241                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5242                 else
5243                         np = rx_q->dma_rx + next_entry;
5244
5245                 prefetch(np);
5246
5247                 if (priv->extend_desc)
5248                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5249                                         &priv->xstats, rx_q->dma_erx + entry);
5250                 if (unlikely(status == discard_frame)) {
5251                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5252                         buf->page = NULL;
5253                         error = 1;
5254                         if (!priv->hwts_rx_en)
5255                                 priv->dev->stats.rx_errors++;
5256                 }
5257
5258                 if (unlikely(error && (status & rx_not_ls)))
5259                         goto read_again;
5260                 if (unlikely(error)) {
5261                         dev_kfree_skb(skb);
5262                         skb = NULL;
5263                         count++;
5264                         continue;
5265                 }
5266
5267                 /* Buffer is good. Go on. */
5268
5269                 prefetch(page_address(buf->page) + buf->page_offset);
5270                 if (buf->sec_page)
5271                         prefetch(page_address(buf->sec_page));
5272
5273                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5274                 len += buf1_len;
5275                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5276                 len += buf2_len;
5277
5278                 /* ACS is disabled; strip manually. */
5279                 if (likely(!(status & rx_not_ls))) {
5280                         if (buf2_len) {
5281                                 buf2_len -= ETH_FCS_LEN;
5282                                 len -= ETH_FCS_LEN;
5283                         } else if (buf1_len) {
5284                                 buf1_len -= ETH_FCS_LEN;
5285                                 len -= ETH_FCS_LEN;
5286                         }
5287                 }
5288
5289                 if (!skb) {
5290                         unsigned int pre_len, sync_len;
5291
5292                         dma_sync_single_for_cpu(priv->device, buf->addr,
5293                                                 buf1_len, dma_dir);
5294
5295                         xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5296                         xdp_prepare_buff(&xdp, page_address(buf->page),
5297                                          buf->page_offset, buf1_len, false);
5298
5299                         pre_len = xdp.data_end - xdp.data_hard_start -
5300                                   buf->page_offset;
5301                         skb = stmmac_xdp_run_prog(priv, &xdp);
5302                         /* Due to xdp_adjust_tail: the DMA sync for_device
5303                          * must cover the max length the CPU touched.
5304                          */
5305                         sync_len = xdp.data_end - xdp.data_hard_start -
5306                                    buf->page_offset;
5307                         sync_len = max(sync_len, pre_len);
5308
5309                         /* For non-XDP_PASS verdicts */
5310                         if (IS_ERR(skb)) {
5311                                 unsigned int xdp_res = -PTR_ERR(skb);
5312
5313                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5314                                         page_pool_put_page(rx_q->page_pool,
5315                                                            virt_to_head_page(xdp.data),
5316                                                            sync_len, true);
5317                                         buf->page = NULL;
5318                                         priv->dev->stats.rx_dropped++;
5319
5320                                         /* Clear skb, as it was set to the
5321                                          * error-encoded XDP program status.
5322                                          */
5323                                         skb = NULL;
5324
5325                                         if (unlikely((status & rx_not_ls)))
5326                                                 goto read_again;
5327
5328                                         count++;
5329                                         continue;
5330                                 } else if (xdp_res & (STMMAC_XDP_TX |
5331                                                       STMMAC_XDP_REDIRECT)) {
5332                                         xdp_status |= xdp_res;
5333                                         buf->page = NULL;
5334                                         skb = NULL;
5335                                         count++;
5336                                         continue;
5337                                 }
5338                         }
5339                 }
5340
5341                 if (!skb) {
5342                         /* XDP program may expand or reduce tail */
5343                         buf1_len = xdp.data_end - xdp.data;
5344
5345                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5346                         if (!skb) {
5347                                 priv->dev->stats.rx_dropped++;
5348                                 count++;
5349                                 goto drain_data;
5350                         }
5351
5352                         /* XDP program may adjust header */
5353                         skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5354                         skb_put(skb, buf1_len);
5355
5356                         /* Data payload copied into SKB, page ready for recycle */
5357                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5358                         buf->page = NULL;
5359                 } else if (buf1_len) {
5360                         dma_sync_single_for_cpu(priv->device, buf->addr,
5361                                                 buf1_len, dma_dir);
5362                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5363                                         buf->page, buf->page_offset, buf1_len,
5364                                         priv->dma_conf.dma_buf_sz);
5365
5366                         /* Data payload appended into SKB */
5367                         page_pool_release_page(rx_q->page_pool, buf->page);
5368                         buf->page = NULL;
5369                 }
5370
5371                 if (buf2_len) {
5372                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5373                                                 buf2_len, dma_dir);
5374                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5375                                         buf->sec_page, 0, buf2_len,
5376                                         priv->dma_conf.dma_buf_sz);
5377
5378                         /* Data payload appended into SKB */
5379                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5380                         buf->sec_page = NULL;
5381                 }
5382
5383 drain_data:
5384                 if (likely(status & rx_not_ls))
5385                         goto read_again;
5386                 if (!skb)
5387                         continue;
5388
5389                 /* Got entire packet into SKB. Finish it. */
5390
5391                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5392                 stmmac_rx_vlan(priv->dev, skb);
5393                 skb->protocol = eth_type_trans(skb, priv->dev);
5394
5395                 if (unlikely(!coe))
5396                         skb_checksum_none_assert(skb);
5397                 else
5398                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5399
5400                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5401                         skb_set_hash(skb, hash, hash_type);
5402
5403                 skb_record_rx_queue(skb, queue);
5404                 napi_gro_receive(&ch->rx_napi, skb);
5405                 skb = NULL;
5406
5407                 priv->dev->stats.rx_packets++;
5408                 priv->dev->stats.rx_bytes += len;
5409                 count++;
5410         }
5411
5412         if (status & rx_not_ls || skb) {
5413                 rx_q->state_saved = true;
5414                 rx_q->state.skb = skb;
5415                 rx_q->state.error = error;
5416                 rx_q->state.len = len;
5417         }
5418
5419         stmmac_finalize_xdp_rx(priv, xdp_status);
5420
5421         stmmac_rx_refill(priv, queue);
5422
5423         priv->xstats.rx_pkt_n += count;
5424         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5425
5426         return count;
5427 }
5428
5429 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5430 {
5431         struct stmmac_channel *ch =
5432                 container_of(napi, struct stmmac_channel, rx_napi);
5433         struct stmmac_priv *priv = ch->priv_data;
5434         u32 chan = ch->index;
5435         int work_done;
5436
5437         priv->xstats.napi_poll++;
5438
5439         work_done = stmmac_rx(priv, budget, chan);
5440         if (work_done < budget && napi_complete_done(napi, work_done)) {
5441                 unsigned long flags;
5442
5443                 spin_lock_irqsave(&ch->lock, flags);
5444                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5445                 spin_unlock_irqrestore(&ch->lock, flags);
5446         }
5447
5448         return work_done;
5449 }
5450
5451 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5452 {
5453         struct stmmac_channel *ch =
5454                 container_of(napi, struct stmmac_channel, tx_napi);
5455         struct stmmac_priv *priv = ch->priv_data;
5456         u32 chan = ch->index;
5457         int work_done;
5458
5459         priv->xstats.napi_poll++;
5460
5461         work_done = stmmac_tx_clean(priv, budget, chan);
5462         work_done = min(work_done, budget);
5463
5464         if (work_done < budget && napi_complete_done(napi, work_done)) {
5465                 unsigned long flags;
5466
5467                 spin_lock_irqsave(&ch->lock, flags);
5468                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5469                 spin_unlock_irqrestore(&ch->lock, flags);
5470         }
5471
5472         return work_done;
5473 }
5474
5475 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5476 {
5477         struct stmmac_channel *ch =
5478                 container_of(napi, struct stmmac_channel, rxtx_napi);
5479         struct stmmac_priv *priv = ch->priv_data;
5480         int rx_done, tx_done, rxtx_done;
5481         u32 chan = ch->index;
5482
5483         priv->xstats.napi_poll++;
5484
5485         tx_done = stmmac_tx_clean(priv, budget, chan);
5486         tx_done = min(tx_done, budget);
5487
5488         rx_done = stmmac_rx_zc(priv, budget, chan);
5489
5490         rxtx_done = max(tx_done, rx_done);
5491
5492         /* If either TX or RX work is not complete, return budget
5493          * and keep polling
5494          */
5495         if (rxtx_done >= budget)
5496                 return budget;
5497
5498         /* all work done, exit the polling mode */
5499         if (napi_complete_done(napi, rxtx_done)) {
5500                 unsigned long flags;
5501
5502                 spin_lock_irqsave(&ch->lock, flags);
5503                 /* Both RX and TX work are complete,
5504                  * so enable both RX & TX IRQs.
5505                  */
5506                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5507                 spin_unlock_irqrestore(&ch->lock, flags);
5508         }
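        /* napi_complete_done() was already attempted above; returning less
         * than the full budget tells the NAPI core this poll is finished.
         */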
5509
5510         return min(rxtx_done, budget - 1);
5511 }
5512
5513 /**
5514  *  stmmac_tx_timeout
5515  *  @dev : Pointer to net device structure
5516  *  @txqueue: the index of the hanging transmit queue
5517  *  Description: this function is called when a packet transmission fails to
5518  *   complete within a reasonable time. The driver will mark the error in the
5519  *   netdev structure and arrange for the device to be reset to a sane state
5520  *   in order to transmit a new packet.
5521  */
5522 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5523 {
5524         struct stmmac_priv *priv = netdev_priv(dev);
5525
5526         stmmac_global_err(priv);
5527 }
5528
5529 /**
5530  *  stmmac_set_rx_mode - entry point for multicast addressing
5531  *  @dev : pointer to the device structure
5532  *  Description:
5533  *  This function is a driver entry point which gets called by the kernel
5534  *  whenever multicast addresses must be enabled/disabled.
5535  *  Return value:
5536  *  void.
5537  */
5538 static void stmmac_set_rx_mode(struct net_device *dev)
5539 {
5540         struct stmmac_priv *priv = netdev_priv(dev);
5541
5542         stmmac_set_filter(priv, priv->hw, dev);
5543 }
5544
5545 /**
5546  *  stmmac_change_mtu - entry point to change MTU size for the device.
5547  *  @dev : device pointer.
5548  *  @new_mtu : the new MTU size for the device.
5549  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5550  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5551  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5552  *  Return value:
5553  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5554  *  file on failure.
5555  */
5556 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5557 {
5558         struct stmmac_priv *priv = netdev_priv(dev);
5559         int txfifosz = priv->plat->tx_fifo_size;
5560         struct stmmac_dma_conf *dma_conf;
5561         const int mtu = new_mtu;
5562         int ret;
5563
5564         if (txfifosz == 0)
5565                 txfifosz = priv->dma_cap.tx_fifo_size;
5566
5567         txfifosz /= priv->plat->tx_queues_to_use;
5568
5569         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5570                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5571                 return -EINVAL;
5572         }
5573
5574         new_mtu = STMMAC_ALIGN(new_mtu);
5575
5576         /* If condition true, FIFO is too small or MTU too large */
5577         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5578                 return -EINVAL;
5579
5580         if (netif_running(dev)) {
5581                 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5582                 /* Try to allocate the new DMA conf with the new mtu */
5583                 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5584                 if (IS_ERR(dma_conf)) {
5585                         netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5586                                    mtu);
5587                         return PTR_ERR(dma_conf);
5588                 }
5589
5590                 stmmac_release(dev);
5591
5592                 ret = __stmmac_open(dev, dma_conf);
5593                 kfree(dma_conf);
5594                 if (ret) {
5595                         netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5596                         return ret;
5597                 }
5598
5599                 stmmac_set_rx_mode(dev);
5600         }
5601
5602         dev->mtu = mtu;
5603         netdev_update_features(dev);
5604
5605         return 0;
5606 }
5607
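     /* Mask out offload features the current configuration cannot honour:
      * no RX/TX checksumming without COE support, no TX checksumming for
      * oversized frames on cores with buggy jumbo support, and cache
      * whether TSO has been requested via ethtool.
      */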
5608 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5609                                              netdev_features_t features)
5610 {
5611         struct stmmac_priv *priv = netdev_priv(dev);
5612
5613         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5614                 features &= ~NETIF_F_RXCSUM;
5615
5616         if (!priv->plat->tx_coe)
5617                 features &= ~NETIF_F_CSUM_MASK;
5618
5619         /* Some GMAC devices have buggy Jumbo frame support that
5620          * needs the Tx COE disabled for oversized frames
5621          * (due to limited buffer sizes). In this case we disable
5622          * the TX csum insertion in the TDES and do not use SF.
5623          */
5624         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5625                 features &= ~NETIF_F_CSUM_MASK;
5626
5627         /* Disable tso if asked by ethtool */
5628         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5629                 if (features & NETIF_F_TSO)
5630                         priv->tso = true;
5631                 else
5632                         priv->tso = false;
5633         }
5634
5635         return features;
5636 }
5637
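     /* Apply the requested RX checksum offload setting to the MAC and
      * refresh Split Header on every RX channel, since SPH depends on
      * RX checksum offload being enabled.
      */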
5638 static int stmmac_set_features(struct net_device *netdev,
5639                                netdev_features_t features)
5640 {
5641         struct stmmac_priv *priv = netdev_priv(netdev);
5642
5643         /* Keep the COE Type if checksum offload is supported */
5644         if (features & NETIF_F_RXCSUM)
5645                 priv->hw->rx_csum = priv->plat->rx_coe;
5646         else
5647                 priv->hw->rx_csum = 0;
5648         /* No check needed because rx_coe has already been set and it will be
5649          * fixed up in case of issues.
5650          */
5651         stmmac_rx_ipc(priv, priv->hw);
5652
5653         if (priv->sph_cap) {
5654                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5655                 u32 chan;
5656
5657                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5658                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5659         }
5660
5661         return 0;
5662 }
5663
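     /* Track the Frame Preemption (FPE) handshake: update the local and
      * link-partner states from the IRQ status bits, answer a received
      * verify mPacket and, while the handshake is enabled, schedule the
      * FPE workqueue task to complete it.
      */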
5664 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5665 {
5666         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5667         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5668         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5669         bool *hs_enable = &fpe_cfg->hs_enable;
5670
5671         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5672                 return;
5673
5674         /* If LP has sent verify mPacket, LP is FPE capable */
5675         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5676                 if (*lp_state < FPE_STATE_CAPABLE)
5677                         *lp_state = FPE_STATE_CAPABLE;
5678
5679                 /* If the user has requested FPE enable, respond quickly */
5680                 if (*hs_enable)
5681                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5682                                                 MPACKET_RESPONSE);
5683         }
5684
5685         /* If Local has sent verify mPacket, Local is FPE capable */
5686         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5687                 if (*lo_state < FPE_STATE_CAPABLE)
5688                         *lo_state = FPE_STATE_CAPABLE;
5689         }
5690
5691         /* If LP has sent response mPacket, LP is entering FPE ON */
5692         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5693                 *lp_state = FPE_STATE_ENTERING_ON;
5694
5695         /* If Local has sent response mPacket, Local is entering FPE ON */
5696         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5697                 *lo_state = FPE_STATE_ENTERING_ON;
5698
5699         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5700             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5701             priv->fpe_wq) {
5702                 queue_work(priv->fpe_wq, &priv->fpe_task);
5703         }
5704 }
5705
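     /* Handle the MAC-level (non-DMA) interrupt sources shared by all
      * queues: wake-up events, EST and FPE status, LPI entry/exit,
      * per-queue MTL status, PCS link changes and timestamp interrupts.
      */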
5706 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5707 {
5708         u32 rx_cnt = priv->plat->rx_queues_to_use;
5709         u32 tx_cnt = priv->plat->tx_queues_to_use;
5710         u32 queues_count;
5711         u32 queue;
5712         bool xmac;
5713
5714         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5715         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5716
5717         if (priv->irq_wake)
5718                 pm_wakeup_event(priv->device, 0);
5719
5720         if (priv->dma_cap.estsel)
5721                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5722                                       &priv->xstats, tx_cnt);
5723
5724         if (priv->dma_cap.fpesel) {
5725                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5726                                                    priv->dev);
5727
5728                 stmmac_fpe_event_status(priv, status);
5729         }
5730
5731         /* To handle GMAC own interrupts */
5732         if ((priv->plat->has_gmac) || xmac) {
5733                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5734
5735                 if (unlikely(status)) {
5736                         /* For LPI we need to save the tx status */
5737                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5738                                 priv->tx_path_in_lpi_mode = true;
5739                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5740                                 priv->tx_path_in_lpi_mode = false;
5741                 }
5742
5743                 for (queue = 0; queue < queues_count; queue++) {
5744                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5745                                                             queue);
5746                 }
5747
5748                 /* PCS link status */
5749                 if (priv->hw->pcs) {
5750                         if (priv->xstats.pcs_link)
5751                                 netif_carrier_on(priv->dev);
5752                         else
5753                                 netif_carrier_off(priv->dev);
5754                 }
5755
5756                 stmmac_timestamp_interrupt(priv, priv);
5757         }
5758 }
5759
5760 /**
5761  *  stmmac_interrupt - main ISR
5762  *  @irq: interrupt number.
5763  *  @dev_id: to pass the net device pointer.
5764  *  Description: this is the main driver interrupt service routine.
5765  *  It can call:
5766  *  o DMA service routine (to manage incoming frame reception and transmission
5767  *    status)
5768  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5769  *    interrupts.
5770  */
5771 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5772 {
5773         struct net_device *dev = (struct net_device *)dev_id;
5774         struct stmmac_priv *priv = netdev_priv(dev);
5775
5776         /* Check if adapter is up */
5777         if (test_bit(STMMAC_DOWN, &priv->state))
5778                 return IRQ_HANDLED;
5779
5780         /* Check if a fatal error happened */
5781         if (stmmac_safety_feat_interrupt(priv))
5782                 return IRQ_HANDLED;
5783
5784         /* To handle Common interrupts */
5785         stmmac_common_interrupt(priv);
5786
5787         /* To handle DMA interrupts */
5788         stmmac_dma_interrupt(priv);
5789
5790         return IRQ_HANDLED;
5791 }
5792
5793 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5794 {
5795         struct net_device *dev = (struct net_device *)dev_id;
5796         struct stmmac_priv *priv = netdev_priv(dev);
5797
5798         if (unlikely(!dev)) {
5799                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5800                 return IRQ_NONE;
5801         }
5802
5803         /* Check if adapter is up */
5804         if (test_bit(STMMAC_DOWN, &priv->state))
5805                 return IRQ_HANDLED;
5806
5807         /* To handle Common interrupts */
5808         stmmac_common_interrupt(priv);
5809
5810         return IRQ_HANDLED;
5811 }
5812
5813 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5814 {
5815         struct net_device *dev = (struct net_device *)dev_id;
5816         struct stmmac_priv *priv = netdev_priv(dev);
5817
5818         if (unlikely(!dev)) {
5819                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5820                 return IRQ_NONE;
5821         }
5822
5823         /* Check if adapter is up */
5824         if (test_bit(STMMAC_DOWN, &priv->state))
5825                 return IRQ_HANDLED;
5826
5827         /* Check if a fatal error happened */
5828         stmmac_safety_feat_interrupt(priv);
5829
5830         return IRQ_HANDLED;
5831 }
5832
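     /* Per-queue TX interrupt handler used when the platform provides one
      * MSI vector per channel: schedule NAPI for the channel and react to
      * TX DMA errors by bumping the threshold or restarting the channel.
      */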
5833 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5834 {
5835         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5836         struct stmmac_dma_conf *dma_conf;
5837         int chan = tx_q->queue_index;
5838         struct stmmac_priv *priv;
5839         int status;
5840
5841         dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
5842         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5843
5844         if (unlikely(!data)) {
5845                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5846                 return IRQ_NONE;
5847         }
5848
5849         /* Check if adapter is up */
5850         if (test_bit(STMMAC_DOWN, &priv->state))
5851                 return IRQ_HANDLED;
5852
5853         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5854
5855         if (unlikely(status & tx_hard_error_bump_tc)) {
5856                 /* Try to bump up the dma threshold on this failure */
5857                 stmmac_bump_dma_threshold(priv, chan);
5858         } else if (unlikely(status == tx_hard_error)) {
5859                 stmmac_tx_err(priv, chan);
5860         }
5861
5862         return IRQ_HANDLED;
5863 }
5864
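     /* Per-queue RX interrupt handler used when the platform provides one
      * MSI vector per channel: simply schedule RX NAPI for the channel.
      */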
5865 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5866 {
5867         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5868         struct stmmac_dma_conf *dma_conf;
5869         int chan = rx_q->queue_index;
5870         struct stmmac_priv *priv;
5871
5872         dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
5873         priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
5874
5875         if (unlikely(!data)) {
5876                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5877                 return IRQ_NONE;
5878         }
5879
5880         /* Check if adapter is up */
5881         if (test_bit(STMMAC_DOWN, &priv->state))
5882                 return IRQ_HANDLED;
5883
5884         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5885
5886         return IRQ_HANDLED;
5887 }
5888
5889 #ifdef CONFIG_NET_POLL_CONTROLLER
5890 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5891  * to allow network I/O with interrupts disabled.
5892  */
5893 static void stmmac_poll_controller(struct net_device *dev)
5894 {
5895         struct stmmac_priv *priv = netdev_priv(dev);
5896         int i;
5897
5898         /* If adapter is down, do nothing */
5899         if (test_bit(STMMAC_DOWN, &priv->state))
5900                 return;
5901
5902         if (priv->plat->multi_msi_en) {
5903                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5904                         stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
5905
5906                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5907                         stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
5908         } else {
5909                 disable_irq(dev->irq);
5910                 stmmac_interrupt(dev->irq, dev);
5911                 enable_irq(dev->irq);
5912         }
5913 }
5914 #endif
5915
5916 /**
5917  *  stmmac_ioctl - Entry point for the Ioctl
5918  *  @dev: Device pointer.
5919  *  @rq: An IOCTL-specific structure that can contain a pointer to
5920  *  a proprietary structure used to pass information to the driver.
5921  *  @cmd: IOCTL command
5922  *  Description:
5923  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5924  */
5925 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5926 {
5927         struct stmmac_priv *priv = netdev_priv(dev);
5928         int ret = -EOPNOTSUPP;
5929
5930         if (!netif_running(dev))
5931                 return -EINVAL;
5932
5933         switch (cmd) {
5934         case SIOCGMIIPHY:
5935         case SIOCGMIIREG:
5936         case SIOCSMIIREG:
5937                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5938                 break;
5939         case SIOCSHWTSTAMP:
5940                 ret = stmmac_hwtstamp_set(dev, rq);
5941                 break;
5942         case SIOCGHWTSTAMP:
5943                 ret = stmmac_hwtstamp_get(dev, rq);
5944                 break;
5945         default:
5946                 break;
5947         }
5948
5949         return ret;
5950 }
5951
5952 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5953                                     void *cb_priv)
5954 {
5955         struct stmmac_priv *priv = cb_priv;
5956         int ret = -EOPNOTSUPP;
5957
5958         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5959                 return ret;
5960
5961         __stmmac_disable_all_queues(priv);
5962
5963         switch (type) {
5964         case TC_SETUP_CLSU32:
5965                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5966                 break;
5967         case TC_SETUP_CLSFLOWER:
5968                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5969                 break;
5970         default:
5971                 break;
5972         }
5973
5974         stmmac_enable_all_queues(priv);
5975         return ret;
5976 }
5977
5978 static LIST_HEAD(stmmac_block_cb_list);
5979
5980 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5981                            void *type_data)
5982 {
5983         struct stmmac_priv *priv = netdev_priv(ndev);
5984
5985         switch (type) {
5986         case TC_SETUP_BLOCK:
5987                 return flow_block_cb_setup_simple(type_data,
5988                                                   &stmmac_block_cb_list,
5989                                                   stmmac_setup_tc_block_cb,
5990                                                   priv, priv, true);
5991         case TC_SETUP_QDISC_CBS:
5992                 return stmmac_tc_setup_cbs(priv, priv, type_data);
5993         case TC_SETUP_QDISC_TAPRIO:
5994                 return stmmac_tc_setup_taprio(priv, priv, type_data);
5995         case TC_SETUP_QDISC_ETF:
5996                 return stmmac_tc_setup_etf(priv, priv, type_data);
5997         default:
5998                 return -EOPNOTSUPP;
5999         }
6000 }
6001
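     /* Steer GSO (TSO/USO) traffic to queue 0, which is TSO capable
      * whenever the feature is supported at all; everything else follows
      * the default TX queue selection.
      */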
6002 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6003                                struct net_device *sb_dev)
6004 {
6005         int gso = skb_shinfo(skb)->gso_type;
6006
6007         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6008                 /*
6009                  * There is no way to determine the number of TSO/USO
6010                  * capable Queues. Let's always use Queue 0
6011                  * because if TSO/USO is supported then at least this
6012                  * one will be capable.
6013                  */
6014                 return 0;
6015         }
6016
6017         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6018 }
6019
6020 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6021 {
6022         struct stmmac_priv *priv = netdev_priv(ndev);
6023         int ret = 0;
6024
6025         ret = pm_runtime_resume_and_get(priv->device);
6026         if (ret < 0)
6027                 return ret;
6028
6029         ret = eth_mac_addr(ndev, addr);
6030         if (ret)
6031                 goto set_mac_error;
6032
6033         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6034
6035 set_mac_error:
6036         pm_runtime_put(priv->device);
6037
6038         return ret;
6039 }
6040
6041 #ifdef CONFIG_DEBUG_FS
6042 static struct dentry *stmmac_fs_dir;
6043
6044 static void sysfs_display_ring(void *head, int size, int extend_desc,
6045                                struct seq_file *seq, dma_addr_t dma_phy_addr)
6046 {
6047         int i;
6048         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6049         struct dma_desc *p = (struct dma_desc *)head;
6050         dma_addr_t dma_addr;
6051
6052         for (i = 0; i < size; i++) {
6053                 if (extend_desc) {
6054                         dma_addr = dma_phy_addr + i * sizeof(*ep);
6055                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6056                                    i, &dma_addr,
6057                                    le32_to_cpu(ep->basic.des0),
6058                                    le32_to_cpu(ep->basic.des1),
6059                                    le32_to_cpu(ep->basic.des2),
6060                                    le32_to_cpu(ep->basic.des3));
6061                         ep++;
6062                 } else {
6063                         dma_addr = dma_phy_addr + i * sizeof(*p);
6064                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6065                                    i, &dma_addr,
6066                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6067                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6068                         p++;
6069                 }
6070                 seq_printf(seq, "\n");
6071         }
6072 }
6073
6074 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6075 {
6076         struct net_device *dev = seq->private;
6077         struct stmmac_priv *priv = netdev_priv(dev);
6078         u32 rx_count = priv->plat->rx_queues_to_use;
6079         u32 tx_count = priv->plat->tx_queues_to_use;
6080         u32 queue;
6081
6082         if ((dev->flags & IFF_UP) == 0)
6083                 return 0;
6084
6085         for (queue = 0; queue < rx_count; queue++) {
6086                 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6087
6088                 seq_printf(seq, "RX Queue %d:\n", queue);
6089
6090                 if (priv->extend_desc) {
6091                         seq_printf(seq, "Extended descriptor ring:\n");
6092                         sysfs_display_ring((void *)rx_q->dma_erx,
6093                                            priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6094                 } else {
6095                         seq_printf(seq, "Descriptor ring:\n");
6096                         sysfs_display_ring((void *)rx_q->dma_rx,
6097                                            priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6098                 }
6099         }
6100
6101         for (queue = 0; queue < tx_count; queue++) {
6102                 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6103
6104                 seq_printf(seq, "TX Queue %d:\n", queue);
6105
6106                 if (priv->extend_desc) {
6107                         seq_printf(seq, "Extended descriptor ring:\n");
6108                         sysfs_display_ring((void *)tx_q->dma_etx,
6109                                            priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6110                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6111                         seq_printf(seq, "Descriptor ring:\n");
6112                         sysfs_display_ring((void *)tx_q->dma_tx,
6113                                            priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6114                 }
6115         }
6116
6117         return 0;
6118 }
6119 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6120
6121 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6122 {
6123         struct net_device *dev = seq->private;
6124         struct stmmac_priv *priv = netdev_priv(dev);
6125
6126         if (!priv->hw_cap_support) {
6127                 seq_printf(seq, "DMA HW features not supported\n");
6128                 return 0;
6129         }
6130
6131         seq_printf(seq, "==============================\n");
6132         seq_printf(seq, "\tDMA HW features\n");
6133         seq_printf(seq, "==============================\n");
6134
6135         seq_printf(seq, "\t10/100 Mbps: %s\n",
6136                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6137         seq_printf(seq, "\t1000 Mbps: %s\n",
6138                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6139         seq_printf(seq, "\tHalf duplex: %s\n",
6140                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6141         seq_printf(seq, "\tHash Filter: %s\n",
6142                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6143         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6144                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6145         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6146                    (priv->dma_cap.pcs) ? "Y" : "N");
6147         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6148                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6149         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6150                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6151         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6152                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6153         seq_printf(seq, "\tRMON module: %s\n",
6154                    (priv->dma_cap.rmon) ? "Y" : "N");
6155         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6156                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6157         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6158                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6159         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6160                    (priv->dma_cap.eee) ? "Y" : "N");
6161         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6162         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6163                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6164         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6165                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6166                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6167         } else {
6168                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6169                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6170                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6171                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6172         }
6173         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6174                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6175         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6176                    priv->dma_cap.number_rx_channel);
6177         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6178                    priv->dma_cap.number_tx_channel);
6179         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6180                    priv->dma_cap.number_rx_queues);
6181         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6182                    priv->dma_cap.number_tx_queues);
6183         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6184                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6185         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6186         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6187         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6188         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6189         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6190                    priv->dma_cap.pps_out_num);
6191         seq_printf(seq, "\tSafety Features: %s\n",
6192                    priv->dma_cap.asp ? "Y" : "N");
6193         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6194                    priv->dma_cap.frpsel ? "Y" : "N");
6195         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6196                    priv->dma_cap.addr64);
6197         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6198                    priv->dma_cap.rssen ? "Y" : "N");
6199         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6200                    priv->dma_cap.vlhash ? "Y" : "N");
6201         seq_printf(seq, "\tSplit Header: %s\n",
6202                    priv->dma_cap.sphen ? "Y" : "N");
6203         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6204                    priv->dma_cap.vlins ? "Y" : "N");
6205         seq_printf(seq, "\tDouble VLAN: %s\n",
6206                    priv->dma_cap.dvlan ? "Y" : "N");
6207         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6208                    priv->dma_cap.l3l4fnum);
6209         seq_printf(seq, "\tARP Offloading: %s\n",
6210                    priv->dma_cap.arpoffsel ? "Y" : "N");
6211         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6212                    priv->dma_cap.estsel ? "Y" : "N");
6213         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6214                    priv->dma_cap.fpesel ? "Y" : "N");
6215         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6216                    priv->dma_cap.tbssel ? "Y" : "N");
6217         return 0;
6218 }
6219 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6220
6221 /* Use network device events to rename debugfs file entries.
6222  */
6223 static int stmmac_device_event(struct notifier_block *unused,
6224                                unsigned long event, void *ptr)
6225 {
6226         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6227         struct stmmac_priv *priv = netdev_priv(dev);
6228
6229         if (dev->netdev_ops != &stmmac_netdev_ops)
6230                 goto done;
6231
6232         switch (event) {
6233         case NETDEV_CHANGENAME:
6234                 if (priv->dbgfs_dir)
6235                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6236                                                          priv->dbgfs_dir,
6237                                                          stmmac_fs_dir,
6238                                                          dev->name);
6239                 break;
6240         }
6241 done:
6242         return NOTIFY_DONE;
6243 }
6244
6245 static struct notifier_block stmmac_notifier = {
6246         .notifier_call = stmmac_device_event,
6247 };
6248
6249 static void stmmac_init_fs(struct net_device *dev)
6250 {
6251         struct stmmac_priv *priv = netdev_priv(dev);
6252
6253         rtnl_lock();
6254
6255         /* Create per netdev entries */
6256         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6257
6258         /* Entry to report DMA RX/TX rings */
6259         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6260                             &stmmac_rings_status_fops);
6261
6262         /* Entry to report the DMA HW features */
6263         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6264                             &stmmac_dma_cap_fops);
6265
6266         rtnl_unlock();
6267 }
6268
6269 static void stmmac_exit_fs(struct net_device *dev)
6270 {
6271         struct stmmac_priv *priv = netdev_priv(dev);
6272
6273         debugfs_remove_recursive(priv->dbgfs_dir);
6274 }
6275 #endif /* CONFIG_DEBUG_FS */
6276
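     /* Bit-serial CRC-32 (Ethernet polynomial, LSB first) over the 12 VID
      * bits of a little-endian VLAN ID, used to index the VLAN hash filter.
      */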
6277 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6278 {
6279         unsigned char *data = (unsigned char *)&vid_le;
6280         unsigned char data_byte = 0;
6281         u32 crc = ~0x0;
6282         u32 temp = 0;
6283         int i, bits;
6284
6285         bits = get_bitmask_order(VLAN_VID_MASK);
6286         for (i = 0; i < bits; i++) {
6287                 if ((i % 8) == 0)
6288                         data_byte = data[i / 8];
6289
6290                 temp = ((crc & 1) ^ data_byte) & 1;
6291                 crc >>= 1;
6292                 data_byte >>= 1;
6293
6294                 if (temp)
6295                         crc ^= 0xedb88320;
6296         }
6297
6298         return crc;
6299 }
6300
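     /* Recompute the VLAN filter from the active-VLAN bitmap: a 16-bin
      * hash when the core supports VLAN hash filtering, otherwise a single
      * perfect-match entry (at most one VID besides VID 0).
      */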
6301 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6302 {
6303         u32 crc, hash = 0;
6304         __le16 pmatch = 0;
6305         int count = 0;
6306         u16 vid = 0;
6307
6308         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6309                 __le16 vid_le = cpu_to_le16(vid);
6310                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6311                 hash |= (1 << crc);
6312                 count++;
6313         }
6314
6315         if (!priv->dma_cap.vlhash) {
6316                 if (count > 2) /* VID = 0 always passes filter */
6317                         return -EOPNOTSUPP;
6318
6319                 pmatch = cpu_to_le16(vid);
6320                 hash = 0;
6321         }
6322
6323         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6324 }
6325
6326 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6327 {
6328         struct stmmac_priv *priv = netdev_priv(ndev);
6329         bool is_double = false;
6330         int ret;
6331
6332         if (be16_to_cpu(proto) == ETH_P_8021AD)
6333                 is_double = true;
6334
6335         set_bit(vid, priv->active_vlans);
6336         ret = stmmac_vlan_update(priv, is_double);
6337         if (ret) {
6338                 clear_bit(vid, priv->active_vlans);
6339                 return ret;
6340         }
6341
6342         if (priv->hw->num_vlan) {
6343                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6344                 if (ret)
6345                         return ret;
6346         }
6347
6348         return 0;
6349 }
6350
6351 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6352 {
6353         struct stmmac_priv *priv = netdev_priv(ndev);
6354         bool is_double = false;
6355         int ret;
6356
6357         ret = pm_runtime_resume_and_get(priv->device);
6358         if (ret < 0)
6359                 return ret;
6360
6361         if (be16_to_cpu(proto) == ETH_P_8021AD)
6362                 is_double = true;
6363
6364         clear_bit(vid, priv->active_vlans);
6365
6366         if (priv->hw->num_vlan) {
6367                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6368                 if (ret)
6369                         goto del_vlan_error;
6370         }
6371
6372         ret = stmmac_vlan_update(priv, is_double);
6373
6374 del_vlan_error:
6375         pm_runtime_put(priv->device);
6376
6377         return ret;
6378 }
6379
6380 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6381 {
6382         struct stmmac_priv *priv = netdev_priv(dev);
6383
6384         switch (bpf->command) {
6385         case XDP_SETUP_PROG:
6386                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6387         case XDP_SETUP_XSK_POOL:
6388                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6389                                              bpf->xsk.queue_id);
6390         default:
6391                 return -EOPNOTSUPP;
6392         }
6393 }
6394
6395 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6396                            struct xdp_frame **frames, u32 flags)
6397 {
6398         struct stmmac_priv *priv = netdev_priv(dev);
6399         int cpu = smp_processor_id();
6400         struct netdev_queue *nq;
6401         int i, nxmit = 0;
6402         int queue;
6403
6404         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6405                 return -ENETDOWN;
6406
6407         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6408                 return -EINVAL;
6409
6410         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6411         nq = netdev_get_tx_queue(priv->dev, queue);
6412
6413         __netif_tx_lock(nq, cpu);
6414         /* Avoids TX time-out as we are sharing with slow path */
6415         txq_trans_cond_update(nq);
6416
6417         for (i = 0; i < num_frames; i++) {
6418                 int res;
6419
6420                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6421                 if (res == STMMAC_XDP_CONSUMED)
6422                         break;
6423
6424                 nxmit++;
6425         }
6426
6427         if (flags & XDP_XMIT_FLUSH) {
6428                 stmmac_flush_tx_descriptors(priv, queue);
6429                 stmmac_tx_timer_arm(priv, queue);
6430         }
6431
6432         __netif_tx_unlock(nq);
6433
6434         return nxmit;
6435 }
6436
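     /* Quiesce a single RX queue: mask its DMA interrupt, stop the RX DMA
      * channel and free its descriptor resources. stmmac_enable_rx_queue()
      * below performs the inverse sequence.
      */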
6437 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6438 {
6439         struct stmmac_channel *ch = &priv->channel[queue];
6440         unsigned long flags;
6441
6442         spin_lock_irqsave(&ch->lock, flags);
6443         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6444         spin_unlock_irqrestore(&ch->lock, flags);
6445
6446         stmmac_stop_rx_dma(priv, queue);
6447         __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6448 }
6449
6450 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6451 {
6452         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6453         struct stmmac_channel *ch = &priv->channel[queue];
6454         unsigned long flags;
6455         u32 buf_size;
6456         int ret;
6457
6458         ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6459         if (ret) {
6460                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6461                 return;
6462         }
6463
6464         ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6465         if (ret) {
6466                 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6467                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6468                 return;
6469         }
6470
6471         stmmac_reset_rx_queue(priv, queue);
6472         stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6473
6474         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6475                             rx_q->dma_rx_phy, rx_q->queue_index);
6476
6477         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6478                              sizeof(struct dma_desc));
6479         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6480                                rx_q->rx_tail_addr, rx_q->queue_index);
6481
6482         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6483                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6484                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6485                                       buf_size,
6486                                       rx_q->queue_index);
6487         } else {
6488                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6489                                       priv->dma_conf.dma_buf_sz,
6490                                       rx_q->queue_index);
6491         }
6492
6493         stmmac_start_rx_dma(priv, queue);
6494
6495         spin_lock_irqsave(&ch->lock, flags);
6496         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6497         spin_unlock_irqrestore(&ch->lock, flags);
6498 }
6499
6500 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6501 {
6502         struct stmmac_channel *ch = &priv->channel[queue];
6503         unsigned long flags;
6504
6505         spin_lock_irqsave(&ch->lock, flags);
6506         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6507         spin_unlock_irqrestore(&ch->lock, flags);
6508
6509         stmmac_stop_tx_dma(priv, queue);
6510         __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6511 }
6512
6513 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6514 {
6515         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6516         struct stmmac_channel *ch = &priv->channel[queue];
6517         unsigned long flags;
6518         int ret;
6519
6520         ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6521         if (ret) {
6522                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6523                 return;
6524         }
6525
6526         ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6527         if (ret) {
6528                 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6529                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6530                 return;
6531         }
6532
6533         stmmac_reset_tx_queue(priv, queue);
6534         stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6535
6536         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6537                             tx_q->dma_tx_phy, tx_q->queue_index);
6538
6539         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6540                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6541
6542         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6543         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6544                                tx_q->tx_tail_addr, tx_q->queue_index);
6545
6546         stmmac_start_tx_dma(priv, queue);
6547
6548         spin_lock_irqsave(&ch->lock, flags);
6549         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6550         spin_unlock_irqrestore(&ch->lock, flags);
6551 }
6552
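     /* Tear down the datapath for an XDP reconfiguration: stop TX, disable
      * NAPI, cancel the TX coalescing timers, free the IRQs, stop all DMA
      * channels, release the descriptor resources and disable the MAC.
      */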
6553 void stmmac_xdp_release(struct net_device *dev)
6554 {
6555         struct stmmac_priv *priv = netdev_priv(dev);
6556         u32 chan;
6557
6558         /* Ensure tx function is not running */
6559         netif_tx_disable(dev);
6560
6561         /* Disable NAPI process */
6562         stmmac_disable_all_queues(priv);
6563
6564         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6565                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6566
6567         /* Free the IRQ lines */
6568         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6569
6570         /* Stop TX/RX DMA channels */
6571         stmmac_stop_all_dma(priv);
6572
6573         /* Release and free the Rx/Tx resources */
6574         free_dma_desc_resources(priv, &priv->dma_conf);
6575
6576         /* Disable the MAC Rx/Tx */
6577         stmmac_mac_set(priv, priv->ioaddr, false);
6578
6579         /* set trans_start so we don't get spurious
6580          * watchdogs during reset
6581          */
6582         netif_trans_update(dev);
6583         netif_carrier_off(dev);
6584 }
6585
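     /* Bring the datapath back up after an XDP reconfiguration: reallocate
      * and initialize the descriptor rings, reprogram the DMA channels,
      * enable the MAC, request the IRQs and restart NAPI and the TX queues.
      */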
6586 int stmmac_xdp_open(struct net_device *dev)
6587 {
6588         struct stmmac_priv *priv = netdev_priv(dev);
6589         u32 rx_cnt = priv->plat->rx_queues_to_use;
6590         u32 tx_cnt = priv->plat->tx_queues_to_use;
6591         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6592         struct stmmac_rx_queue *rx_q;
6593         struct stmmac_tx_queue *tx_q;
6594         u32 buf_size;
6595         bool sph_en;
6596         u32 chan;
6597         int ret;
6598
6599         ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6600         if (ret < 0) {
6601                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6602                            __func__);
6603                 goto dma_desc_error;
6604         }
6605
6606         ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6607         if (ret < 0) {
6608                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6609                            __func__);
6610                 goto init_error;
6611         }
6612
6613         /* DMA CSR Channel configuration */
6614         for (chan = 0; chan < dma_csr_ch; chan++) {
6615                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6616                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6617         }
6618
6619         /* Adjust Split header */
6620         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6621
6622         /* DMA RX Channel Configuration */
6623         for (chan = 0; chan < rx_cnt; chan++) {
6624                 rx_q = &priv->dma_conf.rx_queue[chan];
6625
6626                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6627                                     rx_q->dma_rx_phy, chan);
6628
6629                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6630                                      (rx_q->buf_alloc_num *
6631                                       sizeof(struct dma_desc));
6632                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6633                                        rx_q->rx_tail_addr, chan);
6634
6635                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6636                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6637                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6638                                               buf_size,
6639                                               rx_q->queue_index);
6640                 } else {
6641                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6642                                               priv->dma_conf.dma_buf_sz,
6643                                               rx_q->queue_index);
6644                 }
6645
6646                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6647         }
6648
6649         /* DMA TX Channel Configuration */
6650         for (chan = 0; chan < tx_cnt; chan++) {
6651                 tx_q = &priv->dma_conf.tx_queue[chan];
6652
6653                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6654                                     tx_q->dma_tx_phy, chan);
6655
6656                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6657                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6658                                        tx_q->tx_tail_addr, chan);
6659
6660                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6661                 tx_q->txtimer.function = stmmac_tx_timer;
6662         }
6663
6664         /* Enable the MAC Rx/Tx */
6665         stmmac_mac_set(priv, priv->ioaddr, true);
6666
6667         /* Start Rx & Tx DMA Channels */
6668         stmmac_start_all_dma(priv);
6669
6670         ret = stmmac_request_irq(dev);
6671         if (ret)
6672                 goto irq_error;
6673
6674         /* Enable NAPI process */
6675         stmmac_enable_all_queues(priv);
6676         netif_carrier_on(dev);
6677         netif_tx_start_all_queues(dev);
6678         stmmac_enable_all_dma_irq(priv);
6679
6680         return 0;
6681
6682 irq_error:
6683         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6684                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6685
6686         stmmac_hw_teardown(dev);
6687 init_error:
6688         free_dma_desc_resources(priv, &priv->dma_conf);
6689 dma_desc_error:
6690         return ret;
6691 }
6692
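     /* .ndo_xsk_wakeup handler: after validating that the interface, XDP
      * and the requested queue are usable, kick the rxtx NAPI so that the
      * zero-copy rings get serviced (there is no per-channel SW interrupt
      * to trigger instead).
      */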
6693 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6694 {
6695         struct stmmac_priv *priv = netdev_priv(dev);
6696         struct stmmac_rx_queue *rx_q;
6697         struct stmmac_tx_queue *tx_q;
6698         struct stmmac_channel *ch;
6699
6700         if (test_bit(STMMAC_DOWN, &priv->state) ||
6701             !netif_carrier_ok(priv->dev))
6702                 return -ENETDOWN;
6703
6704         if (!stmmac_xdp_is_enabled(priv))
6705                 return -EINVAL;
6706
6707         if (queue >= priv->plat->rx_queues_to_use ||
6708             queue >= priv->plat->tx_queues_to_use)
6709                 return -EINVAL;
6710
6711         rx_q = &priv->dma_conf.rx_queue[queue];
6712         tx_q = &priv->dma_conf.tx_queue[queue];
6713         ch = &priv->channel[queue];
6714
6715         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6716                 return -EINVAL;
6717
6718         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6719                 /* EQoS does not have per-DMA channel SW interrupt,
6720                  * so we schedule the RX/TX NAPI straight away.
6721                  */
6722                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6723                         __napi_schedule(&ch->rxtx_napi);
6724         }
6725
6726         return 0;
6727 }
6728
6729 static const struct net_device_ops stmmac_netdev_ops = {
6730         .ndo_open = stmmac_open,
6731         .ndo_start_xmit = stmmac_xmit,
6732         .ndo_stop = stmmac_release,
6733         .ndo_change_mtu = stmmac_change_mtu,
6734         .ndo_fix_features = stmmac_fix_features,
6735         .ndo_set_features = stmmac_set_features,
6736         .ndo_set_rx_mode = stmmac_set_rx_mode,
6737         .ndo_tx_timeout = stmmac_tx_timeout,
6738         .ndo_eth_ioctl = stmmac_ioctl,
6739         .ndo_setup_tc = stmmac_setup_tc,
6740         .ndo_select_queue = stmmac_select_queue,
6741 #ifdef CONFIG_NET_POLL_CONTROLLER
6742         .ndo_poll_controller = stmmac_poll_controller,
6743 #endif
6744         .ndo_set_mac_address = stmmac_set_mac_address,
6745         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6746         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6747         .ndo_bpf = stmmac_bpf,
6748         .ndo_xdp_xmit = stmmac_xdp_xmit,
6749         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6750 };
6751
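     /* Deferred adapter reset requested from error paths: close and reopen
      * the device under rtnl while the DOWN/RESETING flags keep the other
      * paths away.
      */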
6752 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6753 {
6754         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6755                 return;
6756         if (test_bit(STMMAC_DOWN, &priv->state))
6757                 return;
6758
6759         netdev_err(priv->dev, "Reset adapter.\n");
6760
6761         rtnl_lock();
6762         netif_trans_update(priv->dev);
6763         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6764                 usleep_range(1000, 2000);
6765
6766         set_bit(STMMAC_DOWN, &priv->state);
6767         dev_close(priv->dev);
6768         dev_open(priv->dev, NULL);
6769         clear_bit(STMMAC_DOWN, &priv->state);
6770         clear_bit(STMMAC_RESETING, &priv->state);
6771         rtnl_unlock();
6772 }
6773
6774 static void stmmac_service_task(struct work_struct *work)
6775 {
6776         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6777                         service_task);
6778
6779         stmmac_reset_subtask(priv);
6780         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6781 }
6782
6783 /**
6784  *  stmmac_hw_init - Init the MAC device
6785  *  @priv: driver private structure
6786  *  Description: this function is to configure the MAC device according to
6787  *  some platform parameters or the HW capability register. It prepares the
6788  *  driver to use either ring or chain modes and to setup either enhanced or
6789  *  normal descriptors.
6790  */
6791 static int stmmac_hw_init(struct stmmac_priv *priv)
6792 {
6793         int ret;
6794
6795         /* dwmac-sun8i only works in chain mode */
6796         if (priv->plat->has_sun8i)
6797                 chain_mode = 1;
6798         priv->chain_mode = chain_mode;
6799
6800         /* Initialize HW Interface */
6801         ret = stmmac_hwif_init(priv);
6802         if (ret)
6803                 return ret;
6804
6805         /* Get the HW capability (GMAC cores newer than 3.50a) */
6806         priv->hw_cap_support = stmmac_get_hw_features(priv);
6807         if (priv->hw_cap_support) {
6808                 dev_info(priv->device, "DMA HW capability register supported\n");
6809
6810                 /* We can override some gmac/dma configuration fields
6811                  * (e.g. enh_desc, tx_coe) that are passed through the
6812                  * platform with the values from the HW capability
6813                  * register (if supported).
6814                  */
6815                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6816                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6817                                 !priv->plat->use_phy_wol;
6818                 priv->hw->pmt = priv->plat->pmt;
6819                 if (priv->dma_cap.hash_tb_sz) {
6820                         priv->hw->multicast_filter_bins =
6821                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6822                         priv->hw->mcast_bits_log2 =
6823                                         ilog2(priv->hw->multicast_filter_bins);
6824                 }
6825
6826                 /* TXCOE doesn't work in thresh DMA mode */
6827                 if (priv->plat->force_thresh_dma_mode)
6828                         priv->plat->tx_coe = 0;
6829                 else
6830                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6831
6832                 /* In case of GMAC4 rx_coe is from HW cap register. */
6833                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6834
6835                 if (priv->dma_cap.rx_coe_type2)
6836                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6837                 else if (priv->dma_cap.rx_coe_type1)
6838                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6839
6840         } else {
6841                 dev_info(priv->device, "No HW DMA feature register supported\n");
6842         }
6843
6844         if (priv->plat->rx_coe) {
6845                 priv->hw->rx_csum = priv->plat->rx_coe;
6846                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6847                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6848                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6849         }
6850         if (priv->plat->tx_coe)
6851                 dev_info(priv->device, "TX Checksum insertion supported\n");
6852
6853         if (priv->plat->pmt) {
6854                 dev_info(priv->device, "Wake-Up On LAN supported\n");
6855                 device_set_wakeup_capable(priv->device, 1);
6856         }
6857
6858         if (priv->dma_cap.tsoen)
6859                 dev_info(priv->device, "TSO supported\n");
6860
6861         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6862         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6863
6864         /* Run HW quirks, if any */
6865         if (priv->hwif_quirks) {
6866                 ret = priv->hwif_quirks(priv);
6867                 if (ret)
6868                         return ret;
6869         }
6870
6871         /* Rx Watchdog is available in cores newer than 3.40.
6872          * In some cases, for example on buggy HW, this feature
6873          * has to be disabled; this can be done by passing the
6874          * riwt_off field from the platform.
6875          */
6876         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6877             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6878                 priv->use_riwt = 1;
6879                 dev_info(priv->device,
6880                          "Enable RX Mitigation via HW Watchdog Timer\n");
6881         }
6882
6883         return 0;
6884 }
6885
6886 static void stmmac_napi_add(struct net_device *dev)
6887 {
6888         struct stmmac_priv *priv = netdev_priv(dev);
6889         u32 queue, maxq;
6890
6891         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6892
6893         for (queue = 0; queue < maxq; queue++) {
6894                 struct stmmac_channel *ch = &priv->channel[queue];
6895
6896                 ch->priv_data = priv;
6897                 ch->index = queue;
6898                 spin_lock_init(&ch->lock);
6899
6900                 if (queue < priv->plat->rx_queues_to_use) {
6901                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
6902                 }
6903                 if (queue < priv->plat->tx_queues_to_use) {
6904                         netif_napi_add_tx(dev, &ch->tx_napi,
6905                                           stmmac_napi_poll_tx);
6906                 }
6907                 if (queue < priv->plat->rx_queues_to_use &&
6908                     queue < priv->plat->tx_queues_to_use) {
6909                         netif_napi_add(dev, &ch->rxtx_napi,
6910                                        stmmac_napi_poll_rxtx);
6911                 }
6912         }
6913 }
6914
6915 static void stmmac_napi_del(struct net_device *dev)
6916 {
6917         struct stmmac_priv *priv = netdev_priv(dev);
6918         u32 queue, maxq;
6919
6920         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6921
6922         for (queue = 0; queue < maxq; queue++) {
6923                 struct stmmac_channel *ch = &priv->channel[queue];
6924
6925                 if (queue < priv->plat->rx_queues_to_use)
6926                         netif_napi_del(&ch->rx_napi);
6927                 if (queue < priv->plat->tx_queues_to_use)
6928                         netif_napi_del(&ch->tx_napi);
6929                 if (queue < priv->plat->rx_queues_to_use &&
6930                     queue < priv->plat->tx_queues_to_use) {
6931                         netif_napi_del(&ch->rxtx_napi);
6932                 }
6933         }
6934 }
6935
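     /* Apply a new RX/TX queue count (e.g. from ethtool): stop the
      * interface if it is running, rebuild the NAPI contexts for the new
      * counts and restart it.
      */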
6936 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6937 {
6938         struct stmmac_priv *priv = netdev_priv(dev);
6939         int ret = 0;
6940
6941         if (netif_running(dev))
6942                 stmmac_release(dev);
6943
6944         stmmac_napi_del(dev);
6945
6946         priv->plat->rx_queues_to_use = rx_cnt;
6947         priv->plat->tx_queues_to_use = tx_cnt;
6948
6949         stmmac_napi_add(dev);
6950
6951         if (netif_running(dev))
6952                 ret = stmmac_open(dev);
6953
6954         return ret;
6955 }
6956
6957 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6958 {
6959         struct stmmac_priv *priv = netdev_priv(dev);
6960         int ret = 0;
6961
6962         if (netif_running(dev))
6963                 stmmac_release(dev);
6964
6965         priv->dma_conf.dma_rx_size = rx_size;
6966         priv->dma_conf.dma_tx_size = tx_size;
6967
6968         if (netif_running(dev))
6969                 ret = stmmac_open(dev);
6970
6971         return ret;
6972 }
6973
6974 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6975 static void stmmac_fpe_lp_task(struct work_struct *work)
6976 {
6977         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6978                                                 fpe_task);
6979         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6980         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6981         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6982         bool *hs_enable = &fpe_cfg->hs_enable;
6983         bool *enable = &fpe_cfg->enable;
6984         int retries = 20;
6985
6986         while (retries-- > 0) {
6987                 /* Bail out immediately if FPE handshake is OFF */
6988                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6989                         break;
6990
6991                 if (*lo_state == FPE_STATE_ENTERING_ON &&
6992                     *lp_state == FPE_STATE_ENTERING_ON) {
6993                         stmmac_fpe_configure(priv, priv->ioaddr,
6994                                              priv->plat->tx_queues_to_use,
6995                                              priv->plat->rx_queues_to_use,
6996                                              *enable);
6997
6998                         netdev_info(priv->dev, "configured FPE\n");
6999
7000                         *lo_state = FPE_STATE_ON;
7001                         *lp_state = FPE_STATE_ON;
7002                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7003                         break;
7004                 }
7005
7006                 if ((*lo_state == FPE_STATE_CAPABLE ||
7007                      *lo_state == FPE_STATE_ENTERING_ON) &&
7008                      *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7010                                     *lo_state, *lp_state);
7011                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7012                                                 MPACKET_VERIFY);
7013                 }
7014                 /* Sleep then retry */
7015                 msleep(500);
7016         }
7017
7018         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7019 }
7020
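/* Start or stop the FPE verification handshake with the link partner: when
 * enabling, a verify mPacket is sent to the link partner; when disabling,
 * both the local and link-partner handshake states are reset to OFF.
 */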
7021 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7022 {
7023         if (priv->plat->fpe_cfg->hs_enable != enable) {
7024                 if (enable) {
7025                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7026                                                 MPACKET_VERIFY);
7027                 } else {
7028                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7029                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7030                 }
7031
7032                 priv->plat->fpe_cfg->hs_enable = enable;
7033         }
7034 }
7035
7036 /**
7037  * stmmac_dvr_probe
7038  * @device: device pointer
7039  * @plat_dat: platform data pointer
7040  * @res: stmmac resource pointer
 * Description: this is the main probe function: it allocates the net_device,
 * sets up the private structure and registers the device.
 * Return:
 * 0 on success, otherwise a negative errno.
7045  */
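/* Typical usage (illustrative sketch only): a bus glue driver fills in a
 * struct stmmac_resources and a plat_stmmacenet_data (on DT platforms
 * usually via stmmac_probe_config_dt()) and then calls:
 *
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *
 * See the dwmac-* glue drivers and stmmac_pci.c for real callers.
 */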
7046 int stmmac_dvr_probe(struct device *device,
7047                      struct plat_stmmacenet_data *plat_dat,
7048                      struct stmmac_resources *res)
7049 {
7050         struct net_device *ndev = NULL;
7051         struct stmmac_priv *priv;
7052         u32 rxq;
7053         int i, ret = 0;
7054
7055         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7056                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7057         if (!ndev)
7058                 return -ENOMEM;
7059
7060         SET_NETDEV_DEV(ndev, device);
7061
7062         priv = netdev_priv(ndev);
7063         priv->device = device;
7064         priv->dev = ndev;
7065
7066         stmmac_set_ethtool_ops(ndev);
7067         priv->pause = pause;
7068         priv->plat = plat_dat;
7069         priv->ioaddr = res->addr;
7070         priv->dev->base_addr = (unsigned long)res->addr;
7071         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
7072
7073         priv->dev->irq = res->irq;
7074         priv->wol_irq = res->wol_irq;
7075         priv->lpi_irq = res->lpi_irq;
7076         priv->sfty_ce_irq = res->sfty_ce_irq;
7077         priv->sfty_ue_irq = res->sfty_ue_irq;
7078         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7079                 priv->rx_irq[i] = res->rx_irq[i];
7080         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7081                 priv->tx_irq[i] = res->tx_irq[i];
7082
7083         if (!is_zero_ether_addr(res->mac))
7084                 eth_hw_addr_set(priv->dev, res->mac);
7085
7086         dev_set_drvdata(device, priv->dev);
7087
7088         /* Verify driver arguments */
7089         stmmac_verify_args();
7090
7091         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7092         if (!priv->af_xdp_zc_qps)
7093                 return -ENOMEM;
7094
7095         /* Allocate workqueue */
7096         priv->wq = create_singlethread_workqueue("stmmac_wq");
7097         if (!priv->wq) {
7098                 dev_err(priv->device, "failed to create workqueue\n");
7099                 return -ENOMEM;
7100         }
7101
7102         INIT_WORK(&priv->service_task, stmmac_service_task);
7103
7104         /* Initialize Link Partner FPE workqueue */
7105         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7106
	/* Override with kernel parameters if supplied. XXX CRS XXX:
	 * this needs to have multiple instances.
	 */
7110         if ((phyaddr >= 0) && (phyaddr <= 31))
7111                 priv->plat->phy_addr = phyaddr;
7112
7113         if (priv->plat->stmmac_rst) {
7114                 ret = reset_control_assert(priv->plat->stmmac_rst);
7115                 reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only a reset callback instead of
		 * an assert + deassert callback pair.
		 */
7119                 if (ret == -ENOTSUPP)
7120                         reset_control_reset(priv->plat->stmmac_rst);
7121         }
7122
7123         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7124         if (ret == -ENOTSUPP)
7125                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7126                         ERR_PTR(ret));
7127
7128         /* Init MAC and get the capabilities */
7129         ret = stmmac_hw_init(priv);
7130         if (ret)
7131                 goto error_hw_init;
7132
7133         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7134          */
7135         if (priv->synopsys_id < DWMAC_CORE_5_20)
7136                 priv->plat->dma_cfg->dche = false;
7137
7138         stmmac_check_ether_addr(priv);
7139
7140         ndev->netdev_ops = &stmmac_netdev_ops;
7141
7142         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7143                             NETIF_F_RXCSUM;
7144
7145         ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;
7149
7150         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7151                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7152                 if (priv->plat->has_gmac4)
7153                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7154                 priv->tso = true;
7155                 dev_info(priv->device, "TSO feature enabled\n");
7156         }
7157
7158         if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
7159                 ndev->hw_features |= NETIF_F_GRO;
7160                 priv->sph_cap = true;
7161                 priv->sph = priv->sph_cap;
7162                 dev_info(priv->device, "SPH feature enabled\n");
7163         }
7164
	/* The current IP register MAC_HW_Feature1[ADDR64] only encodes the
	 * 32/40/64 bit widths, but some SoCs support other widths: the
	 * i.MX8MP, for example, supports 34 bits, which is reported as 40
	 * bits in MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64
	 * according to the real HW design.
	 */
7170         if (priv->plat->addr64)
7171                 priv->dma_cap.addr64 = priv->plat->addr64;
7172
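	/* Try the DMA width reported by (or overridden for) the hardware;
	 * if setting that mask fails, fall back to a 32-bit DMA mask and
	 * treat the device as 32-bit capable only.
	 */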
7173         if (priv->dma_cap.addr64) {
7174                 ret = dma_set_mask_and_coherent(device,
7175                                 DMA_BIT_MASK(priv->dma_cap.addr64));
7176                 if (!ret) {
7177                         dev_info(priv->device, "Using %d bits DMA width\n",
7178                                  priv->dma_cap.addr64);
7179
7180                         /*
7181                          * If more than 32 bits can be addressed, make sure to
7182                          * enable enhanced addressing mode.
7183                          */
7184                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7185                                 priv->plat->dma_cfg->eame = true;
7186                 } else {
7187                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7188                         if (ret) {
7189                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7190                                 goto error_hw_init;
7191                         }
7192
7193                         priv->dma_cap.addr64 = 32;
7194                 }
7195         }
7196
7197         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7198         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7199 #ifdef STMMAC_VLAN_TAG_USED
7200         /* Both mac100 and gmac support receive VLAN tag detection */
7201         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7202         if (priv->dma_cap.vlhash) {
7203                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7204                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7205         }
7206         if (priv->dma_cap.vlins) {
7207                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7208                 if (priv->dma_cap.dvlan)
7209                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7210         }
7211 #endif
7212         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7213
7214         /* Initialize RSS */
7215         rxq = priv->plat->rx_queues_to_use;
7216         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7217         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7218                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7219
7220         if (priv->dma_cap.rssen && priv->plat->rss_en)
7221                 ndev->features |= NETIF_F_RXHASH;
7222
7223         /* MTU range: 46 - hw-specific max */
7224         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7225         if (priv->plat->has_xgmac)
7226                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7227         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7228                 ndev->max_mtu = JUMBO_LEN;
7229         else
7230                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, nor
	 * if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
7234         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7235             (priv->plat->maxmtu >= ndev->min_mtu))
7236                 ndev->max_mtu = priv->plat->maxmtu;
7237         else if (priv->plat->maxmtu < ndev->min_mtu)
7238                 dev_warn(priv->device,
			 "%s: warning: maxmtu has an invalid value (%d)\n",
7240                          __func__, priv->plat->maxmtu);
7241
7242         if (flow_ctrl)
7243                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7244
7245         /* Setup channels NAPI */
7246         stmmac_napi_add(ndev);
7247
7248         mutex_init(&priv->lock);
7249
	/* If a specific clk_csr value is passed from the platform, this
	 * means that the CSR Clock Range selection is fixed and cannot be
	 * changed at run-time. Otherwise, the driver will try to set the
	 * MDC clock dynamically, according to the actual CSR clock input.
7255          */
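	/* For example, a glue layer may pass one of the fixed STMMAC_CSR_*
	 * dividers defined in include/linux/stmmac.h (e.g. STMMAC_CSR_100_150M)
	 * when the CSR clock is fixed by the SoC design.
	 */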
7256         if (priv->plat->clk_csr >= 0)
7257                 priv->clk_csr = priv->plat->clk_csr;
7258         else
7259                 stmmac_clk_csr_set(priv);
7260
7261         stmmac_check_pcs_mode(priv);
7262
7263         pm_runtime_get_noresume(device);
7264         pm_runtime_set_active(device);
7265         if (!pm_runtime_enabled(device))
7266                 pm_runtime_enable(device);
7267
7268         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7269             priv->hw->pcs != STMMAC_PCS_RTBI) {
7270                 /* MDIO bus Registration */
7271                 ret = stmmac_mdio_register(ndev);
7272                 if (ret < 0) {
7273                         dev_err_probe(priv->device, ret,
7274                                       "%s: MDIO bus (id: %d) registration failed\n",
7275                                       __func__, priv->plat->bus_id);
7276                         goto error_mdio_register;
7277                 }
7278         }
7279
7280         if (priv->plat->speed_mode_2500)
7281                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7282
7283         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7284                 ret = stmmac_xpcs_setup(priv->mii);
7285                 if (ret)
7286                         goto error_xpcs_setup;
7287         }
7288
7289         ret = stmmac_phy_setup(priv);
7290         if (ret) {
7291                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7292                 goto error_phy_setup;
7293         }
7294
7295         ret = register_netdev(ndev);
7296         if (ret) {
7297                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7298                         __func__, ret);
7299                 goto error_netdev_register;
7300         }
7301
7302 #ifdef CONFIG_DEBUG_FS
7303         stmmac_init_fs(ndev);
7304 #endif
7305
7306         if (priv->plat->dump_debug_regs)
7307                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7308
7309         /* Let pm_runtime_put() disable the clocks.
7310          * If CONFIG_PM is not enabled, the clocks will stay powered.
7311          */
7312         pm_runtime_put(device);
7313
7314         return ret;
7315
7316 error_netdev_register:
7317         phylink_destroy(priv->phylink);
7318 error_xpcs_setup:
7319 error_phy_setup:
7320         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7321             priv->hw->pcs != STMMAC_PCS_RTBI)
7322                 stmmac_mdio_unregister(ndev);
7323 error_mdio_register:
7324         stmmac_napi_del(ndev);
7325 error_hw_init:
7326         destroy_workqueue(priv->wq);
7327         bitmap_free(priv->af_xdp_zc_qps);
7328
7329         return ret;
7330 }
7331 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7332
7333 /**
7334  * stmmac_dvr_remove
7335  * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status and releases the DMA descriptor rings.
7338  */
7339 int stmmac_dvr_remove(struct device *dev)
7340 {
7341         struct net_device *ndev = dev_get_drvdata(dev);
7342         struct stmmac_priv *priv = netdev_priv(ndev);
7343
	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7345
7346         pm_runtime_get_sync(dev);
7347
7348         stmmac_stop_all_dma(priv);
7349         stmmac_mac_set(priv, priv->ioaddr, false);
7350         netif_carrier_off(ndev);
7351         unregister_netdev(ndev);
7352
	/* SerDes power down needs to happen after the VLAN filter is
	 * deleted, which is triggered by unregister_netdev().
	 */
7356         if (priv->plat->serdes_powerdown)
7357                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7358
7359 #ifdef CONFIG_DEBUG_FS
7360         stmmac_exit_fs(ndev);
7361 #endif
7362         phylink_destroy(priv->phylink);
7363         if (priv->plat->stmmac_rst)
7364                 reset_control_assert(priv->plat->stmmac_rst);
7365         reset_control_assert(priv->plat->stmmac_ahb_rst);
7366         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7367             priv->hw->pcs != STMMAC_PCS_RTBI)
7368                 stmmac_mdio_unregister(ndev);
7369         destroy_workqueue(priv->wq);
7370         mutex_destroy(&priv->lock);
7371         bitmap_free(priv->af_xdp_zc_qps);
7372
7373         pm_runtime_disable(dev);
7374         pm_runtime_put_noidle(dev);
7375
7376         return 0;
7377 }
7378 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7379
7380 /**
7381  * stmmac_suspend - suspend callback
7382  * @dev: device pointer
 * Description: this is the function to suspend the device; it is called
 * by the platform driver to stop the network queue, program the PMT
 * register (for WoL) and clean up and release the driver resources.
7386  */
7387 int stmmac_suspend(struct device *dev)
7388 {
7389         struct net_device *ndev = dev_get_drvdata(dev);
7390         struct stmmac_priv *priv = netdev_priv(ndev);
7391         u32 chan;
7392
7393         if (!ndev || !netif_running(ndev))
7394                 return 0;
7395
7396         mutex_lock(&priv->lock);
7397
7398         netif_device_detach(ndev);
7399
7400         stmmac_disable_all_queues(priv);
7401
7402         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7403                 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7404
7405         if (priv->eee_enabled) {
7406                 priv->tx_path_in_lpi_mode = false;
7407                 del_timer_sync(&priv->eee_ctrl_timer);
7408         }
7409
7410         /* Stop TX/RX DMA */
7411         stmmac_stop_all_dma(priv);
7412
7413         if (priv->plat->serdes_powerdown)
7414                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7415
7416         /* Enable Power down mode by programming the PMT regs */
7417         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7418                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7419                 priv->irq_wake = 1;
7420         } else {
7421                 stmmac_mac_set(priv, priv->ioaddr, false);
7422                 pinctrl_pm_select_sleep_state(priv->device);
7423         }
7424
7425         mutex_unlock(&priv->lock);
7426
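	/* If the MAC itself can wake the system (PMT), keep the PHY link up
	 * so that wake-up packets can still be received; otherwise let
	 * phylink bring the link down, optionally negotiating a slower speed
	 * first to save power when only PHY-based WoL is in use.
	 */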
7427         rtnl_lock();
7428         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7429                 phylink_suspend(priv->phylink, true);
7430         } else {
7431                 if (device_may_wakeup(priv->device))
7432                         phylink_speed_down(priv->phylink, false);
7433                 phylink_suspend(priv->phylink, false);
7434         }
7435         rtnl_unlock();
7436
7437         if (priv->dma_cap.fpesel) {
7438                 /* Disable FPE */
7439                 stmmac_fpe_configure(priv, priv->ioaddr,
7440                                      priv->plat->tx_queues_to_use,
7441                                      priv->plat->rx_queues_to_use, false);
7442
7443                 stmmac_fpe_handshake(priv, false);
7444                 stmmac_fpe_stop_wq(priv);
7445         }
7446
7447         priv->speed = SPEED_UNKNOWN;
7448         return 0;
7449 }
7450 EXPORT_SYMBOL_GPL(stmmac_suspend);
7451
7452 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7453 {
7454         struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7455
7456         rx_q->cur_rx = 0;
7457         rx_q->dirty_rx = 0;
7458 }
7459
7460 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7461 {
7462         struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7463
7464         tx_q->cur_tx = 0;
7465         tx_q->dirty_tx = 0;
7466         tx_q->mss = 0;
7467
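	/* Also reset the BQL (byte queue limits) state of this TX queue */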
7468         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7469 }
7470
7471 /**
7472  * stmmac_reset_queues_param - reset queue parameters
7473  * @priv: device pointer
7474  */
7475 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7476 {
7477         u32 rx_cnt = priv->plat->rx_queues_to_use;
7478         u32 tx_cnt = priv->plat->tx_queues_to_use;
7479         u32 queue;
7480
7481         for (queue = 0; queue < rx_cnt; queue++)
7482                 stmmac_reset_rx_queue(priv, queue);
7483
7484         for (queue = 0; queue < tx_cnt; queue++)
7485                 stmmac_reset_tx_queue(priv, queue);
7486 }
7487
7488 /**
7489  * stmmac_resume - resume callback
7490  * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and CORE
 * in a usable state.
7493  */
7494 int stmmac_resume(struct device *dev)
7495 {
7496         struct net_device *ndev = dev_get_drvdata(dev);
7497         struct stmmac_priv *priv = netdev_priv(ndev);
7498         int ret;
7499
7500         if (!netif_running(ndev))
7501                 return 0;
7502
	/* The Power Down bit in the PMT register is cleared automatically
	 * as soon as a magic packet or a Wake-up frame is received. Even so,
	 * it's better to clear this bit manually because it can cause
	 * problems while resuming from other devices (e.g. serial console).
	 */
7509         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7510                 mutex_lock(&priv->lock);
7511                 stmmac_pmt(priv, priv->hw, 0);
7512                 mutex_unlock(&priv->lock);
7513                 priv->irq_wake = 0;
7514         } else {
7515                 pinctrl_pm_select_default_state(priv->device);
7516                 /* reset the phy so that it's ready */
7517                 if (priv->mii)
7518                         stmmac_mdio_reset(priv->mii);
7519         }
7520
7521         if (priv->plat->serdes_powerup) {
7522                 ret = priv->plat->serdes_powerup(ndev,
7523                                                  priv->plat->bsp_priv);
7524
7525                 if (ret < 0)
7526                         return ret;
7527         }
7528
7529         rtnl_lock();
7530         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7531                 phylink_resume(priv->phylink);
7532         } else {
7533                 phylink_resume(priv->phylink);
7534                 if (device_may_wakeup(priv->device))
7535                         phylink_speed_up(priv->phylink);
7536         }
7537         rtnl_unlock();
7538
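	/* Re-initialise the driver state from scratch: reset the software
	 * ring pointers, drop any stale TX skbs, clear the descriptors and
	 * reprogram the MAC/DMA before re-enabling the queues and IRQs.
	 */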
7539         rtnl_lock();
7540         mutex_lock(&priv->lock);
7541
7542         stmmac_reset_queues_param(priv);
7543
7544         stmmac_free_tx_skbufs(priv);
7545         stmmac_clear_descriptors(priv, &priv->dma_conf);
7546
7547         stmmac_hw_setup(ndev, false);
7548         stmmac_init_coalesce(priv);
7549         stmmac_set_rx_mode(ndev);
7550
7551         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7552
7553         stmmac_enable_all_queues(priv);
7554         stmmac_enable_all_dma_irq(priv);
7555
7556         mutex_unlock(&priv->lock);
7557         rtnl_unlock();
7558
7559         netif_device_attach(ndev);
7560
7561         return 0;
7562 }
7563 EXPORT_SYMBOL_GPL(stmmac_resume);
7564
7565 #ifndef MODULE
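/* When the driver is built in, options can also be passed on the kernel
 * command line via the "stmmaceth=" parameter parsed below, for example:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:1000
 *
 * Each option maps to the module parameter of the same name.
 */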
7566 static int __init stmmac_cmdline_opt(char *str)
7567 {
7568         char *opt;
7569
7570         if (!str || !*str)
7571                 return 1;
7572         while ((opt = strsep(&str, ",")) != NULL) {
7573                 if (!strncmp(opt, "debug:", 6)) {
7574                         if (kstrtoint(opt + 6, 0, &debug))
7575                                 goto err;
7576                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7577                         if (kstrtoint(opt + 8, 0, &phyaddr))
7578                                 goto err;
7579                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7580                         if (kstrtoint(opt + 7, 0, &buf_sz))
7581                                 goto err;
7582                 } else if (!strncmp(opt, "tc:", 3)) {
7583                         if (kstrtoint(opt + 3, 0, &tc))
7584                                 goto err;
7585                 } else if (!strncmp(opt, "watchdog:", 9)) {
7586                         if (kstrtoint(opt + 9, 0, &watchdog))
7587                                 goto err;
7588                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7589                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7590                                 goto err;
7591                 } else if (!strncmp(opt, "pause:", 6)) {
7592                         if (kstrtoint(opt + 6, 0, &pause))
7593                                 goto err;
7594                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7595                         if (kstrtoint(opt + 10, 0, &eee_timer))
7596                                 goto err;
7597                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7598                         if (kstrtoint(opt + 11, 0, &chain_mode))
7599                                 goto err;
7600                 }
7601         }
7602         return 1;
7603
7604 err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7606         return 1;
7607 }
7608
7609 __setup("stmmaceth=", stmmac_cmdline_opt);
7610 #endif /* MODULE */
7611
7612 static int __init stmmac_init(void)
7613 {
7614 #ifdef CONFIG_DEBUG_FS
7615         /* Create debugfs main directory if it doesn't exist yet */
7616         if (!stmmac_fs_dir)
7617                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7618         register_netdevice_notifier(&stmmac_notifier);
7619 #endif
7620
7621         return 0;
7622 }
7623
7624 static void __exit stmmac_exit(void)
7625 {
7626 #ifdef CONFIG_DEBUG_FS
7627         unregister_netdevice_notifier(&stmmac_notifier);
7628         debugfs_remove_recursive(stmmac_fs_dir);
7629 #endif
7630 }
7631
7632 module_init(stmmac_init)
7633 module_exit(stmmac_exit)
7634
7635 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7636 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7637 MODULE_LICENSE("GPL");