net: stmmac: remove phylink_config.pcs_poll usage
[linux-2.6-microblaze.git] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/pkt_cls.h>
43 #include <net/xdp_sock_drv.h>
44 #include "stmmac_ptp.h"
45 #include "stmmac.h"
46 #include "stmmac_xdp.h"
47 #include <linux/reset.h>
48 #include <linux/of_mdio.h>
49 #include "dwmac1000.h"
50 #include "dwxgmac2.h"
51 #include "hwif.h"
52
53 /* As long as the interface is active, we keep the timestamping counter enabled
54  * with fine resolution and binary rollover. This avoid non-monotonic behavior
55  * (clock jumps) when changing timestamping settings at runtime.
56  */
57 #define STMMAC_HWTS_ACTIVE      (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
58                                  PTP_TCR_TSCTRLSSR)
59
60 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
61 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
62
63 /* Module parameters */
64 #define TX_TIMEO        5000
65 static int watchdog = TX_TIMEO;
66 module_param(watchdog, int, 0644);
67 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
68
69 static int debug = -1;
70 module_param(debug, int, 0644);
71 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
72
73 static int phyaddr = -1;
74 module_param(phyaddr, int, 0444);
75 MODULE_PARM_DESC(phyaddr, "Physical device address");
76
77 #define STMMAC_TX_THRESH(x)     ((x)->dma_tx_size / 4)
78 #define STMMAC_RX_THRESH(x)     ((x)->dma_rx_size / 4)
79
80 /* Limit to make sure XDP TX and slow path can coexist */
81 #define STMMAC_XSK_TX_BUDGET_MAX        256
82 #define STMMAC_TX_XSK_AVAIL             16
83 #define STMMAC_RX_FILL_BATCH            16
84
85 #define STMMAC_XDP_PASS         0
86 #define STMMAC_XDP_CONSUMED     BIT(0)
87 #define STMMAC_XDP_TX           BIT(1)
88 #define STMMAC_XDP_REDIRECT     BIT(2)
89
90 static int flow_ctrl = FLOW_AUTO;
91 module_param(flow_ctrl, int, 0644);
92 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
93
94 static int pause = PAUSE_TIME;
95 module_param(pause, int, 0644);
96 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
97
98 #define TC_DEFAULT 64
99 static int tc = TC_DEFAULT;
100 module_param(tc, int, 0644);
101 MODULE_PARM_DESC(tc, "DMA threshold control value");
102
103 #define DEFAULT_BUFSIZE 1536
104 static int buf_sz = DEFAULT_BUFSIZE;
105 module_param(buf_sz, int, 0644);
106 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
107
108 #define STMMAC_RX_COPYBREAK     256
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
112                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER        1000
115 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, int, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121  * but allow user to force to use the chain instead of the ring
122  */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
135 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
136                                           u32 rxmode, u32 chan);
137
138 #ifdef CONFIG_DEBUG_FS
139 static const struct net_device_ops stmmac_netdev_ops;
140 static void stmmac_init_fs(struct net_device *dev);
141 static void stmmac_exit_fs(struct net_device *dev);
142 #endif
143
144 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
145
146 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
147 {
148         int ret = 0;
149
150         if (enabled) {
151                 ret = clk_prepare_enable(priv->plat->stmmac_clk);
152                 if (ret)
153                         return ret;
154                 ret = clk_prepare_enable(priv->plat->pclk);
155                 if (ret) {
156                         clk_disable_unprepare(priv->plat->stmmac_clk);
157                         return ret;
158                 }
159                 if (priv->plat->clks_config) {
160                         ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
161                         if (ret) {
162                                 clk_disable_unprepare(priv->plat->stmmac_clk);
163                                 clk_disable_unprepare(priv->plat->pclk);
164                                 return ret;
165                         }
166                 }
167         } else {
168                 clk_disable_unprepare(priv->plat->stmmac_clk);
169                 clk_disable_unprepare(priv->plat->pclk);
170                 if (priv->plat->clks_config)
171                         priv->plat->clks_config(priv->plat->bsp_priv, enabled);
172         }
173
174         return ret;
175 }
176 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
177
178 /**
179  * stmmac_verify_args - verify the driver parameters.
180  * Description: it checks the driver parameters and set a default in case of
181  * errors.
182  */
183 static void stmmac_verify_args(void)
184 {
185         if (unlikely(watchdog < 0))
186                 watchdog = TX_TIMEO;
187         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
188                 buf_sz = DEFAULT_BUFSIZE;
189         if (unlikely(flow_ctrl > 1))
190                 flow_ctrl = FLOW_AUTO;
191         else if (likely(flow_ctrl < 0))
192                 flow_ctrl = FLOW_OFF;
193         if (unlikely((pause < 0) || (pause > 0xffff)))
194                 pause = PAUSE_TIME;
195         if (eee_timer < 0)
196                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
197 }
198
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204         u32 queue;
205
206         for (queue = 0; queue < maxq; queue++) {
207                 struct stmmac_channel *ch = &priv->channel[queue];
208
209                 if (stmmac_xdp_is_enabled(priv) &&
210                     test_bit(queue, priv->af_xdp_zc_qps)) {
211                         napi_disable(&ch->rxtx_napi);
212                         continue;
213                 }
214
215                 if (queue < rx_queues_cnt)
216                         napi_disable(&ch->rx_napi);
217                 if (queue < tx_queues_cnt)
218                         napi_disable(&ch->tx_napi);
219         }
220 }
221
222 /**
223  * stmmac_disable_all_queues - Disable all queues
224  * @priv: driver private structure
225  */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229         struct stmmac_rx_queue *rx_q;
230         u32 queue;
231
232         /* synchronize_rcu() needed for pending XDP buffers to drain */
233         for (queue = 0; queue < rx_queues_cnt; queue++) {
234                 rx_q = &priv->rx_queue[queue];
235                 if (rx_q->xsk_pool) {
236                         synchronize_rcu();
237                         break;
238                 }
239         }
240
241         __stmmac_disable_all_queues(priv);
242 }
243
244 /**
245  * stmmac_enable_all_queues - Enable all queues
246  * @priv: driver private structure
247  */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253         u32 queue;
254
255         for (queue = 0; queue < maxq; queue++) {
256                 struct stmmac_channel *ch = &priv->channel[queue];
257
258                 if (stmmac_xdp_is_enabled(priv) &&
259                     test_bit(queue, priv->af_xdp_zc_qps)) {
260                         napi_enable(&ch->rxtx_napi);
261                         continue;
262                 }
263
264                 if (queue < rx_queues_cnt)
265                         napi_enable(&ch->rx_napi);
266                 if (queue < tx_queues_cnt)
267                         napi_enable(&ch->tx_napi);
268         }
269 }
270
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273         if (!test_bit(STMMAC_DOWN, &priv->state) &&
274             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275                 queue_work(priv->wq, &priv->service_task);
276 }
277
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280         netif_carrier_off(priv->dev);
281         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282         stmmac_service_event_schedule(priv);
283 }
284
285 /**
286  * stmmac_clk_csr_set - dynamically set the MDC clock
287  * @priv: driver private structure
288  * Description: this is to dynamically set the MDC clock according to the csr
289  * clock input.
290  * Note:
291  *      If a specific clk_csr value is passed from the platform
292  *      this means that the CSR Clock Range selection cannot be
293  *      changed at run-time and it is fixed (as reported in the driver
294  *      documentation). Viceversa the driver will try to set the MDC
295  *      clock dynamically according to the actual clock input.
296  */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299         u32 clk_rate;
300
301         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302
303         /* Platform provided default clk_csr would be assumed valid
304          * for all other cases except for the below mentioned ones.
305          * For values higher than the IEEE 802.3 specified frequency
306          * we can not estimate the proper divider as it is not known
307          * the frequency of clk_csr_i. So we do not change the default
308          * divider.
309          */
310         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311                 if (clk_rate < CSR_F_35M)
312                         priv->clk_csr = STMMAC_CSR_20_35M;
313                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314                         priv->clk_csr = STMMAC_CSR_35_60M;
315                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316                         priv->clk_csr = STMMAC_CSR_60_100M;
317                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318                         priv->clk_csr = STMMAC_CSR_100_150M;
319                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320                         priv->clk_csr = STMMAC_CSR_150_250M;
321                 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322                         priv->clk_csr = STMMAC_CSR_250_300M;
323         }
324
325         if (priv->plat->has_sun8i) {
326                 if (clk_rate > 160000000)
327                         priv->clk_csr = 0x03;
328                 else if (clk_rate > 80000000)
329                         priv->clk_csr = 0x02;
330                 else if (clk_rate > 40000000)
331                         priv->clk_csr = 0x01;
332                 else
333                         priv->clk_csr = 0;
334         }
335
336         if (priv->plat->has_xgmac) {
337                 if (clk_rate > 400000000)
338                         priv->clk_csr = 0x5;
339                 else if (clk_rate > 350000000)
340                         priv->clk_csr = 0x4;
341                 else if (clk_rate > 300000000)
342                         priv->clk_csr = 0x3;
343                 else if (clk_rate > 250000000)
344                         priv->clk_csr = 0x2;
345                 else if (clk_rate > 150000000)
346                         priv->clk_csr = 0x1;
347                 else
348                         priv->clk_csr = 0x0;
349         }
350 }
351
352 static void print_pkt(unsigned char *buf, int len)
353 {
354         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
355         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
356 }
357
358 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
359 {
360         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
361         u32 avail;
362
363         if (tx_q->dirty_tx > tx_q->cur_tx)
364                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
365         else
366                 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
367
368         return avail;
369 }
370
371 /**
372  * stmmac_rx_dirty - Get RX queue dirty
373  * @priv: driver private structure
374  * @queue: RX queue index
375  */
376 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
377 {
378         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
379         u32 dirty;
380
381         if (rx_q->dirty_rx <= rx_q->cur_rx)
382                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
383         else
384                 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
385
386         return dirty;
387 }
388
389 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
390 {
391         int tx_lpi_timer;
392
393         /* Clear/set the SW EEE timer flag based on LPI ET enablement */
394         priv->eee_sw_timer_en = en ? 0 : 1;
395         tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
396         stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
397 }
398
399 /**
400  * stmmac_enable_eee_mode - check and enter in LPI mode
401  * @priv: driver private structure
402  * Description: this function is to verify and enter in LPI mode in case of
403  * EEE.
404  */
405 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
406 {
407         u32 tx_cnt = priv->plat->tx_queues_to_use;
408         u32 queue;
409
410         /* check if all TX queues have the work finished */
411         for (queue = 0; queue < tx_cnt; queue++) {
412                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
413
414                 if (tx_q->dirty_tx != tx_q->cur_tx)
415                         return; /* still unfinished work */
416         }
417
418         /* Check and enter in LPI mode */
419         if (!priv->tx_path_in_lpi_mode)
420                 stmmac_set_eee_mode(priv, priv->hw,
421                                 priv->plat->en_tx_lpi_clockgating);
422 }
423
424 /**
425  * stmmac_disable_eee_mode - disable and exit from LPI mode
426  * @priv: driver private structure
427  * Description: this function is to exit and disable EEE in case of
428  * LPI state is true. This is called by the xmit.
429  */
430 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
431 {
432         if (!priv->eee_sw_timer_en) {
433                 stmmac_lpi_entry_timer_config(priv, 0);
434                 return;
435         }
436
437         stmmac_reset_eee_mode(priv, priv->hw);
438         del_timer_sync(&priv->eee_ctrl_timer);
439         priv->tx_path_in_lpi_mode = false;
440 }
441
442 /**
443  * stmmac_eee_ctrl_timer - EEE TX SW timer.
444  * @t:  timer_list struct containing private info
445  * Description:
446  *  if there is no data transfer and if we are not in LPI state,
447  *  then MAC Transmitter can be moved to LPI state.
448  */
449 static void stmmac_eee_ctrl_timer(struct timer_list *t)
450 {
451         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
452
453         stmmac_enable_eee_mode(priv);
454         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
455 }
456
457 /**
458  * stmmac_eee_init - init EEE
459  * @priv: driver private structure
460  * Description:
461  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
462  *  can also manage EEE, this function enable the LPI state and start related
463  *  timer.
464  */
465 bool stmmac_eee_init(struct stmmac_priv *priv)
466 {
467         int eee_tw_timer = priv->eee_tw_timer;
468
469         /* Using PCS we cannot dial with the phy registers at this stage
470          * so we do not support extra feature like EEE.
471          */
472         if (priv->hw->pcs == STMMAC_PCS_TBI ||
473             priv->hw->pcs == STMMAC_PCS_RTBI)
474                 return false;
475
476         /* Check if MAC core supports the EEE feature. */
477         if (!priv->dma_cap.eee)
478                 return false;
479
480         mutex_lock(&priv->lock);
481
482         /* Check if it needs to be deactivated */
483         if (!priv->eee_active) {
484                 if (priv->eee_enabled) {
485                         netdev_dbg(priv->dev, "disable EEE\n");
486                         stmmac_lpi_entry_timer_config(priv, 0);
487                         del_timer_sync(&priv->eee_ctrl_timer);
488                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
489                         if (priv->hw->xpcs)
490                                 xpcs_config_eee(priv->hw->xpcs,
491                                                 priv->plat->mult_fact_100ns,
492                                                 false);
493                 }
494                 mutex_unlock(&priv->lock);
495                 return false;
496         }
497
498         if (priv->eee_active && !priv->eee_enabled) {
499                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
500                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
501                                      eee_tw_timer);
502                 if (priv->hw->xpcs)
503                         xpcs_config_eee(priv->hw->xpcs,
504                                         priv->plat->mult_fact_100ns,
505                                         true);
506         }
507
508         if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
509                 del_timer_sync(&priv->eee_ctrl_timer);
510                 priv->tx_path_in_lpi_mode = false;
511                 stmmac_lpi_entry_timer_config(priv, 1);
512         } else {
513                 stmmac_lpi_entry_timer_config(priv, 0);
514                 mod_timer(&priv->eee_ctrl_timer,
515                           STMMAC_LPI_T(priv->tx_lpi_timer));
516         }
517
518         mutex_unlock(&priv->lock);
519         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
520         return true;
521 }
522
523 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
524  * @priv: driver private structure
525  * @p : descriptor pointer
526  * @skb : the socket buffer
527  * Description :
528  * This function will read timestamp from the descriptor & pass it to stack.
529  * and also perform some sanity checks.
530  */
531 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
532                                    struct dma_desc *p, struct sk_buff *skb)
533 {
534         struct skb_shared_hwtstamps shhwtstamp;
535         bool found = false;
536         u64 ns = 0;
537
538         if (!priv->hwts_tx_en)
539                 return;
540
541         /* exit if skb doesn't support hw tstamp */
542         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
543                 return;
544
545         /* check tx tstamp status */
546         if (stmmac_get_tx_timestamp_status(priv, p)) {
547                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
548                 found = true;
549         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
550                 found = true;
551         }
552
553         if (found) {
554                 ns -= priv->plat->cdc_error_adj;
555
556                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
557                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
558
559                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
560                 /* pass tstamp to stack */
561                 skb_tstamp_tx(skb, &shhwtstamp);
562         }
563 }
564
565 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
566  * @priv: driver private structure
567  * @p : descriptor pointer
568  * @np : next descriptor pointer
569  * @skb : the socket buffer
570  * Description :
571  * This function will read received packet's timestamp from the descriptor
572  * and pass it to stack. It also perform some sanity checks.
573  */
574 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
575                                    struct dma_desc *np, struct sk_buff *skb)
576 {
577         struct skb_shared_hwtstamps *shhwtstamp = NULL;
578         struct dma_desc *desc = p;
579         u64 ns = 0;
580
581         if (!priv->hwts_rx_en)
582                 return;
583         /* For GMAC4, the valid timestamp is from CTX next desc. */
584         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
585                 desc = np;
586
587         /* Check if timestamp is available */
588         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
589                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
590
591                 ns -= priv->plat->cdc_error_adj;
592
593                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
594                 shhwtstamp = skb_hwtstamps(skb);
595                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
596                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
597         } else  {
598                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
599         }
600 }
601
602 /**
603  *  stmmac_hwtstamp_set - control hardware timestamping.
604  *  @dev: device pointer.
605  *  @ifr: An IOCTL specific structure, that can contain a pointer to
606  *  a proprietary structure used to pass information to the driver.
607  *  Description:
608  *  This function configures the MAC to enable/disable both outgoing(TX)
609  *  and incoming(RX) packets time stamping based on user input.
610  *  Return Value:
611  *  0 on success and an appropriate -ve integer on failure.
612  */
613 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
614 {
615         struct stmmac_priv *priv = netdev_priv(dev);
616         struct hwtstamp_config config;
617         u32 ptp_v2 = 0;
618         u32 tstamp_all = 0;
619         u32 ptp_over_ipv4_udp = 0;
620         u32 ptp_over_ipv6_udp = 0;
621         u32 ptp_over_ethernet = 0;
622         u32 snap_type_sel = 0;
623         u32 ts_master_en = 0;
624         u32 ts_event_en = 0;
625
626         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
627                 netdev_alert(priv->dev, "No support for HW time stamping\n");
628                 priv->hwts_tx_en = 0;
629                 priv->hwts_rx_en = 0;
630
631                 return -EOPNOTSUPP;
632         }
633
634         if (copy_from_user(&config, ifr->ifr_data,
635                            sizeof(config)))
636                 return -EFAULT;
637
638         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
639                    __func__, config.flags, config.tx_type, config.rx_filter);
640
641         if (config.tx_type != HWTSTAMP_TX_OFF &&
642             config.tx_type != HWTSTAMP_TX_ON)
643                 return -ERANGE;
644
645         if (priv->adv_ts) {
646                 switch (config.rx_filter) {
647                 case HWTSTAMP_FILTER_NONE:
648                         /* time stamp no incoming packet at all */
649                         config.rx_filter = HWTSTAMP_FILTER_NONE;
650                         break;
651
652                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
653                         /* PTP v1, UDP, any kind of event packet */
654                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
655                         /* 'xmac' hardware can support Sync, Pdelay_Req and
656                          * Pdelay_resp by setting bit14 and bits17/16 to 01
657                          * This leaves Delay_Req timestamps out.
658                          * Enable all events *and* general purpose message
659                          * timestamping
660                          */
661                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664                         break;
665
666                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
667                         /* PTP v1, UDP, Sync packet */
668                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
669                         /* take time stamp for SYNC messages only */
670                         ts_event_en = PTP_TCR_TSEVNTENA;
671
672                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
673                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
674                         break;
675
676                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
677                         /* PTP v1, UDP, Delay_req packet */
678                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
679                         /* take time stamp for Delay_Req messages only */
680                         ts_master_en = PTP_TCR_TSMSTRENA;
681                         ts_event_en = PTP_TCR_TSEVNTENA;
682
683                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685                         break;
686
687                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
688                         /* PTP v2, UDP, any kind of event packet */
689                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
690                         ptp_v2 = PTP_TCR_TSVER2ENA;
691                         /* take time stamp for all event messages */
692                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
693
694                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
695                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
696                         break;
697
698                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
699                         /* PTP v2, UDP, Sync packet */
700                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
701                         ptp_v2 = PTP_TCR_TSVER2ENA;
702                         /* take time stamp for SYNC messages only */
703                         ts_event_en = PTP_TCR_TSEVNTENA;
704
705                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
706                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
707                         break;
708
709                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
710                         /* PTP v2, UDP, Delay_req packet */
711                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
712                         ptp_v2 = PTP_TCR_TSVER2ENA;
713                         /* take time stamp for Delay_Req messages only */
714                         ts_master_en = PTP_TCR_TSMSTRENA;
715                         ts_event_en = PTP_TCR_TSEVNTENA;
716
717                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
718                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
719                         break;
720
721                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
722                         /* PTP v2/802.AS1 any layer, any kind of event packet */
723                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
724                         ptp_v2 = PTP_TCR_TSVER2ENA;
725                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
726                         if (priv->synopsys_id < DWMAC_CORE_4_10)
727                                 ts_event_en = PTP_TCR_TSEVNTENA;
728                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
729                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
730                         ptp_over_ethernet = PTP_TCR_TSIPENA;
731                         break;
732
733                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
734                         /* PTP v2/802.AS1, any layer, Sync packet */
735                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
736                         ptp_v2 = PTP_TCR_TSVER2ENA;
737                         /* take time stamp for SYNC messages only */
738                         ts_event_en = PTP_TCR_TSEVNTENA;
739
740                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
741                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
742                         ptp_over_ethernet = PTP_TCR_TSIPENA;
743                         break;
744
745                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
746                         /* PTP v2/802.AS1, any layer, Delay_req packet */
747                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
748                         ptp_v2 = PTP_TCR_TSVER2ENA;
749                         /* take time stamp for Delay_Req messages only */
750                         ts_master_en = PTP_TCR_TSMSTRENA;
751                         ts_event_en = PTP_TCR_TSEVNTENA;
752
753                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
754                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
755                         ptp_over_ethernet = PTP_TCR_TSIPENA;
756                         break;
757
758                 case HWTSTAMP_FILTER_NTP_ALL:
759                 case HWTSTAMP_FILTER_ALL:
760                         /* time stamp any incoming packet */
761                         config.rx_filter = HWTSTAMP_FILTER_ALL;
762                         tstamp_all = PTP_TCR_TSENALL;
763                         break;
764
765                 default:
766                         return -ERANGE;
767                 }
768         } else {
769                 switch (config.rx_filter) {
770                 case HWTSTAMP_FILTER_NONE:
771                         config.rx_filter = HWTSTAMP_FILTER_NONE;
772                         break;
773                 default:
774                         /* PTP v1, UDP, any kind of event packet */
775                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
776                         break;
777                 }
778         }
779         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
780         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
781
782         priv->systime_flags = STMMAC_HWTS_ACTIVE;
783
784         if (priv->hwts_tx_en || priv->hwts_rx_en) {
785                 priv->systime_flags |= tstamp_all | ptp_v2 |
786                                        ptp_over_ethernet | ptp_over_ipv6_udp |
787                                        ptp_over_ipv4_udp | ts_event_en |
788                                        ts_master_en | snap_type_sel;
789         }
790
791         stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
792
793         memcpy(&priv->tstamp_config, &config, sizeof(config));
794
795         return copy_to_user(ifr->ifr_data, &config,
796                             sizeof(config)) ? -EFAULT : 0;
797 }
798
799 /**
800  *  stmmac_hwtstamp_get - read hardware timestamping.
801  *  @dev: device pointer.
802  *  @ifr: An IOCTL specific structure, that can contain a pointer to
803  *  a proprietary structure used to pass information to the driver.
804  *  Description:
805  *  This function obtain the current hardware timestamping settings
806  *  as requested.
807  */
808 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
809 {
810         struct stmmac_priv *priv = netdev_priv(dev);
811         struct hwtstamp_config *config = &priv->tstamp_config;
812
813         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
814                 return -EOPNOTSUPP;
815
816         return copy_to_user(ifr->ifr_data, config,
817                             sizeof(*config)) ? -EFAULT : 0;
818 }
819
820 /**
821  * stmmac_init_tstamp_counter - init hardware timestamping counter
822  * @priv: driver private structure
823  * @systime_flags: timestamping flags
824  * Description:
825  * Initialize hardware counter for packet timestamping.
826  * This is valid as long as the interface is open and not suspended.
827  * Will be rerun after resuming from suspend, case in which the timestamping
828  * flags updated by stmmac_hwtstamp_set() also need to be restored.
829  */
830 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
831 {
832         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
833         struct timespec64 now;
834         u32 sec_inc = 0;
835         u64 temp = 0;
836         int ret;
837
838         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
839                 return -EOPNOTSUPP;
840
841         ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
842         if (ret < 0) {
843                 netdev_warn(priv->dev,
844                             "failed to enable PTP reference clock: %pe\n",
845                             ERR_PTR(ret));
846                 return ret;
847         }
848
849         stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
850         priv->systime_flags = systime_flags;
851
852         /* program Sub Second Increment reg */
853         stmmac_config_sub_second_increment(priv, priv->ptpaddr,
854                                            priv->plat->clk_ptp_rate,
855                                            xmac, &sec_inc);
856         temp = div_u64(1000000000ULL, sec_inc);
857
858         /* Store sub second increment for later use */
859         priv->sub_second_inc = sec_inc;
860
861         /* calculate default added value:
862          * formula is :
863          * addend = (2^32)/freq_div_ratio;
864          * where, freq_div_ratio = 1e9ns/sec_inc
865          */
866         temp = (u64)(temp << 32);
867         priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
868         stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
869
870         /* initialize system time */
871         ktime_get_real_ts64(&now);
872
873         /* lower 32 bits of tv_sec are safe until y2106 */
874         stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
875
876         return 0;
877 }
878 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
879
880 /**
881  * stmmac_init_ptp - init PTP
882  * @priv: driver private structure
883  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
884  * This is done by looking at the HW cap. register.
885  * This function also registers the ptp driver.
886  */
887 static int stmmac_init_ptp(struct stmmac_priv *priv)
888 {
889         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
890         int ret;
891
892         ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
893         if (ret)
894                 return ret;
895
896         priv->adv_ts = 0;
897         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
898         if (xmac && priv->dma_cap.atime_stamp)
899                 priv->adv_ts = 1;
900         /* Dwmac 3.x core with extend_desc can support adv_ts */
901         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
902                 priv->adv_ts = 1;
903
904         if (priv->dma_cap.time_stamp)
905                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
906
907         if (priv->adv_ts)
908                 netdev_info(priv->dev,
909                             "IEEE 1588-2008 Advanced Timestamp supported\n");
910
911         priv->hwts_tx_en = 0;
912         priv->hwts_rx_en = 0;
913
914         stmmac_ptp_register(priv);
915
916         return 0;
917 }
918
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921         clk_disable_unprepare(priv->plat->clk_ptp_ref);
922         stmmac_ptp_unregister(priv);
923 }
924
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933         u32 tx_cnt = priv->plat->tx_queues_to_use;
934
935         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936                         priv->pause, tx_cnt);
937 }
938
939 static void stmmac_validate(struct phylink_config *config,
940                             unsigned long *supported,
941                             struct phylink_link_state *state)
942 {
943         __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
944
945         /* This is very similar to phylink_generic_validate() except that
946          * we always use PHY_INTERFACE_MODE_INTERNAL to get all capabilities.
947          * This is because we don't always have config->supported_interfaces
948          * populated (only when we have the XPCS.)
949          *
950          * When we do have an XPCS, we could pass state->interface, as XPCS
951          * limits to a subset of the ethtool link modes allowed here.
952          */
953         phylink_set(mac_supported, Autoneg);
954         phylink_set_port_modes(mac_supported);
955         phylink_get_linkmodes(mac_supported, PHY_INTERFACE_MODE_INTERNAL,
956                               config->mac_capabilities);
957
958         linkmode_and(supported, supported, mac_supported);
959         linkmode_and(state->advertising, state->advertising, mac_supported);
960 }
961
962 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
963                               const struct phylink_link_state *state)
964 {
965         /* Nothing to do, xpcs_config() handles everything */
966 }
967
968 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
969 {
970         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
971         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
972         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
973         bool *hs_enable = &fpe_cfg->hs_enable;
974
975         if (is_up && *hs_enable) {
976                 stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
977         } else {
978                 *lo_state = FPE_STATE_OFF;
979                 *lp_state = FPE_STATE_OFF;
980         }
981 }
982
983 static void stmmac_mac_link_down(struct phylink_config *config,
984                                  unsigned int mode, phy_interface_t interface)
985 {
986         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
987
988         stmmac_mac_set(priv, priv->ioaddr, false);
989         priv->eee_active = false;
990         priv->tx_lpi_enabled = false;
991         priv->eee_enabled = stmmac_eee_init(priv);
992         stmmac_set_eee_pls(priv, priv->hw, false);
993
994         if (priv->dma_cap.fpesel)
995                 stmmac_fpe_link_state_handle(priv, false);
996 }
997
998 static void stmmac_mac_link_up(struct phylink_config *config,
999                                struct phy_device *phy,
1000                                unsigned int mode, phy_interface_t interface,
1001                                int speed, int duplex,
1002                                bool tx_pause, bool rx_pause)
1003 {
1004         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1005         u32 ctrl;
1006
1007         ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1008         ctrl &= ~priv->hw->link.speed_mask;
1009
1010         if (interface == PHY_INTERFACE_MODE_USXGMII) {
1011                 switch (speed) {
1012                 case SPEED_10000:
1013                         ctrl |= priv->hw->link.xgmii.speed10000;
1014                         break;
1015                 case SPEED_5000:
1016                         ctrl |= priv->hw->link.xgmii.speed5000;
1017                         break;
1018                 case SPEED_2500:
1019                         ctrl |= priv->hw->link.xgmii.speed2500;
1020                         break;
1021                 default:
1022                         return;
1023                 }
1024         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1025                 switch (speed) {
1026                 case SPEED_100000:
1027                         ctrl |= priv->hw->link.xlgmii.speed100000;
1028                         break;
1029                 case SPEED_50000:
1030                         ctrl |= priv->hw->link.xlgmii.speed50000;
1031                         break;
1032                 case SPEED_40000:
1033                         ctrl |= priv->hw->link.xlgmii.speed40000;
1034                         break;
1035                 case SPEED_25000:
1036                         ctrl |= priv->hw->link.xlgmii.speed25000;
1037                         break;
1038                 case SPEED_10000:
1039                         ctrl |= priv->hw->link.xgmii.speed10000;
1040                         break;
1041                 case SPEED_2500:
1042                         ctrl |= priv->hw->link.speed2500;
1043                         break;
1044                 case SPEED_1000:
1045                         ctrl |= priv->hw->link.speed1000;
1046                         break;
1047                 default:
1048                         return;
1049                 }
1050         } else {
1051                 switch (speed) {
1052                 case SPEED_2500:
1053                         ctrl |= priv->hw->link.speed2500;
1054                         break;
1055                 case SPEED_1000:
1056                         ctrl |= priv->hw->link.speed1000;
1057                         break;
1058                 case SPEED_100:
1059                         ctrl |= priv->hw->link.speed100;
1060                         break;
1061                 case SPEED_10:
1062                         ctrl |= priv->hw->link.speed10;
1063                         break;
1064                 default:
1065                         return;
1066                 }
1067         }
1068
1069         priv->speed = speed;
1070
1071         if (priv->plat->fix_mac_speed)
1072                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
1073
1074         if (!duplex)
1075                 ctrl &= ~priv->hw->link.duplex;
1076         else
1077                 ctrl |= priv->hw->link.duplex;
1078
1079         /* Flow Control operation */
1080         if (tx_pause && rx_pause)
1081                 stmmac_mac_flow_ctrl(priv, duplex);
1082
1083         writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1084
1085         stmmac_mac_set(priv, priv->ioaddr, true);
1086         if (phy && priv->dma_cap.eee) {
1087                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1088                 priv->eee_enabled = stmmac_eee_init(priv);
1089                 priv->tx_lpi_enabled = priv->eee_enabled;
1090                 stmmac_set_eee_pls(priv, priv->hw, true);
1091         }
1092
1093         if (priv->dma_cap.fpesel)
1094                 stmmac_fpe_link_state_handle(priv, true);
1095 }
1096
1097 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1098         .validate = stmmac_validate,
1099         .mac_config = stmmac_mac_config,
1100         .mac_link_down = stmmac_mac_link_down,
1101         .mac_link_up = stmmac_mac_link_up,
1102 };
1103
1104 /**
1105  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1106  * @priv: driver private structure
1107  * Description: this is to verify if the HW supports the PCS.
1108  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1109  * configured for the TBI, RTBI, or SGMII PHY interface.
1110  */
1111 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1112 {
1113         int interface = priv->plat->interface;
1114
1115         if (priv->dma_cap.pcs) {
1116                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1117                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1118                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1119                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1120                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1121                         priv->hw->pcs = STMMAC_PCS_RGMII;
1122                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1123                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1124                         priv->hw->pcs = STMMAC_PCS_SGMII;
1125                 }
1126         }
1127 }
1128
1129 /**
1130  * stmmac_init_phy - PHY initialization
1131  * @dev: net device structure
1132  * Description: it initializes the driver's PHY state, and attaches the PHY
1133  * to the mac driver.
1134  *  Return value:
1135  *  0 on success
1136  */
1137 static int stmmac_init_phy(struct net_device *dev)
1138 {
1139         struct stmmac_priv *priv = netdev_priv(dev);
1140         struct device_node *node;
1141         int ret;
1142
1143         node = priv->plat->phylink_node;
1144
1145         if (node)
1146                 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1147
1148         /* Some DT bindings do not set-up the PHY handle. Let's try to
1149          * manually parse it
1150          */
1151         if (!node || ret) {
1152                 int addr = priv->plat->phy_addr;
1153                 struct phy_device *phydev;
1154
1155                 phydev = mdiobus_get_phy(priv->mii, addr);
1156                 if (!phydev) {
1157                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1158                         return -ENODEV;
1159                 }
1160
1161                 ret = phylink_connect_phy(priv->phylink, phydev);
1162         }
1163
1164         if (!priv->plat->pmt) {
1165                 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1166
1167                 phylink_ethtool_get_wol(priv->phylink, &wol);
1168                 device_set_wakeup_capable(priv->device, !!wol.supported);
1169         }
1170
1171         return ret;
1172 }
1173
1174 static int stmmac_phy_setup(struct stmmac_priv *priv)
1175 {
1176         struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
1177         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1178         int max_speed = priv->plat->max_speed;
1179         int mode = priv->plat->phy_interface;
1180         struct phylink *phylink;
1181
1182         priv->phylink_config.dev = &priv->dev->dev;
1183         priv->phylink_config.type = PHYLINK_NETDEV;
1184         if (priv->plat->mdio_bus_data)
1185                 priv->phylink_config.ovr_an_inband =
1186                         mdio_bus_data->xpcs_an_inband;
1187
1188         if (!fwnode)
1189                 fwnode = dev_fwnode(priv->device);
1190
1191         /* Set the platform/firmware specified interface mode */
1192         __set_bit(mode, priv->phylink_config.supported_interfaces);
1193
1194         /* If we have an xpcs, it defines which PHY interfaces are supported. */
1195         if (priv->hw->xpcs)
1196                 xpcs_get_interfaces(priv->hw->xpcs,
1197                                     priv->phylink_config.supported_interfaces);
1198
1199         priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1200                 MAC_10 | MAC_100;
1201
1202         if (!max_speed || max_speed >= 1000)
1203                 priv->phylink_config.mac_capabilities |= MAC_1000;
1204
1205         if (priv->plat->has_gmac4) {
1206                 if (!max_speed || max_speed >= 2500)
1207                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1208         } else if (priv->plat->has_xgmac) {
1209                 if (!max_speed || max_speed >= 2500)
1210                         priv->phylink_config.mac_capabilities |= MAC_2500FD;
1211                 if (!max_speed || max_speed >= 5000)
1212                         priv->phylink_config.mac_capabilities |= MAC_5000FD;
1213                 if (!max_speed || max_speed >= 10000)
1214                         priv->phylink_config.mac_capabilities |= MAC_10000FD;
1215                 if (!max_speed || max_speed >= 25000)
1216                         priv->phylink_config.mac_capabilities |= MAC_25000FD;
1217                 if (!max_speed || max_speed >= 40000)
1218                         priv->phylink_config.mac_capabilities |= MAC_40000FD;
1219                 if (!max_speed || max_speed >= 50000)
1220                         priv->phylink_config.mac_capabilities |= MAC_50000FD;
1221                 if (!max_speed || max_speed >= 100000)
1222                         priv->phylink_config.mac_capabilities |= MAC_100000FD;
1223         }
1224
1225         /* Half-Duplex can only work with single queue */
1226         if (priv->plat->tx_queues_to_use > 1)
1227                 priv->phylink_config.mac_capabilities &=
1228                         ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1229
1230         phylink = phylink_create(&priv->phylink_config, fwnode,
1231                                  mode, &stmmac_phylink_mac_ops);
1232         if (IS_ERR(phylink))
1233                 return PTR_ERR(phylink);
1234
1235         if (priv->hw->xpcs)
1236                 phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
1237
1238         priv->phylink = phylink;
1239         return 0;
1240 }
1241
1242 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1243 {
1244         u32 rx_cnt = priv->plat->rx_queues_to_use;
1245         unsigned int desc_size;
1246         void *head_rx;
1247         u32 queue;
1248
1249         /* Display RX rings */
1250         for (queue = 0; queue < rx_cnt; queue++) {
1251                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1252
1253                 pr_info("\tRX Queue %u rings\n", queue);
1254
1255                 if (priv->extend_desc) {
1256                         head_rx = (void *)rx_q->dma_erx;
1257                         desc_size = sizeof(struct dma_extended_desc);
1258                 } else {
1259                         head_rx = (void *)rx_q->dma_rx;
1260                         desc_size = sizeof(struct dma_desc);
1261                 }
1262
1263                 /* Display RX ring */
1264                 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
1265                                     rx_q->dma_rx_phy, desc_size);
1266         }
1267 }
1268
1269 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1270 {
1271         u32 tx_cnt = priv->plat->tx_queues_to_use;
1272         unsigned int desc_size;
1273         void *head_tx;
1274         u32 queue;
1275
1276         /* Display TX rings */
1277         for (queue = 0; queue < tx_cnt; queue++) {
1278                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1279
1280                 pr_info("\tTX Queue %d rings\n", queue);
1281
1282                 if (priv->extend_desc) {
1283                         head_tx = (void *)tx_q->dma_etx;
1284                         desc_size = sizeof(struct dma_extended_desc);
1285                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1286                         head_tx = (void *)tx_q->dma_entx;
1287                         desc_size = sizeof(struct dma_edesc);
1288                 } else {
1289                         head_tx = (void *)tx_q->dma_tx;
1290                         desc_size = sizeof(struct dma_desc);
1291                 }
1292
1293                 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
1294                                     tx_q->dma_tx_phy, desc_size);
1295         }
1296 }
1297
1298 static void stmmac_display_rings(struct stmmac_priv *priv)
1299 {
1300         /* Display RX ring */
1301         stmmac_display_rx_rings(priv);
1302
1303         /* Display TX ring */
1304         stmmac_display_tx_rings(priv);
1305 }
1306
1307 static int stmmac_set_bfsize(int mtu, int bufsize)
1308 {
1309         int ret = bufsize;
1310
1311         if (mtu >= BUF_SIZE_8KiB)
1312                 ret = BUF_SIZE_16KiB;
1313         else if (mtu >= BUF_SIZE_4KiB)
1314                 ret = BUF_SIZE_8KiB;
1315         else if (mtu >= BUF_SIZE_2KiB)
1316                 ret = BUF_SIZE_4KiB;
1317         else if (mtu > DEFAULT_BUFSIZE)
1318                 ret = BUF_SIZE_2KiB;
1319         else
1320                 ret = DEFAULT_BUFSIZE;
1321
1322         return ret;
1323 }
1324
1325 /**
1326  * stmmac_clear_rx_descriptors - clear RX descriptors
1327  * @priv: driver private structure
1328  * @queue: RX queue index
1329  * Description: this function is called to clear the RX descriptors
1330  * in case of both basic and extended descriptors are used.
1331  */
1332 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1333 {
1334         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1335         int i;
1336
1337         /* Clear the RX descriptors */
1338         for (i = 0; i < priv->dma_rx_size; i++)
1339                 if (priv->extend_desc)
1340                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1341                                         priv->use_riwt, priv->mode,
1342                                         (i == priv->dma_rx_size - 1),
1343                                         priv->dma_buf_sz);
1344                 else
1345                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1346                                         priv->use_riwt, priv->mode,
1347                                         (i == priv->dma_rx_size - 1),
1348                                         priv->dma_buf_sz);
1349 }
1350
1351 /**
1352  * stmmac_clear_tx_descriptors - clear tx descriptors
1353  * @priv: driver private structure
1354  * @queue: TX queue index.
1355  * Description: this function is called to clear the TX descriptors
1356  * in case of both basic and extended descriptors are used.
1357  */
1358 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1359 {
1360         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1361         int i;
1362
1363         /* Clear the TX descriptors */
1364         for (i = 0; i < priv->dma_tx_size; i++) {
1365                 int last = (i == (priv->dma_tx_size - 1));
1366                 struct dma_desc *p;
1367
1368                 if (priv->extend_desc)
1369                         p = &tx_q->dma_etx[i].basic;
1370                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1371                         p = &tx_q->dma_entx[i].basic;
1372                 else
1373                         p = &tx_q->dma_tx[i];
1374
1375                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1376         }
1377 }
1378
1379 /**
1380  * stmmac_clear_descriptors - clear descriptors
1381  * @priv: driver private structure
1382  * Description: this function is called to clear the TX and RX descriptors
1383  * in case of both basic and extended descriptors are used.
1384  */
1385 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1386 {
1387         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1388         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1389         u32 queue;
1390
1391         /* Clear the RX descriptors */
1392         for (queue = 0; queue < rx_queue_cnt; queue++)
1393                 stmmac_clear_rx_descriptors(priv, queue);
1394
1395         /* Clear the TX descriptors */
1396         for (queue = 0; queue < tx_queue_cnt; queue++)
1397                 stmmac_clear_tx_descriptors(priv, queue);
1398 }
1399
1400 /**
1401  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1402  * @priv: driver private structure
1403  * @p: descriptor pointer
1404  * @i: descriptor index
1405  * @flags: gfp flag
1406  * @queue: RX queue index
1407  * Description: this function is called to allocate a receive buffer, perform
1408  * the DMA mapping and init the descriptor.
1409  */
1410 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1411                                   int i, gfp_t flags, u32 queue)
1412 {
1413         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1414         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1415         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1416
1417         if (priv->dma_cap.addr64 <= 32)
1418                 gfp |= GFP_DMA32;
1419
1420         if (!buf->page) {
1421                 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1422                 if (!buf->page)
1423                         return -ENOMEM;
1424                 buf->page_offset = stmmac_rx_offset(priv);
1425         }
1426
1427         if (priv->sph && !buf->sec_page) {
1428                 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1429                 if (!buf->sec_page)
1430                         return -ENOMEM;
1431
1432                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1433                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1434         } else {
1435                 buf->sec_page = NULL;
1436                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1437         }
1438
1439         buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1440
1441         stmmac_set_desc_addr(priv, p, buf->addr);
1442         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1443                 stmmac_init_desc3(priv, p);
1444
1445         return 0;
1446 }
1447
1448 /**
1449  * stmmac_free_rx_buffer - free RX dma buffers
1450  * @priv: private structure
1451  * @queue: RX queue index
1452  * @i: buffer index.
1453  */
1454 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1455 {
1456         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1457         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1458
1459         if (buf->page)
1460                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1461         buf->page = NULL;
1462
1463         if (buf->sec_page)
1464                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1465         buf->sec_page = NULL;
1466 }
1467
1468 /**
1469  * stmmac_free_tx_buffer - free RX dma buffers
1470  * @priv: private structure
1471  * @queue: RX queue index
1472  * @i: buffer index.
1473  */
1474 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1475 {
1476         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1477
1478         if (tx_q->tx_skbuff_dma[i].buf &&
1479             tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1480                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1481                         dma_unmap_page(priv->device,
1482                                        tx_q->tx_skbuff_dma[i].buf,
1483                                        tx_q->tx_skbuff_dma[i].len,
1484                                        DMA_TO_DEVICE);
1485                 else
1486                         dma_unmap_single(priv->device,
1487                                          tx_q->tx_skbuff_dma[i].buf,
1488                                          tx_q->tx_skbuff_dma[i].len,
1489                                          DMA_TO_DEVICE);
1490         }
1491
1492         if (tx_q->xdpf[i] &&
1493             (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1494              tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1495                 xdp_return_frame(tx_q->xdpf[i]);
1496                 tx_q->xdpf[i] = NULL;
1497         }
1498
1499         if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1500                 tx_q->xsk_frames_done++;
1501
1502         if (tx_q->tx_skbuff[i] &&
1503             tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1504                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1505                 tx_q->tx_skbuff[i] = NULL;
1506         }
1507
1508         tx_q->tx_skbuff_dma[i].buf = 0;
1509         tx_q->tx_skbuff_dma[i].map_as_page = false;
1510 }
1511
1512 /**
1513  * dma_free_rx_skbufs - free RX dma buffers
1514  * @priv: private structure
1515  * @queue: RX queue index
1516  */
1517 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1518 {
1519         int i;
1520
1521         for (i = 0; i < priv->dma_rx_size; i++)
1522                 stmmac_free_rx_buffer(priv, queue, i);
1523 }
1524
1525 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1526                                    gfp_t flags)
1527 {
1528         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1529         int i;
1530
1531         for (i = 0; i < priv->dma_rx_size; i++) {
1532                 struct dma_desc *p;
1533                 int ret;
1534
1535                 if (priv->extend_desc)
1536                         p = &((rx_q->dma_erx + i)->basic);
1537                 else
1538                         p = rx_q->dma_rx + i;
1539
1540                 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1541                                              queue);
1542                 if (ret)
1543                         return ret;
1544
1545                 rx_q->buf_alloc_num++;
1546         }
1547
1548         return 0;
1549 }
1550
1551 /**
1552  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1553  * @priv: private structure
1554  * @queue: RX queue index
1555  */
1556 static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1557 {
1558         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1559         int i;
1560
1561         for (i = 0; i < priv->dma_rx_size; i++) {
1562                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1563
1564                 if (!buf->xdp)
1565                         continue;
1566
1567                 xsk_buff_free(buf->xdp);
1568                 buf->xdp = NULL;
1569         }
1570 }
1571
1572 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1573 {
1574         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1575         int i;
1576
1577         for (i = 0; i < priv->dma_rx_size; i++) {
1578                 struct stmmac_rx_buffer *buf;
1579                 dma_addr_t dma_addr;
1580                 struct dma_desc *p;
1581
1582                 if (priv->extend_desc)
1583                         p = (struct dma_desc *)(rx_q->dma_erx + i);
1584                 else
1585                         p = rx_q->dma_rx + i;
1586
1587                 buf = &rx_q->buf_pool[i];
1588
1589                 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1590                 if (!buf->xdp)
1591                         return -ENOMEM;
1592
1593                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1594                 stmmac_set_desc_addr(priv, p, dma_addr);
1595                 rx_q->buf_alloc_num++;
1596         }
1597
1598         return 0;
1599 }
1600
1601 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1602 {
1603         if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1604                 return NULL;
1605
1606         return xsk_get_pool_from_qid(priv->dev, queue);
1607 }
1608
1609 /**
1610  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1611  * @priv: driver private structure
1612  * @queue: RX queue index
1613  * @flags: gfp flag.
1614  * Description: this function initializes the DMA RX descriptors
1615  * and allocates the socket buffers. It supports the chained and ring
1616  * modes.
1617  */
1618 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
1619 {
1620         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1621         int ret;
1622
1623         netif_dbg(priv, probe, priv->dev,
1624                   "(%s) dma_rx_phy=0x%08x\n", __func__,
1625                   (u32)rx_q->dma_rx_phy);
1626
1627         stmmac_clear_rx_descriptors(priv, queue);
1628
1629         xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1630
1631         rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1632
1633         if (rx_q->xsk_pool) {
1634                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1635                                                    MEM_TYPE_XSK_BUFF_POOL,
1636                                                    NULL));
1637                 netdev_info(priv->dev,
1638                             "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1639                             rx_q->queue_index);
1640                 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1641         } else {
1642                 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1643                                                    MEM_TYPE_PAGE_POOL,
1644                                                    rx_q->page_pool));
1645                 netdev_info(priv->dev,
1646                             "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1647                             rx_q->queue_index);
1648         }
1649
1650         if (rx_q->xsk_pool) {
1651                 /* RX XDP ZC buffer pool may not be populated, e.g.
1652                  * xdpsock TX-only.
1653                  */
1654                 stmmac_alloc_rx_buffers_zc(priv, queue);
1655         } else {
1656                 ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1657                 if (ret < 0)
1658                         return -ENOMEM;
1659         }
1660
1661         rx_q->cur_rx = 0;
1662         rx_q->dirty_rx = 0;
1663
1664         /* Setup the chained descriptor addresses */
1665         if (priv->mode == STMMAC_CHAIN_MODE) {
1666                 if (priv->extend_desc)
1667                         stmmac_mode_init(priv, rx_q->dma_erx,
1668                                          rx_q->dma_rx_phy,
1669                                          priv->dma_rx_size, 1);
1670                 else
1671                         stmmac_mode_init(priv, rx_q->dma_rx,
1672                                          rx_q->dma_rx_phy,
1673                                          priv->dma_rx_size, 0);
1674         }
1675
1676         return 0;
1677 }
1678
1679 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1680 {
1681         struct stmmac_priv *priv = netdev_priv(dev);
1682         u32 rx_count = priv->plat->rx_queues_to_use;
1683         u32 queue;
1684         int ret;
1685
1686         /* RX INITIALIZATION */
1687         netif_dbg(priv, probe, priv->dev,
1688                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1689
1690         for (queue = 0; queue < rx_count; queue++) {
1691                 ret = __init_dma_rx_desc_rings(priv, queue, flags);
1692                 if (ret)
1693                         goto err_init_rx_buffers;
1694         }
1695
1696         return 0;
1697
1698 err_init_rx_buffers:
1699         while (queue >= 0) {
1700                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1701
1702                 if (rx_q->xsk_pool)
1703                         dma_free_rx_xskbufs(priv, queue);
1704                 else
1705                         dma_free_rx_skbufs(priv, queue);
1706
1707                 rx_q->buf_alloc_num = 0;
1708                 rx_q->xsk_pool = NULL;
1709
1710                 if (queue == 0)
1711                         break;
1712
1713                 queue--;
1714         }
1715
1716         return ret;
1717 }
1718
1719 /**
1720  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1721  * @priv: driver private structure
1722  * @queue : TX queue index
1723  * Description: this function initializes the DMA TX descriptors
1724  * and allocates the socket buffers. It supports the chained and ring
1725  * modes.
1726  */
1727 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
1728 {
1729         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1730         int i;
1731
1732         netif_dbg(priv, probe, priv->dev,
1733                   "(%s) dma_tx_phy=0x%08x\n", __func__,
1734                   (u32)tx_q->dma_tx_phy);
1735
1736         /* Setup the chained descriptor addresses */
1737         if (priv->mode == STMMAC_CHAIN_MODE) {
1738                 if (priv->extend_desc)
1739                         stmmac_mode_init(priv, tx_q->dma_etx,
1740                                          tx_q->dma_tx_phy,
1741                                          priv->dma_tx_size, 1);
1742                 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1743                         stmmac_mode_init(priv, tx_q->dma_tx,
1744                                          tx_q->dma_tx_phy,
1745                                          priv->dma_tx_size, 0);
1746         }
1747
1748         tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1749
1750         for (i = 0; i < priv->dma_tx_size; i++) {
1751                 struct dma_desc *p;
1752
1753                 if (priv->extend_desc)
1754                         p = &((tx_q->dma_etx + i)->basic);
1755                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1756                         p = &((tx_q->dma_entx + i)->basic);
1757                 else
1758                         p = tx_q->dma_tx + i;
1759
1760                 stmmac_clear_desc(priv, p);
1761
1762                 tx_q->tx_skbuff_dma[i].buf = 0;
1763                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1764                 tx_q->tx_skbuff_dma[i].len = 0;
1765                 tx_q->tx_skbuff_dma[i].last_segment = false;
1766                 tx_q->tx_skbuff[i] = NULL;
1767         }
1768
1769         tx_q->dirty_tx = 0;
1770         tx_q->cur_tx = 0;
1771         tx_q->mss = 0;
1772
1773         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1774
1775         return 0;
1776 }
1777
1778 static int init_dma_tx_desc_rings(struct net_device *dev)
1779 {
1780         struct stmmac_priv *priv = netdev_priv(dev);
1781         u32 tx_queue_cnt;
1782         u32 queue;
1783
1784         tx_queue_cnt = priv->plat->tx_queues_to_use;
1785
1786         for (queue = 0; queue < tx_queue_cnt; queue++)
1787                 __init_dma_tx_desc_rings(priv, queue);
1788
1789         return 0;
1790 }
1791
1792 /**
1793  * init_dma_desc_rings - init the RX/TX descriptor rings
1794  * @dev: net device structure
1795  * @flags: gfp flag.
1796  * Description: this function initializes the DMA RX/TX descriptors
1797  * and allocates the socket buffers. It supports the chained and ring
1798  * modes.
1799  */
1800 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1801 {
1802         struct stmmac_priv *priv = netdev_priv(dev);
1803         int ret;
1804
1805         ret = init_dma_rx_desc_rings(dev, flags);
1806         if (ret)
1807                 return ret;
1808
1809         ret = init_dma_tx_desc_rings(dev);
1810
1811         stmmac_clear_descriptors(priv);
1812
1813         if (netif_msg_hw(priv))
1814                 stmmac_display_rings(priv);
1815
1816         return ret;
1817 }
1818
1819 /**
1820  * dma_free_tx_skbufs - free TX dma buffers
1821  * @priv: private structure
1822  * @queue: TX queue index
1823  */
1824 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1825 {
1826         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1827         int i;
1828
1829         tx_q->xsk_frames_done = 0;
1830
1831         for (i = 0; i < priv->dma_tx_size; i++)
1832                 stmmac_free_tx_buffer(priv, queue, i);
1833
1834         if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1835                 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1836                 tx_q->xsk_frames_done = 0;
1837                 tx_q->xsk_pool = NULL;
1838         }
1839 }
1840
1841 /**
1842  * stmmac_free_tx_skbufs - free TX skb buffers
1843  * @priv: private structure
1844  */
1845 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1846 {
1847         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1848         u32 queue;
1849
1850         for (queue = 0; queue < tx_queue_cnt; queue++)
1851                 dma_free_tx_skbufs(priv, queue);
1852 }
1853
1854 /**
1855  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1856  * @priv: private structure
1857  * @queue: RX queue index
1858  */
1859 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1860 {
1861         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1862
1863         /* Release the DMA RX socket buffers */
1864         if (rx_q->xsk_pool)
1865                 dma_free_rx_xskbufs(priv, queue);
1866         else
1867                 dma_free_rx_skbufs(priv, queue);
1868
1869         rx_q->buf_alloc_num = 0;
1870         rx_q->xsk_pool = NULL;
1871
1872         /* Free DMA regions of consistent memory previously allocated */
1873         if (!priv->extend_desc)
1874                 dma_free_coherent(priv->device, priv->dma_rx_size *
1875                                   sizeof(struct dma_desc),
1876                                   rx_q->dma_rx, rx_q->dma_rx_phy);
1877         else
1878                 dma_free_coherent(priv->device, priv->dma_rx_size *
1879                                   sizeof(struct dma_extended_desc),
1880                                   rx_q->dma_erx, rx_q->dma_rx_phy);
1881
1882         if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1883                 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1884
1885         kfree(rx_q->buf_pool);
1886         if (rx_q->page_pool)
1887                 page_pool_destroy(rx_q->page_pool);
1888 }
1889
1890 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1891 {
1892         u32 rx_count = priv->plat->rx_queues_to_use;
1893         u32 queue;
1894
1895         /* Free RX queue resources */
1896         for (queue = 0; queue < rx_count; queue++)
1897                 __free_dma_rx_desc_resources(priv, queue);
1898 }
1899
1900 /**
1901  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1902  * @priv: private structure
1903  * @queue: TX queue index
1904  */
1905 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1906 {
1907         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1908         size_t size;
1909         void *addr;
1910
1911         /* Release the DMA TX socket buffers */
1912         dma_free_tx_skbufs(priv, queue);
1913
1914         if (priv->extend_desc) {
1915                 size = sizeof(struct dma_extended_desc);
1916                 addr = tx_q->dma_etx;
1917         } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1918                 size = sizeof(struct dma_edesc);
1919                 addr = tx_q->dma_entx;
1920         } else {
1921                 size = sizeof(struct dma_desc);
1922                 addr = tx_q->dma_tx;
1923         }
1924
1925         size *= priv->dma_tx_size;
1926
1927         dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1928
1929         kfree(tx_q->tx_skbuff_dma);
1930         kfree(tx_q->tx_skbuff);
1931 }
1932
1933 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1934 {
1935         u32 tx_count = priv->plat->tx_queues_to_use;
1936         u32 queue;
1937
1938         /* Free TX queue resources */
1939         for (queue = 0; queue < tx_count; queue++)
1940                 __free_dma_tx_desc_resources(priv, queue);
1941 }
1942
1943 /**
1944  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1945  * @priv: private structure
1946  * @queue: RX queue index
1947  * Description: according to which descriptor can be used (extend or basic)
1948  * this function allocates the resources for TX and RX paths. In case of
1949  * reception, for example, it pre-allocated the RX socket buffer in order to
1950  * allow zero-copy mechanism.
1951  */
1952 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1953 {
1954         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1955         struct stmmac_channel *ch = &priv->channel[queue];
1956         bool xdp_prog = stmmac_xdp_is_enabled(priv);
1957         struct page_pool_params pp_params = { 0 };
1958         unsigned int num_pages;
1959         unsigned int napi_id;
1960         int ret;
1961
1962         rx_q->queue_index = queue;
1963         rx_q->priv_data = priv;
1964
1965         pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
1966         pp_params.pool_size = priv->dma_rx_size;
1967         num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1968         pp_params.order = ilog2(num_pages);
1969         pp_params.nid = dev_to_node(priv->device);
1970         pp_params.dev = priv->device;
1971         pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1972         pp_params.offset = stmmac_rx_offset(priv);
1973         pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
1974
1975         rx_q->page_pool = page_pool_create(&pp_params);
1976         if (IS_ERR(rx_q->page_pool)) {
1977                 ret = PTR_ERR(rx_q->page_pool);
1978                 rx_q->page_pool = NULL;
1979                 return ret;
1980         }
1981
1982         rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1983                                  sizeof(*rx_q->buf_pool),
1984                                  GFP_KERNEL);
1985         if (!rx_q->buf_pool)
1986                 return -ENOMEM;
1987
1988         if (priv->extend_desc) {
1989                 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1990                                                    priv->dma_rx_size *
1991                                                    sizeof(struct dma_extended_desc),
1992                                                    &rx_q->dma_rx_phy,
1993                                                    GFP_KERNEL);
1994                 if (!rx_q->dma_erx)
1995                         return -ENOMEM;
1996
1997         } else {
1998                 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1999                                                   priv->dma_rx_size *
2000                                                   sizeof(struct dma_desc),
2001                                                   &rx_q->dma_rx_phy,
2002                                                   GFP_KERNEL);
2003                 if (!rx_q->dma_rx)
2004                         return -ENOMEM;
2005         }
2006
2007         if (stmmac_xdp_is_enabled(priv) &&
2008             test_bit(queue, priv->af_xdp_zc_qps))
2009                 napi_id = ch->rxtx_napi.napi_id;
2010         else
2011                 napi_id = ch->rx_napi.napi_id;
2012
2013         ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2014                                rx_q->queue_index,
2015                                napi_id);
2016         if (ret) {
2017                 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2018                 return -EINVAL;
2019         }
2020
2021         return 0;
2022 }
2023
2024 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2025 {
2026         u32 rx_count = priv->plat->rx_queues_to_use;
2027         u32 queue;
2028         int ret;
2029
2030         /* RX queues buffers and DMA */
2031         for (queue = 0; queue < rx_count; queue++) {
2032                 ret = __alloc_dma_rx_desc_resources(priv, queue);
2033                 if (ret)
2034                         goto err_dma;
2035         }
2036
2037         return 0;
2038
2039 err_dma:
2040         free_dma_rx_desc_resources(priv);
2041
2042         return ret;
2043 }
2044
2045 /**
2046  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2047  * @priv: private structure
2048  * @queue: TX queue index
2049  * Description: according to which descriptor can be used (extend or basic)
2050  * this function allocates the resources for TX and RX paths. In case of
2051  * reception, for example, it pre-allocated the RX socket buffer in order to
2052  * allow zero-copy mechanism.
2053  */
2054 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
2055 {
2056         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2057         size_t size;
2058         void *addr;
2059
2060         tx_q->queue_index = queue;
2061         tx_q->priv_data = priv;
2062
2063         tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2064                                       sizeof(*tx_q->tx_skbuff_dma),
2065                                       GFP_KERNEL);
2066         if (!tx_q->tx_skbuff_dma)
2067                 return -ENOMEM;
2068
2069         tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2070                                   sizeof(struct sk_buff *),
2071                                   GFP_KERNEL);
2072         if (!tx_q->tx_skbuff)
2073                 return -ENOMEM;
2074
2075         if (priv->extend_desc)
2076                 size = sizeof(struct dma_extended_desc);
2077         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2078                 size = sizeof(struct dma_edesc);
2079         else
2080                 size = sizeof(struct dma_desc);
2081
2082         size *= priv->dma_tx_size;
2083
2084         addr = dma_alloc_coherent(priv->device, size,
2085                                   &tx_q->dma_tx_phy, GFP_KERNEL);
2086         if (!addr)
2087                 return -ENOMEM;
2088
2089         if (priv->extend_desc)
2090                 tx_q->dma_etx = addr;
2091         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2092                 tx_q->dma_entx = addr;
2093         else
2094                 tx_q->dma_tx = addr;
2095
2096         return 0;
2097 }
2098
2099 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2100 {
2101         u32 tx_count = priv->plat->tx_queues_to_use;
2102         u32 queue;
2103         int ret;
2104
2105         /* TX queues buffers and DMA */
2106         for (queue = 0; queue < tx_count; queue++) {
2107                 ret = __alloc_dma_tx_desc_resources(priv, queue);
2108                 if (ret)
2109                         goto err_dma;
2110         }
2111
2112         return 0;
2113
2114 err_dma:
2115         free_dma_tx_desc_resources(priv);
2116         return ret;
2117 }
2118
2119 /**
2120  * alloc_dma_desc_resources - alloc TX/RX resources.
2121  * @priv: private structure
2122  * Description: according to which descriptor can be used (extend or basic)
2123  * this function allocates the resources for TX and RX paths. In case of
2124  * reception, for example, it pre-allocated the RX socket buffer in order to
2125  * allow zero-copy mechanism.
2126  */
2127 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2128 {
2129         /* RX Allocation */
2130         int ret = alloc_dma_rx_desc_resources(priv);
2131
2132         if (ret)
2133                 return ret;
2134
2135         ret = alloc_dma_tx_desc_resources(priv);
2136
2137         return ret;
2138 }
2139
2140 /**
2141  * free_dma_desc_resources - free dma desc resources
2142  * @priv: private structure
2143  */
2144 static void free_dma_desc_resources(struct stmmac_priv *priv)
2145 {
2146         /* Release the DMA TX socket buffers */
2147         free_dma_tx_desc_resources(priv);
2148
2149         /* Release the DMA RX socket buffers later
2150          * to ensure all pending XDP_TX buffers are returned.
2151          */
2152         free_dma_rx_desc_resources(priv);
2153 }
2154
2155 /**
2156  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2157  *  @priv: driver private structure
2158  *  Description: It is used for enabling the rx queues in the MAC
2159  */
2160 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2161 {
2162         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2163         int queue;
2164         u8 mode;
2165
2166         for (queue = 0; queue < rx_queues_count; queue++) {
2167                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2168                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2169         }
2170 }
2171
2172 /**
2173  * stmmac_start_rx_dma - start RX DMA channel
2174  * @priv: driver private structure
2175  * @chan: RX channel index
2176  * Description:
2177  * This starts a RX DMA channel
2178  */
2179 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2180 {
2181         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2182         stmmac_start_rx(priv, priv->ioaddr, chan);
2183 }
2184
2185 /**
2186  * stmmac_start_tx_dma - start TX DMA channel
2187  * @priv: driver private structure
2188  * @chan: TX channel index
2189  * Description:
2190  * This starts a TX DMA channel
2191  */
2192 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2193 {
2194         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2195         stmmac_start_tx(priv, priv->ioaddr, chan);
2196 }
2197
2198 /**
2199  * stmmac_stop_rx_dma - stop RX DMA channel
2200  * @priv: driver private structure
2201  * @chan: RX channel index
2202  * Description:
2203  * This stops a RX DMA channel
2204  */
2205 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2206 {
2207         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2208         stmmac_stop_rx(priv, priv->ioaddr, chan);
2209 }
2210
2211 /**
2212  * stmmac_stop_tx_dma - stop TX DMA channel
2213  * @priv: driver private structure
2214  * @chan: TX channel index
2215  * Description:
2216  * This stops a TX DMA channel
2217  */
2218 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2219 {
2220         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2221         stmmac_stop_tx(priv, priv->ioaddr, chan);
2222 }
2223
2224 /**
2225  * stmmac_start_all_dma - start all RX and TX DMA channels
2226  * @priv: driver private structure
2227  * Description:
2228  * This starts all the RX and TX DMA channels
2229  */
2230 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2231 {
2232         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2233         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2234         u32 chan = 0;
2235
2236         for (chan = 0; chan < rx_channels_count; chan++)
2237                 stmmac_start_rx_dma(priv, chan);
2238
2239         for (chan = 0; chan < tx_channels_count; chan++)
2240                 stmmac_start_tx_dma(priv, chan);
2241 }
2242
2243 /**
2244  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2245  * @priv: driver private structure
2246  * Description:
2247  * This stops the RX and TX DMA channels
2248  */
2249 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2250 {
2251         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2252         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2253         u32 chan = 0;
2254
2255         for (chan = 0; chan < rx_channels_count; chan++)
2256                 stmmac_stop_rx_dma(priv, chan);
2257
2258         for (chan = 0; chan < tx_channels_count; chan++)
2259                 stmmac_stop_tx_dma(priv, chan);
2260 }
2261
2262 /**
2263  *  stmmac_dma_operation_mode - HW DMA operation mode
2264  *  @priv: driver private structure
2265  *  Description: it is used for configuring the DMA operation mode register in
2266  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2267  */
2268 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2269 {
2270         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2271         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2272         int rxfifosz = priv->plat->rx_fifo_size;
2273         int txfifosz = priv->plat->tx_fifo_size;
2274         u32 txmode = 0;
2275         u32 rxmode = 0;
2276         u32 chan = 0;
2277         u8 qmode = 0;
2278
2279         if (rxfifosz == 0)
2280                 rxfifosz = priv->dma_cap.rx_fifo_size;
2281         if (txfifosz == 0)
2282                 txfifosz = priv->dma_cap.tx_fifo_size;
2283
2284         /* Adjust for real per queue fifo size */
2285         rxfifosz /= rx_channels_count;
2286         txfifosz /= tx_channels_count;
2287
2288         if (priv->plat->force_thresh_dma_mode) {
2289                 txmode = tc;
2290                 rxmode = tc;
2291         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2292                 /*
2293                  * In case of GMAC, SF mode can be enabled
2294                  * to perform the TX COE in HW. This depends on:
2295                  * 1) TX COE if actually supported
2296                  * 2) There is no bugged Jumbo frame support
2297                  *    that needs to not insert csum in the TDES.
2298                  */
2299                 txmode = SF_DMA_MODE;
2300                 rxmode = SF_DMA_MODE;
2301                 priv->xstats.threshold = SF_DMA_MODE;
2302         } else {
2303                 txmode = tc;
2304                 rxmode = SF_DMA_MODE;
2305         }
2306
2307         /* configure all channels */
2308         for (chan = 0; chan < rx_channels_count; chan++) {
2309                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2310                 u32 buf_size;
2311
2312                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2313
2314                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2315                                 rxfifosz, qmode);
2316
2317                 if (rx_q->xsk_pool) {
2318                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2319                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2320                                               buf_size,
2321                                               chan);
2322                 } else {
2323                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
2324                                               priv->dma_buf_sz,
2325                                               chan);
2326                 }
2327         }
2328
2329         for (chan = 0; chan < tx_channels_count; chan++) {
2330                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2331
2332                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2333                                 txfifosz, qmode);
2334         }
2335 }
2336
2337 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2338 {
2339         struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2340         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2341         struct xsk_buff_pool *pool = tx_q->xsk_pool;
2342         unsigned int entry = tx_q->cur_tx;
2343         struct dma_desc *tx_desc = NULL;
2344         struct xdp_desc xdp_desc;
2345         bool work_done = true;
2346
2347         /* Avoids TX time-out as we are sharing with slow path */
2348         txq_trans_cond_update(nq);
2349
2350         budget = min(budget, stmmac_tx_avail(priv, queue));
2351
2352         while (budget-- > 0) {
2353                 dma_addr_t dma_addr;
2354                 bool set_ic;
2355
2356                 /* We are sharing with slow path and stop XSK TX desc submission when
2357                  * available TX ring is less than threshold.
2358                  */
2359                 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2360                     !netif_carrier_ok(priv->dev)) {
2361                         work_done = false;
2362                         break;
2363                 }
2364
2365                 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2366                         break;
2367
2368                 if (likely(priv->extend_desc))
2369                         tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2370                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2371                         tx_desc = &tx_q->dma_entx[entry].basic;
2372                 else
2373                         tx_desc = tx_q->dma_tx + entry;
2374
2375                 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2376                 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2377
2378                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2379
2380                 /* To return XDP buffer to XSK pool, we simple call
2381                  * xsk_tx_completed(), so we don't need to fill up
2382                  * 'buf' and 'xdpf'.
2383                  */
2384                 tx_q->tx_skbuff_dma[entry].buf = 0;
2385                 tx_q->xdpf[entry] = NULL;
2386
2387                 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2388                 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2389                 tx_q->tx_skbuff_dma[entry].last_segment = true;
2390                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2391
2392                 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2393
2394                 tx_q->tx_count_frames++;
2395
2396                 if (!priv->tx_coal_frames[queue])
2397                         set_ic = false;
2398                 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2399                         set_ic = true;
2400                 else
2401                         set_ic = false;
2402
2403                 if (set_ic) {
2404                         tx_q->tx_count_frames = 0;
2405                         stmmac_set_tx_ic(priv, tx_desc);
2406                         priv->xstats.tx_set_ic_bit++;
2407                 }
2408
2409                 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2410                                        true, priv->mode, true, true,
2411                                        xdp_desc.len);
2412
2413                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2414
2415                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2416                 entry = tx_q->cur_tx;
2417         }
2418
2419         if (tx_desc) {
2420                 stmmac_flush_tx_descriptors(priv, queue);
2421                 xsk_tx_release(pool);
2422         }
2423
2424         /* Return true if all of the 3 conditions are met
2425          *  a) TX Budget is still available
2426          *  b) work_done = true when XSK TX desc peek is empty (no more
2427          *     pending XSK TX for transmission)
2428          */
2429         return !!budget && work_done;
2430 }
2431
2432 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2433 {
2434         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2435                 tc += 64;
2436
2437                 if (priv->plat->force_thresh_dma_mode)
2438                         stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2439                 else
2440                         stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2441                                                       chan);
2442
2443                 priv->xstats.threshold = tc;
2444         }
2445 }
2446
2447 /**
2448  * stmmac_tx_clean - to manage the transmission completion
2449  * @priv: driver private structure
2450  * @budget: napi budget limiting this functions packet handling
2451  * @queue: TX queue index
2452  * Description: it reclaims the transmit resources after transmission completes.
2453  */
2454 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2455 {
2456         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2457         unsigned int bytes_compl = 0, pkts_compl = 0;
2458         unsigned int entry, xmits = 0, count = 0;
2459
2460         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2461
2462         priv->xstats.tx_clean++;
2463
2464         tx_q->xsk_frames_done = 0;
2465
2466         entry = tx_q->dirty_tx;
2467
2468         /* Try to clean all TX complete frame in 1 shot */
2469         while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
2470                 struct xdp_frame *xdpf;
2471                 struct sk_buff *skb;
2472                 struct dma_desc *p;
2473                 int status;
2474
2475                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2476                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2477                         xdpf = tx_q->xdpf[entry];
2478                         skb = NULL;
2479                 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2480                         xdpf = NULL;
2481                         skb = tx_q->tx_skbuff[entry];
2482                 } else {
2483                         xdpf = NULL;
2484                         skb = NULL;
2485                 }
2486
2487                 if (priv->extend_desc)
2488                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
2489                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2490                         p = &tx_q->dma_entx[entry].basic;
2491                 else
2492                         p = tx_q->dma_tx + entry;
2493
2494                 status = stmmac_tx_status(priv, &priv->dev->stats,
2495                                 &priv->xstats, p, priv->ioaddr);
2496                 /* Check if the descriptor is owned by the DMA */
2497                 if (unlikely(status & tx_dma_own))
2498                         break;
2499
2500                 count++;
2501
2502                 /* Make sure descriptor fields are read after reading
2503                  * the own bit.
2504                  */
2505                 dma_rmb();
2506
2507                 /* Just consider the last segment and ...*/
2508                 if (likely(!(status & tx_not_ls))) {
2509                         /* ... verify the status error condition */
2510                         if (unlikely(status & tx_err)) {
2511                                 priv->dev->stats.tx_errors++;
2512                                 if (unlikely(status & tx_err_bump_tc))
2513                                         stmmac_bump_dma_threshold(priv, queue);
2514                         } else {
2515                                 priv->dev->stats.tx_packets++;
2516                                 priv->xstats.tx_pkt_n++;
2517                                 priv->xstats.txq_stats[queue].tx_pkt_n++;
2518                         }
2519                         if (skb)
2520                                 stmmac_get_tx_hwtstamp(priv, p, skb);
2521                 }
2522
2523                 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2524                            tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2525                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2526                                 dma_unmap_page(priv->device,
2527                                                tx_q->tx_skbuff_dma[entry].buf,
2528                                                tx_q->tx_skbuff_dma[entry].len,
2529                                                DMA_TO_DEVICE);
2530                         else
2531                                 dma_unmap_single(priv->device,
2532                                                  tx_q->tx_skbuff_dma[entry].buf,
2533                                                  tx_q->tx_skbuff_dma[entry].len,
2534                                                  DMA_TO_DEVICE);
2535                         tx_q->tx_skbuff_dma[entry].buf = 0;
2536                         tx_q->tx_skbuff_dma[entry].len = 0;
2537                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2538                 }
2539
2540                 stmmac_clean_desc3(priv, tx_q, p);
2541
2542                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2543                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2544
2545                 if (xdpf &&
2546                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2547                         xdp_return_frame_rx_napi(xdpf);
2548                         tx_q->xdpf[entry] = NULL;
2549                 }
2550
2551                 if (xdpf &&
2552                     tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2553                         xdp_return_frame(xdpf);
2554                         tx_q->xdpf[entry] = NULL;
2555                 }
2556
2557                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2558                         tx_q->xsk_frames_done++;
2559
2560                 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2561                         if (likely(skb)) {
2562                                 pkts_compl++;
2563                                 bytes_compl += skb->len;
2564                                 dev_consume_skb_any(skb);
2565                                 tx_q->tx_skbuff[entry] = NULL;
2566                         }
2567                 }
2568
2569                 stmmac_release_tx_desc(priv, p, priv->mode);
2570
2571                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2572         }
2573         tx_q->dirty_tx = entry;
2574
2575         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2576                                   pkts_compl, bytes_compl);
2577
2578         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2579                                                                 queue))) &&
2580             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2581
2582                 netif_dbg(priv, tx_done, priv->dev,
2583                           "%s: restart transmit\n", __func__);
2584                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2585         }
2586
2587         if (tx_q->xsk_pool) {
2588                 bool work_done;
2589
2590                 if (tx_q->xsk_frames_done)
2591                         xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2592
2593                 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2594                         xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2595
2596                 /* For XSK TX, we try to send as many as possible.
2597                  * If XSK work done (XSK TX desc empty and budget still
2598                  * available), return "budget - 1" to reenable TX IRQ.
2599                  * Else, return "budget" to make NAPI continue polling.
2600                  */
2601                 work_done = stmmac_xdp_xmit_zc(priv, queue,
2602                                                STMMAC_XSK_TX_BUDGET_MAX);
2603                 if (work_done)
2604                         xmits = budget - 1;
2605                 else
2606                         xmits = budget;
2607         }
2608
2609         if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2610             priv->eee_sw_timer_en) {
2611                 stmmac_enable_eee_mode(priv);
2612                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2613         }
2614
2615         /* We still have pending packets, let's call for a new scheduling */
2616         if (tx_q->dirty_tx != tx_q->cur_tx)
2617                 hrtimer_start(&tx_q->txtimer,
2618                               STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2619                               HRTIMER_MODE_REL);
2620
2621         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2622
2623         /* Combine decisions from TX clean and XSK TX */
2624         return max(count, xmits);
2625 }
2626
2627 /**
2628  * stmmac_tx_err - to manage the tx error
2629  * @priv: driver private structure
2630  * @chan: channel index
2631  * Description: it cleans the descriptors and restarts the transmission
2632  * in case of transmission errors.
2633  */
2634 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2635 {
2636         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2637
2638         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2639
2640         stmmac_stop_tx_dma(priv, chan);
2641         dma_free_tx_skbufs(priv, chan);
2642         stmmac_clear_tx_descriptors(priv, chan);
2643         tx_q->dirty_tx = 0;
2644         tx_q->cur_tx = 0;
2645         tx_q->mss = 0;
2646         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2647         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2648                             tx_q->dma_tx_phy, chan);
2649         stmmac_start_tx_dma(priv, chan);
2650
2651         priv->dev->stats.tx_errors++;
2652         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2653 }
2654
2655 /**
2656  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2657  *  @priv: driver private structure
2658  *  @txmode: TX operating mode
2659  *  @rxmode: RX operating mode
2660  *  @chan: channel index
2661  *  Description: it is used for configuring of the DMA operation mode in
2662  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2663  *  mode.
2664  */
2665 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2666                                           u32 rxmode, u32 chan)
2667 {
2668         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2669         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2670         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2671         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2672         int rxfifosz = priv->plat->rx_fifo_size;
2673         int txfifosz = priv->plat->tx_fifo_size;
2674
2675         if (rxfifosz == 0)
2676                 rxfifosz = priv->dma_cap.rx_fifo_size;
2677         if (txfifosz == 0)
2678                 txfifosz = priv->dma_cap.tx_fifo_size;
2679
2680         /* Adjust for real per queue fifo size */
2681         rxfifosz /= rx_channels_count;
2682         txfifosz /= tx_channels_count;
2683
2684         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2685         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2686 }
2687
2688 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2689 {
2690         int ret;
2691
2692         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2693                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2694         if (ret && (ret != -EINVAL)) {
2695                 stmmac_global_err(priv);
2696                 return true;
2697         }
2698
2699         return false;
2700 }
2701
2702 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2703 {
2704         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2705                                                  &priv->xstats, chan, dir);
2706         struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2707         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2708         struct stmmac_channel *ch = &priv->channel[chan];
2709         struct napi_struct *rx_napi;
2710         struct napi_struct *tx_napi;
2711         unsigned long flags;
2712
2713         rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2714         tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2715
2716         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2717                 if (napi_schedule_prep(rx_napi)) {
2718                         spin_lock_irqsave(&ch->lock, flags);
2719                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2720                         spin_unlock_irqrestore(&ch->lock, flags);
2721                         __napi_schedule(rx_napi);
2722                 }
2723         }
2724
2725         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2726                 if (napi_schedule_prep(tx_napi)) {
2727                         spin_lock_irqsave(&ch->lock, flags);
2728                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2729                         spin_unlock_irqrestore(&ch->lock, flags);
2730                         __napi_schedule(tx_napi);
2731                 }
2732         }
2733
2734         return status;
2735 }
2736
2737 /**
2738  * stmmac_dma_interrupt - DMA ISR
2739  * @priv: driver private structure
2740  * Description: this is the DMA ISR. It is called by the main ISR.
2741  * It calls the dwmac dma routine and schedule poll method in case of some
2742  * work can be done.
2743  */
2744 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2745 {
2746         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2747         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2748         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2749                                 tx_channel_count : rx_channel_count;
2750         u32 chan;
2751         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2752
2753         /* Make sure we never check beyond our status buffer. */
2754         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2755                 channels_to_check = ARRAY_SIZE(status);
2756
2757         for (chan = 0; chan < channels_to_check; chan++)
2758                 status[chan] = stmmac_napi_check(priv, chan,
2759                                                  DMA_DIR_RXTX);
2760
2761         for (chan = 0; chan < tx_channel_count; chan++) {
2762                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2763                         /* Try to bump up the dma threshold on this failure */
2764                         stmmac_bump_dma_threshold(priv, chan);
2765                 } else if (unlikely(status[chan] == tx_hard_error)) {
2766                         stmmac_tx_err(priv, chan);
2767                 }
2768         }
2769 }
2770
2771 /**
2772  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2773  * @priv: driver private structure
2774  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2775  */
2776 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2777 {
2778         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2779                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2780
2781         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2782
2783         if (priv->dma_cap.rmon) {
2784                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2785                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2786         } else
2787                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2788 }
2789
2790 /**
2791  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2792  * @priv: driver private structure
2793  * Description:
2794  *  new GMAC chip generations have a new register to indicate the
2795  *  presence of the optional feature/functions.
2796  *  This can be also used to override the value passed through the
2797  *  platform and necessary for old MAC10/100 and GMAC chips.
2798  */
2799 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2800 {
2801         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2802 }
2803
2804 /**
2805  * stmmac_check_ether_addr - check if the MAC addr is valid
2806  * @priv: driver private structure
2807  * Description:
2808  * it is to verify if the MAC address is valid, in case of failures it
2809  * generates a random MAC address
2810  */
2811 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2812 {
2813         u8 addr[ETH_ALEN];
2814
2815         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2816                 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2817                 if (is_valid_ether_addr(addr))
2818                         eth_hw_addr_set(priv->dev, addr);
2819                 else
2820                         eth_hw_addr_random(priv->dev);
2821                 dev_info(priv->device, "device MAC address %pM\n",
2822                          priv->dev->dev_addr);
2823         }
2824 }
2825
2826 /**
2827  * stmmac_init_dma_engine - DMA init.
2828  * @priv: driver private structure
2829  * Description:
2830  * It inits the DMA invoking the specific MAC/GMAC callback.
2831  * Some DMA parameters can be passed from the platform;
2832  * in case of these are not passed a default is kept for the MAC or GMAC.
2833  */
2834 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2835 {
2836         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2837         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2838         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2839         struct stmmac_rx_queue *rx_q;
2840         struct stmmac_tx_queue *tx_q;
2841         u32 chan = 0;
2842         int atds = 0;
2843         int ret = 0;
2844
2845         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2846                 dev_err(priv->device, "Invalid DMA configuration\n");
2847                 return -EINVAL;
2848         }
2849
2850         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2851                 atds = 1;
2852
2853         ret = stmmac_reset(priv, priv->ioaddr);
2854         if (ret) {
2855                 dev_err(priv->device, "Failed to reset the dma\n");
2856                 return ret;
2857         }
2858
2859         /* DMA Configuration */
2860         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2861
2862         if (priv->plat->axi)
2863                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2864
2865         /* DMA CSR Channel configuration */
2866         for (chan = 0; chan < dma_csr_ch; chan++)
2867                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2868
2869         /* DMA RX Channel Configuration */
2870         for (chan = 0; chan < rx_channels_count; chan++) {
2871                 rx_q = &priv->rx_queue[chan];
2872
2873                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2874                                     rx_q->dma_rx_phy, chan);
2875
2876                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2877                                      (rx_q->buf_alloc_num *
2878                                       sizeof(struct dma_desc));
2879                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2880                                        rx_q->rx_tail_addr, chan);
2881         }
2882
2883         /* DMA TX Channel Configuration */
2884         for (chan = 0; chan < tx_channels_count; chan++) {
2885                 tx_q = &priv->tx_queue[chan];
2886
2887                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2888                                     tx_q->dma_tx_phy, chan);
2889
2890                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2891                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2892                                        tx_q->tx_tail_addr, chan);
2893         }
2894
2895         return ret;
2896 }
2897
2898 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2899 {
2900         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2901
2902         hrtimer_start(&tx_q->txtimer,
2903                       STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2904                       HRTIMER_MODE_REL);
2905 }
2906
2907 /**
2908  * stmmac_tx_timer - mitigation sw timer for tx.
2909  * @t: data pointer
2910  * Description:
2911  * This is the timer handler to directly invoke the stmmac_tx_clean.
2912  */
2913 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2914 {
2915         struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2916         struct stmmac_priv *priv = tx_q->priv_data;
2917         struct stmmac_channel *ch;
2918         struct napi_struct *napi;
2919
2920         ch = &priv->channel[tx_q->queue_index];
2921         napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2922
2923         if (likely(napi_schedule_prep(napi))) {
2924                 unsigned long flags;
2925
2926                 spin_lock_irqsave(&ch->lock, flags);
2927                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2928                 spin_unlock_irqrestore(&ch->lock, flags);
2929                 __napi_schedule(napi);
2930         }
2931
2932         return HRTIMER_NORESTART;
2933 }
2934
2935 /**
2936  * stmmac_init_coalesce - init mitigation options.
2937  * @priv: driver private structure
2938  * Description:
2939  * This inits the coalesce parameters: i.e. timer rate,
2940  * timer handler and default threshold used for enabling the
2941  * interrupt on completion bit.
2942  */
2943 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2944 {
2945         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2946         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2947         u32 chan;
2948
2949         for (chan = 0; chan < tx_channel_count; chan++) {
2950                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2951
2952                 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2953                 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2954
2955                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2956                 tx_q->txtimer.function = stmmac_tx_timer;
2957         }
2958
2959         for (chan = 0; chan < rx_channel_count; chan++)
2960                 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2961 }
2962
2963 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2964 {
2965         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2966         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2967         u32 chan;
2968
2969         /* set TX ring length */
2970         for (chan = 0; chan < tx_channels_count; chan++)
2971                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2972                                        (priv->dma_tx_size - 1), chan);
2973
2974         /* set RX ring length */
2975         for (chan = 0; chan < rx_channels_count; chan++)
2976                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2977                                        (priv->dma_rx_size - 1), chan);
2978 }
2979
2980 /**
2981  *  stmmac_set_tx_queue_weight - Set TX queue weight
2982  *  @priv: driver private structure
2983  *  Description: It is used for setting TX queues weight
2984  */
2985 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2986 {
2987         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2988         u32 weight;
2989         u32 queue;
2990
2991         for (queue = 0; queue < tx_queues_count; queue++) {
2992                 weight = priv->plat->tx_queues_cfg[queue].weight;
2993                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2994         }
2995 }
2996
2997 /**
2998  *  stmmac_configure_cbs - Configure CBS in TX queue
2999  *  @priv: driver private structure
3000  *  Description: It is used for configuring CBS in AVB TX queues
3001  */
3002 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3003 {
3004         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3005         u32 mode_to_use;
3006         u32 queue;
3007
3008         /* queue 0 is reserved for legacy traffic */
3009         for (queue = 1; queue < tx_queues_count; queue++) {
3010                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3011                 if (mode_to_use == MTL_QUEUE_DCB)
3012                         continue;
3013
3014                 stmmac_config_cbs(priv, priv->hw,
3015                                 priv->plat->tx_queues_cfg[queue].send_slope,
3016                                 priv->plat->tx_queues_cfg[queue].idle_slope,
3017                                 priv->plat->tx_queues_cfg[queue].high_credit,
3018                                 priv->plat->tx_queues_cfg[queue].low_credit,
3019                                 queue);
3020         }
3021 }
3022
3023 /**
3024  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3025  *  @priv: driver private structure
3026  *  Description: It is used for mapping RX queues to RX dma channels
3027  */
3028 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3029 {
3030         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3031         u32 queue;
3032         u32 chan;
3033
3034         for (queue = 0; queue < rx_queues_count; queue++) {
3035                 chan = priv->plat->rx_queues_cfg[queue].chan;
3036                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3037         }
3038 }
3039
3040 /**
3041  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3042  *  @priv: driver private structure
3043  *  Description: It is used for configuring the RX Queue Priority
3044  */
3045 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3046 {
3047         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3048         u32 queue;
3049         u32 prio;
3050
3051         for (queue = 0; queue < rx_queues_count; queue++) {
3052                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3053                         continue;
3054
3055                 prio = priv->plat->rx_queues_cfg[queue].prio;
3056                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3057         }
3058 }
3059
3060 /**
3061  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3062  *  @priv: driver private structure
3063  *  Description: It is used for configuring the TX Queue Priority
3064  */
3065 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3066 {
3067         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3068         u32 queue;
3069         u32 prio;
3070
3071         for (queue = 0; queue < tx_queues_count; queue++) {
3072                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3073                         continue;
3074
3075                 prio = priv->plat->tx_queues_cfg[queue].prio;
3076                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3077         }
3078 }
3079
3080 /**
3081  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3082  *  @priv: driver private structure
3083  *  Description: It is used for configuring the RX queue routing
3084  */
3085 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3086 {
3087         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3088         u32 queue;
3089         u8 packet;
3090
3091         for (queue = 0; queue < rx_queues_count; queue++) {
3092                 /* no specific packet type routing specified for the queue */
3093                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3094                         continue;
3095
3096                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3097                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3098         }
3099 }
3100
3101 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3102 {
3103         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3104                 priv->rss.enable = false;
3105                 return;
3106         }
3107
3108         if (priv->dev->features & NETIF_F_RXHASH)
3109                 priv->rss.enable = true;
3110         else
3111                 priv->rss.enable = false;
3112
3113         stmmac_rss_configure(priv, priv->hw, &priv->rss,
3114                              priv->plat->rx_queues_to_use);
3115 }
3116
3117 /**
3118  *  stmmac_mtl_configuration - Configure MTL
3119  *  @priv: driver private structure
3120  *  Description: It is used for configurring MTL
3121  */
3122 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3123 {
3124         u32 rx_queues_count = priv->plat->rx_queues_to_use;
3125         u32 tx_queues_count = priv->plat->tx_queues_to_use;
3126
3127         if (tx_queues_count > 1)
3128                 stmmac_set_tx_queue_weight(priv);
3129
3130         /* Configure MTL RX algorithms */
3131         if (rx_queues_count > 1)
3132                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3133                                 priv->plat->rx_sched_algorithm);
3134
3135         /* Configure MTL TX algorithms */
3136         if (tx_queues_count > 1)
3137                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3138                                 priv->plat->tx_sched_algorithm);
3139
3140         /* Configure CBS in AVB TX queues */
3141         if (tx_queues_count > 1)
3142                 stmmac_configure_cbs(priv);
3143
3144         /* Map RX MTL to DMA channels */
3145         stmmac_rx_queue_dma_chan_map(priv);
3146
3147         /* Enable MAC RX Queues */
3148         stmmac_mac_enable_rx_queues(priv);
3149
3150         /* Set RX priorities */
3151         if (rx_queues_count > 1)
3152                 stmmac_mac_config_rx_queues_prio(priv);
3153
3154         /* Set TX priorities */
3155         if (tx_queues_count > 1)
3156                 stmmac_mac_config_tx_queues_prio(priv);
3157
3158         /* Set RX routing */
3159         if (rx_queues_count > 1)
3160                 stmmac_mac_config_rx_queues_routing(priv);
3161
3162         /* Receive Side Scaling */
3163         if (rx_queues_count > 1)
3164                 stmmac_mac_config_rss(priv);
3165 }
3166
3167 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3168 {
3169         if (priv->dma_cap.asp) {
3170                 netdev_info(priv->dev, "Enabling Safety Features\n");
3171                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3172                                           priv->plat->safety_feat_cfg);
3173         } else {
3174                 netdev_info(priv->dev, "No Safety Features support found\n");
3175         }
3176 }
3177
3178 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3179 {
3180         char *name;
3181
3182         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3183         clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3184
3185         name = priv->wq_name;
3186         sprintf(name, "%s-fpe", priv->dev->name);
3187
3188         priv->fpe_wq = create_singlethread_workqueue(name);
3189         if (!priv->fpe_wq) {
3190                 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3191
3192                 return -ENOMEM;
3193         }
3194         netdev_info(priv->dev, "FPE workqueue start");
3195
3196         return 0;
3197 }
3198
3199 /**
3200  * stmmac_hw_setup - setup mac in a usable state.
3201  *  @dev : pointer to the device structure.
3202  *  @init_ptp: initialize PTP if set
3203  *  Description:
3204  *  this is the main function to setup the HW in a usable state because the
3205  *  dma engine is reset, the core registers are configured (e.g. AXI,
3206  *  Checksum features, timers). The DMA is ready to start receiving and
3207  *  transmitting.
3208  *  Return value:
3209  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3210  *  file on failure.
3211  */
3212 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
3213 {
3214         struct stmmac_priv *priv = netdev_priv(dev);
3215         u32 rx_cnt = priv->plat->rx_queues_to_use;
3216         u32 tx_cnt = priv->plat->tx_queues_to_use;
3217         bool sph_en;
3218         u32 chan;
3219         int ret;
3220
3221         /* DMA initialization and SW reset */
3222         ret = stmmac_init_dma_engine(priv);
3223         if (ret < 0) {
3224                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3225                            __func__);
3226                 return ret;
3227         }
3228
3229         /* Copy the MAC addr into the HW  */
3230         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3231
3232         /* PS and related bits will be programmed according to the speed */
3233         if (priv->hw->pcs) {
3234                 int speed = priv->plat->mac_port_sel_speed;
3235
3236                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3237                     (speed == SPEED_1000)) {
3238                         priv->hw->ps = speed;
3239                 } else {
3240                         dev_warn(priv->device, "invalid port speed\n");
3241                         priv->hw->ps = 0;
3242                 }
3243         }
3244
3245         /* Initialize the MAC Core */
3246         stmmac_core_init(priv, priv->hw, dev);
3247
3248         /* Initialize MTL*/
3249         stmmac_mtl_configuration(priv);
3250
3251         /* Initialize Safety Features */
3252         stmmac_safety_feat_configuration(priv);
3253
3254         ret = stmmac_rx_ipc(priv, priv->hw);
3255         if (!ret) {
3256                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3257                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3258                 priv->hw->rx_csum = 0;
3259         }
3260
3261         /* Enable the MAC Rx/Tx */
3262         stmmac_mac_set(priv, priv->ioaddr, true);
3263
3264         /* Set the HW DMA mode and the COE */
3265         stmmac_dma_operation_mode(priv);
3266
3267         stmmac_mmc_setup(priv);
3268
3269         if (init_ptp) {
3270                 ret = stmmac_init_ptp(priv);
3271                 if (ret == -EOPNOTSUPP)
3272                         netdev_warn(priv->dev, "PTP not supported by HW\n");
3273                 else if (ret)
3274                         netdev_warn(priv->dev, "PTP init failed\n");
3275         }
3276
3277         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3278
3279         /* Convert the timer from msec to usec */
3280         if (!priv->tx_lpi_timer)
3281                 priv->tx_lpi_timer = eee_timer * 1000;
3282
3283         if (priv->use_riwt) {
3284                 u32 queue;
3285
3286                 for (queue = 0; queue < rx_cnt; queue++) {
3287                         if (!priv->rx_riwt[queue])
3288                                 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3289
3290                         stmmac_rx_watchdog(priv, priv->ioaddr,
3291                                            priv->rx_riwt[queue], queue);
3292                 }
3293         }
3294
3295         if (priv->hw->pcs)
3296                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3297
3298         /* set TX and RX rings length */
3299         stmmac_set_rings_length(priv);
3300
3301         /* Enable TSO */
3302         if (priv->tso) {
3303                 for (chan = 0; chan < tx_cnt; chan++) {
3304                         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3305
3306                         /* TSO and TBS cannot co-exist */
3307                         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3308                                 continue;
3309
3310                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3311                 }
3312         }
3313
3314         /* Enable Split Header */
3315         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3316         for (chan = 0; chan < rx_cnt; chan++)
3317                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3318
3319
3320         /* VLAN Tag Insertion */
3321         if (priv->dma_cap.vlins)
3322                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3323
3324         /* TBS */
3325         for (chan = 0; chan < tx_cnt; chan++) {
3326                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3327                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3328
3329                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3330         }
3331
3332         /* Configure real RX and TX queues */
3333         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3334         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3335
3336         /* Start the ball rolling... */
3337         stmmac_start_all_dma(priv);
3338
3339         if (priv->dma_cap.fpesel) {
3340                 stmmac_fpe_start_wq(priv);
3341
3342                 if (priv->plat->fpe_cfg->enable)
3343                         stmmac_fpe_handshake(priv, true);
3344         }
3345
3346         return 0;
3347 }
3348
3349 static void stmmac_hw_teardown(struct net_device *dev)
3350 {
3351         struct stmmac_priv *priv = netdev_priv(dev);
3352
3353         clk_disable_unprepare(priv->plat->clk_ptp_ref);
3354 }
3355
3356 static void stmmac_free_irq(struct net_device *dev,
3357                             enum request_irq_err irq_err, int irq_idx)
3358 {
3359         struct stmmac_priv *priv = netdev_priv(dev);
3360         int j;
3361
3362         switch (irq_err) {
3363         case REQ_IRQ_ERR_ALL:
3364                 irq_idx = priv->plat->tx_queues_to_use;
3365                 fallthrough;
3366         case REQ_IRQ_ERR_TX:
3367                 for (j = irq_idx - 1; j >= 0; j--) {
3368                         if (priv->tx_irq[j] > 0) {
3369                                 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3370                                 free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
3371                         }
3372                 }
3373                 irq_idx = priv->plat->rx_queues_to_use;
3374                 fallthrough;
3375         case REQ_IRQ_ERR_RX:
3376                 for (j = irq_idx - 1; j >= 0; j--) {
3377                         if (priv->rx_irq[j] > 0) {
3378                                 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3379                                 free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
3380                         }
3381                 }
3382
3383                 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3384                         free_irq(priv->sfty_ue_irq, dev);
3385                 fallthrough;
3386         case REQ_IRQ_ERR_SFTY_UE:
3387                 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3388                         free_irq(priv->sfty_ce_irq, dev);
3389                 fallthrough;
3390         case REQ_IRQ_ERR_SFTY_CE:
3391                 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3392                         free_irq(priv->lpi_irq, dev);
3393                 fallthrough;
3394         case REQ_IRQ_ERR_LPI:
3395                 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3396                         free_irq(priv->wol_irq, dev);
3397                 fallthrough;
3398         case REQ_IRQ_ERR_WOL:
3399                 free_irq(dev->irq, dev);
3400                 fallthrough;
3401         case REQ_IRQ_ERR_MAC:
3402         case REQ_IRQ_ERR_NO:
3403                 /* If MAC IRQ request error, no more IRQ to free */
3404                 break;
3405         }
3406 }
3407
3408 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3409 {
3410         struct stmmac_priv *priv = netdev_priv(dev);
3411         enum request_irq_err irq_err;
3412         cpumask_t cpu_mask;
3413         int irq_idx = 0;
3414         char *int_name;
3415         int ret;
3416         int i;
3417
3418         /* For common interrupt */
3419         int_name = priv->int_name_mac;
3420         sprintf(int_name, "%s:%s", dev->name, "mac");
3421         ret = request_irq(dev->irq, stmmac_mac_interrupt,
3422                           0, int_name, dev);
3423         if (unlikely(ret < 0)) {
3424                 netdev_err(priv->dev,
3425                            "%s: alloc mac MSI %d (error: %d)\n",
3426                            __func__, dev->irq, ret);
3427                 irq_err = REQ_IRQ_ERR_MAC;
3428                 goto irq_error;
3429         }
3430
3431         /* Request the Wake IRQ in case of another line
3432          * is used for WoL
3433          */
3434         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3435                 int_name = priv->int_name_wol;
3436                 sprintf(int_name, "%s:%s", dev->name, "wol");
3437                 ret = request_irq(priv->wol_irq,
3438                                   stmmac_mac_interrupt,
3439                                   0, int_name, dev);
3440                 if (unlikely(ret < 0)) {
3441                         netdev_err(priv->dev,
3442                                    "%s: alloc wol MSI %d (error: %d)\n",
3443                                    __func__, priv->wol_irq, ret);
3444                         irq_err = REQ_IRQ_ERR_WOL;
3445                         goto irq_error;
3446                 }
3447         }
3448
3449         /* Request the LPI IRQ in case of another line
3450          * is used for LPI
3451          */
3452         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3453                 int_name = priv->int_name_lpi;
3454                 sprintf(int_name, "%s:%s", dev->name, "lpi");
3455                 ret = request_irq(priv->lpi_irq,
3456                                   stmmac_mac_interrupt,
3457                                   0, int_name, dev);
3458                 if (unlikely(ret < 0)) {
3459                         netdev_err(priv->dev,
3460                                    "%s: alloc lpi MSI %d (error: %d)\n",
3461                                    __func__, priv->lpi_irq, ret);
3462                         irq_err = REQ_IRQ_ERR_LPI;
3463                         goto irq_error;
3464                 }
3465         }
3466
3467         /* Request the Safety Feature Correctible Error line in
3468          * case of another line is used
3469          */
3470         if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3471                 int_name = priv->int_name_sfty_ce;
3472                 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3473                 ret = request_irq(priv->sfty_ce_irq,
3474                                   stmmac_safety_interrupt,
3475                                   0, int_name, dev);
3476                 if (unlikely(ret < 0)) {
3477                         netdev_err(priv->dev,
3478                                    "%s: alloc sfty ce MSI %d (error: %d)\n",
3479                                    __func__, priv->sfty_ce_irq, ret);
3480                         irq_err = REQ_IRQ_ERR_SFTY_CE;
3481                         goto irq_error;
3482                 }
3483         }
3484
3485         /* Request the Safety Feature Uncorrectible Error line in
3486          * case of another line is used
3487          */
3488         if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3489                 int_name = priv->int_name_sfty_ue;
3490                 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3491                 ret = request_irq(priv->sfty_ue_irq,
3492                                   stmmac_safety_interrupt,
3493                                   0, int_name, dev);
3494                 if (unlikely(ret < 0)) {
3495                         netdev_err(priv->dev,
3496                                    "%s: alloc sfty ue MSI %d (error: %d)\n",
3497                                    __func__, priv->sfty_ue_irq, ret);
3498                         irq_err = REQ_IRQ_ERR_SFTY_UE;
3499                         goto irq_error;
3500                 }
3501         }
3502
3503         /* Request Rx MSI irq */
3504         for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3505                 if (i >= MTL_MAX_RX_QUEUES)
3506                         break;
3507                 if (priv->rx_irq[i] == 0)
3508                         continue;
3509
3510                 int_name = priv->int_name_rx_irq[i];
3511                 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3512                 ret = request_irq(priv->rx_irq[i],
3513                                   stmmac_msi_intr_rx,
3514                                   0, int_name, &priv->rx_queue[i]);
3515                 if (unlikely(ret < 0)) {
3516                         netdev_err(priv->dev,
3517                                    "%s: alloc rx-%d  MSI %d (error: %d)\n",
3518                                    __func__, i, priv->rx_irq[i], ret);
3519                         irq_err = REQ_IRQ_ERR_RX;
3520                         irq_idx = i;
3521                         goto irq_error;
3522                 }
3523                 cpumask_clear(&cpu_mask);
3524                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3525                 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3526         }
3527
3528         /* Request Tx MSI irq */
3529         for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3530                 if (i >= MTL_MAX_TX_QUEUES)
3531                         break;
3532                 if (priv->tx_irq[i] == 0)
3533                         continue;
3534
3535                 int_name = priv->int_name_tx_irq[i];
3536                 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3537                 ret = request_irq(priv->tx_irq[i],
3538                                   stmmac_msi_intr_tx,
3539                                   0, int_name, &priv->tx_queue[i]);
3540                 if (unlikely(ret < 0)) {
3541                         netdev_err(priv->dev,
3542                                    "%s: alloc tx-%d  MSI %d (error: %d)\n",
3543                                    __func__, i, priv->tx_irq[i], ret);
3544                         irq_err = REQ_IRQ_ERR_TX;
3545                         irq_idx = i;
3546                         goto irq_error;
3547                 }
3548                 cpumask_clear(&cpu_mask);
3549                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3550                 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3551         }
3552
3553         return 0;
3554
3555 irq_error:
3556         stmmac_free_irq(dev, irq_err, irq_idx);
3557         return ret;
3558 }
3559
3560 static int stmmac_request_irq_single(struct net_device *dev)
3561 {
3562         struct stmmac_priv *priv = netdev_priv(dev);
3563         enum request_irq_err irq_err;
3564         int ret;
3565
3566         ret = request_irq(dev->irq, stmmac_interrupt,
3567                           IRQF_SHARED, dev->name, dev);
3568         if (unlikely(ret < 0)) {
3569                 netdev_err(priv->dev,
3570                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3571                            __func__, dev->irq, ret);
3572                 irq_err = REQ_IRQ_ERR_MAC;
3573                 goto irq_error;
3574         }
3575
3576         /* Request the Wake IRQ in case of another line
3577          * is used for WoL
3578          */
3579         if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3580                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3581                                   IRQF_SHARED, dev->name, dev);
3582                 if (unlikely(ret < 0)) {
3583                         netdev_err(priv->dev,
3584                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3585                                    __func__, priv->wol_irq, ret);
3586                         irq_err = REQ_IRQ_ERR_WOL;
3587                         goto irq_error;
3588                 }
3589         }
3590
3591         /* Request the IRQ lines */
3592         if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3593                 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3594                                   IRQF_SHARED, dev->name, dev);
3595                 if (unlikely(ret < 0)) {
3596                         netdev_err(priv->dev,
3597                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3598                                    __func__, priv->lpi_irq, ret);
3599                         irq_err = REQ_IRQ_ERR_LPI;
3600                         goto irq_error;
3601                 }
3602         }
3603
3604         return 0;
3605
3606 irq_error:
3607         stmmac_free_irq(dev, irq_err, 0);
3608         return ret;
3609 }
3610
3611 static int stmmac_request_irq(struct net_device *dev)
3612 {
3613         struct stmmac_priv *priv = netdev_priv(dev);
3614         int ret;
3615
3616         /* Request the IRQ lines */
3617         if (priv->plat->multi_msi_en)
3618                 ret = stmmac_request_irq_multi_msi(dev);
3619         else
3620                 ret = stmmac_request_irq_single(dev);
3621
3622         return ret;
3623 }
3624
3625 /**
3626  *  stmmac_open - open entry point of the driver
3627  *  @dev : pointer to the device structure.
3628  *  Description:
3629  *  This function is the open entry point of the driver.
3630  *  Return value:
3631  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3632  *  file on failure.
3633  */
3634 static int stmmac_open(struct net_device *dev)
3635 {
3636         struct stmmac_priv *priv = netdev_priv(dev);
3637         int mode = priv->plat->phy_interface;
3638         int bfsize = 0;
3639         u32 chan;
3640         int ret;
3641
3642         ret = pm_runtime_get_sync(priv->device);
3643         if (ret < 0) {
3644                 pm_runtime_put_noidle(priv->device);
3645                 return ret;
3646         }
3647
3648         if (priv->hw->pcs != STMMAC_PCS_TBI &&
3649             priv->hw->pcs != STMMAC_PCS_RTBI &&
3650             (!priv->hw->xpcs ||
3651              xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3652                 ret = stmmac_init_phy(dev);
3653                 if (ret) {
3654                         netdev_err(priv->dev,
3655                                    "%s: Cannot attach to PHY (error: %d)\n",
3656                                    __func__, ret);
3657                         goto init_phy_error;
3658                 }
3659         }
3660
3661         /* Extra statistics */
3662         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3663         priv->xstats.threshold = tc;
3664
3665         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3666         if (bfsize < 0)
3667                 bfsize = 0;
3668
3669         if (bfsize < BUF_SIZE_16KiB)
3670                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3671
3672         priv->dma_buf_sz = bfsize;
3673         buf_sz = bfsize;
3674
3675         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3676
3677         if (!priv->dma_tx_size)
3678                 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3679         if (!priv->dma_rx_size)
3680                 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3681
3682         /* Earlier check for TBS */
3683         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3684                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3685                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3686
3687                 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3688                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3689         }
3690
3691         ret = alloc_dma_desc_resources(priv);
3692         if (ret < 0) {
3693                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3694                            __func__);
3695                 goto dma_desc_error;
3696         }
3697
3698         ret = init_dma_desc_rings(dev, GFP_KERNEL);
3699         if (ret < 0) {
3700                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3701                            __func__);
3702                 goto init_error;
3703         }
3704
3705         ret = stmmac_hw_setup(dev, true);
3706         if (ret < 0) {
3707                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3708                 goto init_error;
3709         }
3710
3711         stmmac_init_coalesce(priv);
3712
3713         phylink_start(priv->phylink);
3714         /* We may have called phylink_speed_down before */
3715         phylink_speed_up(priv->phylink);
3716
3717         ret = stmmac_request_irq(dev);
3718         if (ret)
3719                 goto irq_error;
3720
3721         stmmac_enable_all_queues(priv);
3722         netif_tx_start_all_queues(priv->dev);
3723
3724         return 0;
3725
3726 irq_error:
3727         phylink_stop(priv->phylink);
3728
3729         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3730                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3731
3732         stmmac_hw_teardown(dev);
3733 init_error:
3734         free_dma_desc_resources(priv);
3735 dma_desc_error:
3736         phylink_disconnect_phy(priv->phylink);
3737 init_phy_error:
3738         pm_runtime_put(priv->device);
3739         return ret;
3740 }
3741
3742 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3743 {
3744         set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3745
3746         if (priv->fpe_wq)
3747                 destroy_workqueue(priv->fpe_wq);
3748
3749         netdev_info(priv->dev, "FPE workqueue stop");
3750 }
3751
3752 /**
3753  *  stmmac_release - close entry point of the driver
3754  *  @dev : device pointer.
3755  *  Description:
3756  *  This is the stop entry point of the driver.
3757  */
3758 static int stmmac_release(struct net_device *dev)
3759 {
3760         struct stmmac_priv *priv = netdev_priv(dev);
3761         u32 chan;
3762
3763         netif_tx_disable(dev);
3764
3765         if (device_may_wakeup(priv->device))
3766                 phylink_speed_down(priv->phylink, false);
3767         /* Stop and disconnect the PHY */
3768         phylink_stop(priv->phylink);
3769         phylink_disconnect_phy(priv->phylink);
3770
3771         stmmac_disable_all_queues(priv);
3772
3773         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3774                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3775
3776         /* Free the IRQ lines */
3777         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3778
3779         if (priv->eee_enabled) {
3780                 priv->tx_path_in_lpi_mode = false;
3781                 del_timer_sync(&priv->eee_ctrl_timer);
3782         }
3783
3784         /* Stop TX/RX DMA and clear the descriptors */
3785         stmmac_stop_all_dma(priv);
3786
3787         /* Release and free the Rx/Tx resources */
3788         free_dma_desc_resources(priv);
3789
3790         /* Disable the MAC Rx/Tx */
3791         stmmac_mac_set(priv, priv->ioaddr, false);
3792
3793         netif_carrier_off(dev);
3794
3795         stmmac_release_ptp(priv);
3796
3797         pm_runtime_put(priv->device);
3798
3799         if (priv->dma_cap.fpesel)
3800                 stmmac_fpe_stop_wq(priv);
3801
3802         return 0;
3803 }
3804
3805 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3806                                struct stmmac_tx_queue *tx_q)
3807 {
3808         u16 tag = 0x0, inner_tag = 0x0;
3809         u32 inner_type = 0x0;
3810         struct dma_desc *p;
3811
3812         if (!priv->dma_cap.vlins)
3813                 return false;
3814         if (!skb_vlan_tag_present(skb))
3815                 return false;
3816         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3817                 inner_tag = skb_vlan_tag_get(skb);
3818                 inner_type = STMMAC_VLAN_INSERT;
3819         }
3820
3821         tag = skb_vlan_tag_get(skb);
3822
3823         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3824                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3825         else
3826                 p = &tx_q->dma_tx[tx_q->cur_tx];
3827
3828         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3829                 return false;
3830
3831         stmmac_set_tx_owner(priv, p);
3832         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3833         return true;
3834 }
3835
3836 /**
3837  *  stmmac_tso_allocator - close entry point of the driver
3838  *  @priv: driver private structure
3839  *  @des: buffer start address
3840  *  @total_len: total length to fill in descriptors
3841  *  @last_segment: condition for the last descriptor
3842  *  @queue: TX queue index
3843  *  Description:
3844  *  This function fills descriptor and request new descriptors according to
3845  *  buffer length to fill
3846  */
3847 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3848                                  int total_len, bool last_segment, u32 queue)
3849 {
3850         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3851         struct dma_desc *desc;
3852         u32 buff_size;
3853         int tmp_len;
3854
3855         tmp_len = total_len;
3856
3857         while (tmp_len > 0) {
3858                 dma_addr_t curr_addr;
3859
3860                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3861                                                 priv->dma_tx_size);
3862                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3863
3864                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3865                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3866                 else
3867                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3868
3869                 curr_addr = des + (total_len - tmp_len);
3870                 if (priv->dma_cap.addr64 <= 32)
3871                         desc->des0 = cpu_to_le32(curr_addr);
3872                 else
3873                         stmmac_set_desc_addr(priv, desc, curr_addr);
3874
3875                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3876                             TSO_MAX_BUFF_SIZE : tmp_len;
3877
3878                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3879                                 0, 1,
3880                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3881                                 0, 0);
3882
3883                 tmp_len -= TSO_MAX_BUFF_SIZE;
3884         }
3885 }
3886
3887 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3888 {
3889         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3890         int desc_size;
3891
3892         if (likely(priv->extend_desc))
3893                 desc_size = sizeof(struct dma_extended_desc);
3894         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3895                 desc_size = sizeof(struct dma_edesc);
3896         else
3897                 desc_size = sizeof(struct dma_desc);
3898
3899         /* The own bit must be the latest setting done when prepare the
3900          * descriptor and then barrier is needed to make sure that
3901          * all is coherent before granting the DMA engine.
3902          */
3903         wmb();
3904
3905         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3906         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3907 }
3908
3909 /**
3910  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3911  *  @skb : the socket buffer
3912  *  @dev : device pointer
3913  *  Description: this is the transmit function that is called on TSO frames
3914  *  (support available on GMAC4 and newer chips).
3915  *  Diagram below show the ring programming in case of TSO frames:
3916  *
3917  *  First Descriptor
3918  *   --------
3919  *   | DES0 |---> buffer1 = L2/L3/L4 header
3920  *   | DES1 |---> TCP Payload (can continue on next descr...)
3921  *   | DES2 |---> buffer 1 and 2 len
3922  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3923  *   --------
3924  *      |
3925  *     ...
3926  *      |
3927  *   --------
3928  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3929  *   | DES1 | --|
3930  *   | DES2 | --> buffer 1 and 2 len
3931  *   | DES3 |
3932  *   --------
3933  *
3934  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
3935  */
3936 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3937 {
3938         struct dma_desc *desc, *first, *mss_desc = NULL;
3939         struct stmmac_priv *priv = netdev_priv(dev);
3940         int nfrags = skb_shinfo(skb)->nr_frags;
3941         u32 queue = skb_get_queue_mapping(skb);
3942         unsigned int first_entry, tx_packets;
3943         int tmp_pay_len = 0, first_tx;
3944         struct stmmac_tx_queue *tx_q;
3945         bool has_vlan, set_ic;
3946         u8 proto_hdr_len, hdr;
3947         u32 pay_len, mss;
3948         dma_addr_t des;
3949         int i;
3950
3951         tx_q = &priv->tx_queue[queue];
3952         first_tx = tx_q->cur_tx;
3953
3954         /* Compute header lengths */
3955         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3956                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3957                 hdr = sizeof(struct udphdr);
3958         } else {
3959                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3960                 hdr = tcp_hdrlen(skb);
3961         }
3962
3963         /* Desc availability based on threshold should be enough safe */
3964         if (unlikely(stmmac_tx_avail(priv, queue) <
3965                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3966                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3967                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3968                                                                 queue));
3969                         /* This is a hard error, log it. */
3970                         netdev_err(priv->dev,
3971                                    "%s: Tx Ring full when queue awake\n",
3972                                    __func__);
3973                 }
3974                 return NETDEV_TX_BUSY;
3975         }
3976
3977         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3978
3979         mss = skb_shinfo(skb)->gso_size;
3980
3981         /* set new MSS value if needed */
3982         if (mss != tx_q->mss) {
3983                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3984                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3985                 else
3986                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3987
3988                 stmmac_set_mss(priv, mss_desc, mss);
3989                 tx_q->mss = mss;
3990                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3991                                                 priv->dma_tx_size);
3992                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3993         }
3994
3995         if (netif_msg_tx_queued(priv)) {
3996                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3997                         __func__, hdr, proto_hdr_len, pay_len, mss);
3998                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3999                         skb->data_len);
4000         }
4001
4002         /* Check if VLAN can be inserted by HW */
4003         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4004
4005         first_entry = tx_q->cur_tx;
4006         WARN_ON(tx_q->tx_skbuff[first_entry]);
4007
4008         if (tx_q->tbs & STMMAC_TBS_AVAIL)
4009                 desc = &tx_q->dma_entx[first_entry].basic;
4010         else
4011                 desc = &tx_q->dma_tx[first_entry];
4012         first = desc;
4013
4014         if (has_vlan)
4015                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4016
4017         /* first descriptor: fill Headers on Buf1 */
4018         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4019                              DMA_TO_DEVICE);
4020         if (dma_mapping_error(priv->device, des))
4021                 goto dma_map_err;
4022
4023         tx_q->tx_skbuff_dma[first_entry].buf = des;
4024         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4025         tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4026         tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4027
4028         if (priv->dma_cap.addr64 <= 32) {
4029                 first->des0 = cpu_to_le32(des);
4030
4031                 /* Fill start of payload in buff2 of first descriptor */
4032                 if (pay_len)
4033                         first->des1 = cpu_to_le32(des + proto_hdr_len);
4034
4035                 /* If needed take extra descriptors to fill the remaining payload */
4036                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4037         } else {
4038                 stmmac_set_desc_addr(priv, first, des);
4039                 tmp_pay_len = pay_len;
4040                 des += proto_hdr_len;
4041                 pay_len = 0;
4042         }
4043
4044         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4045
4046         /* Prepare fragments */
4047         for (i = 0; i < nfrags; i++) {
4048                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4049
4050                 des = skb_frag_dma_map(priv->device, frag, 0,
4051                                        skb_frag_size(frag),
4052                                        DMA_TO_DEVICE);
4053                 if (dma_mapping_error(priv->device, des))
4054                         goto dma_map_err;
4055
4056                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4057                                      (i == nfrags - 1), queue);
4058
4059                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4060                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4061                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4062                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4063         }
4064
4065         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4066
4067         /* Only the last descriptor gets to point to the skb. */
4068         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4069         tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4070
4071         /* Manage tx mitigation */
4072         tx_packets = (tx_q->cur_tx + 1) - first_tx;
4073         tx_q->tx_count_frames += tx_packets;
4074
4075         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4076                 set_ic = true;
4077         else if (!priv->tx_coal_frames[queue])
4078                 set_ic = false;
4079         else if (tx_packets > priv->tx_coal_frames[queue])
4080                 set_ic = true;
4081         else if ((tx_q->tx_count_frames %
4082                   priv->tx_coal_frames[queue]) < tx_packets)
4083                 set_ic = true;
4084         else
4085                 set_ic = false;
4086
4087         if (set_ic) {
4088                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4089                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4090                 else
4091                         desc = &tx_q->dma_tx[tx_q->cur_tx];
4092
4093                 tx_q->tx_count_frames = 0;
4094                 stmmac_set_tx_ic(priv, desc);
4095                 priv->xstats.tx_set_ic_bit++;
4096         }
4097
4098         /* We've used all descriptors we need for this skb, however,
4099          * advance cur_tx so that it references a fresh descriptor.
4100          * ndo_start_xmit will fill this descriptor the next time it's
4101          * called and stmmac_tx_clean may clean up to this descriptor.
4102          */
4103         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
4104
4105         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4106                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4107                           __func__);
4108                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4109         }
4110
4111         dev->stats.tx_bytes += skb->len;
4112         priv->xstats.tx_tso_frames++;
4113         priv->xstats.tx_tso_nfrags += nfrags;
4114
4115         if (priv->sarc_type)
4116                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4117
4118         skb_tx_timestamp(skb);
4119
4120         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4121                      priv->hwts_tx_en)) {
4122                 /* declare that device is doing timestamping */
4123                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4124                 stmmac_enable_tx_timestamp(priv, first);
4125         }
4126
4127         /* Complete the first descriptor before granting the DMA */
4128         stmmac_prepare_tso_tx_desc(priv, first, 1,
4129                         proto_hdr_len,
4130                         pay_len,
4131                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4132                         hdr / 4, (skb->len - proto_hdr_len));
4133
4134         /* If context desc is used to change MSS */
4135         if (mss_desc) {
4136                 /* Make sure that first descriptor has been completely
4137                  * written, including its own bit. This is because MSS is
4138                  * actually before first descriptor, so we need to make
4139                  * sure that MSS's own bit is the last thing written.
4140                  */
4141                 dma_wmb();
4142                 stmmac_set_tx_owner(priv, mss_desc);
4143         }
4144
4145         if (netif_msg_pktdata(priv)) {
4146                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4147                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4148                         tx_q->cur_tx, first, nfrags);
4149                 pr_info(">>> frame to be transmitted: ");
4150                 print_pkt(skb->data, skb_headlen(skb));
4151         }
4152
4153         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4154
4155         stmmac_flush_tx_descriptors(priv, queue);
4156         stmmac_tx_timer_arm(priv, queue);
4157
4158         return NETDEV_TX_OK;
4159
4160 dma_map_err:
4161         dev_err(priv->device, "Tx dma map failed\n");
4162         dev_kfree_skb(skb);
4163         priv->dev->stats.tx_dropped++;
4164         return NETDEV_TX_OK;
4165 }
4166
4167 /**
4168  *  stmmac_xmit - Tx entry point of the driver
4169  *  @skb : the socket buffer
4170  *  @dev : device pointer
4171  *  Description : this is the tx entry point of the driver.
4172  *  It programs the chain or the ring and supports oversized frames
4173  *  and SG feature.
4174  */
4175 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4176 {
4177         unsigned int first_entry, tx_packets, enh_desc;
4178         struct stmmac_priv *priv = netdev_priv(dev);
4179         unsigned int nopaged_len = skb_headlen(skb);
4180         int i, csum_insertion = 0, is_jumbo = 0;
4181         u32 queue = skb_get_queue_mapping(skb);
4182         int nfrags = skb_shinfo(skb)->nr_frags;
4183         int gso = skb_shinfo(skb)->gso_type;
4184         struct dma_edesc *tbs_desc = NULL;
4185         struct dma_desc *desc, *first;
4186         struct stmmac_tx_queue *tx_q;
4187         bool has_vlan, set_ic;
4188         int entry, first_tx;
4189         dma_addr_t des;
4190
4191         tx_q = &priv->tx_queue[queue];
4192         first_tx = tx_q->cur_tx;
4193
4194         if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4195                 stmmac_disable_eee_mode(priv);
4196
4197         /* Manage oversized TCP frames for GMAC4 device */
4198         if (skb_is_gso(skb) && priv->tso) {
4199                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4200                         return stmmac_tso_xmit(skb, dev);
4201                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4202                         return stmmac_tso_xmit(skb, dev);
4203         }
4204
4205         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4206                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4207                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4208                                                                 queue));
4209                         /* This is a hard error, log it. */
4210                         netdev_err(priv->dev,
4211                                    "%s: Tx Ring full when queue awake\n",
4212                                    __func__);
4213                 }
4214                 return NETDEV_TX_BUSY;
4215         }
4216
4217         /* Check if VLAN can be inserted by HW */
4218         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4219
4220         entry = tx_q->cur_tx;
4221         first_entry = entry;
4222         WARN_ON(tx_q->tx_skbuff[first_entry]);
4223
4224         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4225
4226         if (likely(priv->extend_desc))
4227                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4228         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4229                 desc = &tx_q->dma_entx[entry].basic;
4230         else
4231                 desc = tx_q->dma_tx + entry;
4232
4233         first = desc;
4234
4235         if (has_vlan)
4236                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4237
4238         enh_desc = priv->plat->enh_desc;
4239         /* To program the descriptors according to the size of the frame */
4240         if (enh_desc)
4241                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4242
4243         if (unlikely(is_jumbo)) {
4244                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4245                 if (unlikely(entry < 0) && (entry != -EINVAL))
4246                         goto dma_map_err;
4247         }
4248
4249         for (i = 0; i < nfrags; i++) {
4250                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4251                 int len = skb_frag_size(frag);
4252                 bool last_segment = (i == (nfrags - 1));
4253
4254                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4255                 WARN_ON(tx_q->tx_skbuff[entry]);
4256
4257                 if (likely(priv->extend_desc))
4258                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4259                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4260                         desc = &tx_q->dma_entx[entry].basic;
4261                 else
4262                         desc = tx_q->dma_tx + entry;
4263
4264                 des = skb_frag_dma_map(priv->device, frag, 0, len,
4265                                        DMA_TO_DEVICE);
4266                 if (dma_mapping_error(priv->device, des))
4267                         goto dma_map_err; /* should reuse desc w/o issues */
4268
4269                 tx_q->tx_skbuff_dma[entry].buf = des;
4270
4271                 stmmac_set_desc_addr(priv, desc, des);
4272
4273                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4274                 tx_q->tx_skbuff_dma[entry].len = len;
4275                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4276                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4277
4278                 /* Prepare the descriptor and set the own bit too */
4279                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4280                                 priv->mode, 1, last_segment, skb->len);
4281         }
4282
4283         /* Only the last descriptor gets to point to the skb. */
4284         tx_q->tx_skbuff[entry] = skb;
4285         tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4286
4287         /* According to the coalesce parameter the IC bit for the latest
4288          * segment is reset and the timer re-started to clean the tx status.
4289          * This approach takes care about the fragments: desc is the first
4290          * element in case of no SG.
4291          */
4292         tx_packets = (entry + 1) - first_tx;
4293         tx_q->tx_count_frames += tx_packets;
4294
4295         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4296                 set_ic = true;
4297         else if (!priv->tx_coal_frames[queue])
4298                 set_ic = false;
4299         else if (tx_packets > priv->tx_coal_frames[queue])
4300                 set_ic = true;
4301         else if ((tx_q->tx_count_frames %
4302                   priv->tx_coal_frames[queue]) < tx_packets)
4303                 set_ic = true;
4304         else
4305                 set_ic = false;
4306
4307         if (set_ic) {
4308                 if (likely(priv->extend_desc))
4309                         desc = &tx_q->dma_etx[entry].basic;
4310                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4311                         desc = &tx_q->dma_entx[entry].basic;
4312                 else
4313                         desc = &tx_q->dma_tx[entry];
4314
4315                 tx_q->tx_count_frames = 0;
4316                 stmmac_set_tx_ic(priv, desc);
4317                 priv->xstats.tx_set_ic_bit++;
4318         }
4319
4320         /* We've used all descriptors we need for this skb, however,
4321          * advance cur_tx so that it references a fresh descriptor.
4322          * ndo_start_xmit will fill this descriptor the next time it's
4323          * called and stmmac_tx_clean may clean up to this descriptor.
4324          */
4325         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4326         tx_q->cur_tx = entry;
4327
4328         if (netif_msg_pktdata(priv)) {
4329                 netdev_dbg(priv->dev,
4330                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4331                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4332                            entry, first, nfrags);
4333
4334                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4335                 print_pkt(skb->data, skb->len);
4336         }
4337
4338         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4339                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4340                           __func__);
4341                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4342         }
4343
4344         dev->stats.tx_bytes += skb->len;
4345
4346         if (priv->sarc_type)
4347                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4348
4349         skb_tx_timestamp(skb);
4350
4351         /* Ready to fill the first descriptor and set the OWN bit w/o any
4352          * problems because all the descriptors are actually ready to be
4353          * passed to the DMA engine.
4354          */
4355         if (likely(!is_jumbo)) {
4356                 bool last_segment = (nfrags == 0);
4357
4358                 des = dma_map_single(priv->device, skb->data,
4359                                      nopaged_len, DMA_TO_DEVICE);
4360                 if (dma_mapping_error(priv->device, des))
4361                         goto dma_map_err;
4362
4363                 tx_q->tx_skbuff_dma[first_entry].buf = des;
4364                 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4365                 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4366
4367                 stmmac_set_desc_addr(priv, first, des);
4368
4369                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4370                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4371
4372                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4373                              priv->hwts_tx_en)) {
4374                         /* declare that device is doing timestamping */
4375                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4376                         stmmac_enable_tx_timestamp(priv, first);
4377                 }
4378
4379                 /* Prepare the first descriptor setting the OWN bit too */
4380                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4381                                 csum_insertion, priv->mode, 0, last_segment,
4382                                 skb->len);
4383         }
4384
4385         if (tx_q->tbs & STMMAC_TBS_EN) {
4386                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4387
4388                 tbs_desc = &tx_q->dma_entx[first_entry];
4389                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4390         }
4391
4392         stmmac_set_tx_owner(priv, first);
4393
4394         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4395
4396         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4397
4398         stmmac_flush_tx_descriptors(priv, queue);
4399         stmmac_tx_timer_arm(priv, queue);
4400
4401         return NETDEV_TX_OK;
4402
4403 dma_map_err:
4404         netdev_err(priv->dev, "Tx DMA map failed\n");
4405         dev_kfree_skb(skb);
4406         priv->dev->stats.tx_dropped++;
4407         return NETDEV_TX_OK;
4408 }
4409
4410 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4411 {
4412         struct vlan_ethhdr *veth;
4413         __be16 vlan_proto;
4414         u16 vlanid;
4415
4416         veth = (struct vlan_ethhdr *)skb->data;
4417         vlan_proto = veth->h_vlan_proto;
4418
4419         if ((vlan_proto == htons(ETH_P_8021Q) &&
4420              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4421             (vlan_proto == htons(ETH_P_8021AD) &&
4422              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4423                 /* pop the vlan tag */
4424                 vlanid = ntohs(veth->h_vlan_TCI);
4425                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4426                 skb_pull(skb, VLAN_HLEN);
4427                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4428         }
4429 }
4430
4431 /**
4432  * stmmac_rx_refill - refill used skb preallocated buffers
4433  * @priv: driver private structure
4434  * @queue: RX queue index
4435  * Description : this is to reallocate the skb for the reception process
4436  * that is based on zero-copy.
4437  */
4438 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4439 {
4440         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4441         int dirty = stmmac_rx_dirty(priv, queue);
4442         unsigned int entry = rx_q->dirty_rx;
4443         gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4444
4445         if (priv->dma_cap.addr64 <= 32)
4446                 gfp |= GFP_DMA32;
4447
4448         while (dirty-- > 0) {
4449                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4450                 struct dma_desc *p;
4451                 bool use_rx_wd;
4452
4453                 if (priv->extend_desc)
4454                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4455                 else
4456                         p = rx_q->dma_rx + entry;
4457
4458                 if (!buf->page) {
4459                         buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4460                         if (!buf->page)
4461                                 break;
4462                 }
4463
4464                 if (priv->sph && !buf->sec_page) {
4465                         buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4466                         if (!buf->sec_page)
4467                                 break;
4468
4469                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4470                 }
4471
4472                 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4473
4474                 stmmac_set_desc_addr(priv, p, buf->addr);
4475                 if (priv->sph)
4476                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4477                 else
4478                         stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4479                 stmmac_refill_desc3(priv, rx_q, p);
4480
4481                 rx_q->rx_count_frames++;
4482                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4483                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4484                         rx_q->rx_count_frames = 0;
4485
4486                 use_rx_wd = !priv->rx_coal_frames[queue];
4487                 use_rx_wd |= rx_q->rx_count_frames > 0;
4488                 if (!priv->use_riwt)
4489                         use_rx_wd = false;
4490
4491                 dma_wmb();
4492                 stmmac_set_rx_owner(priv, p, use_rx_wd);
4493
4494                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4495         }
4496         rx_q->dirty_rx = entry;
4497         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4498                             (rx_q->dirty_rx * sizeof(struct dma_desc));
4499         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4500 }
4501
4502 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4503                                        struct dma_desc *p,
4504                                        int status, unsigned int len)
4505 {
4506         unsigned int plen = 0, hlen = 0;
4507         int coe = priv->hw->rx_csum;
4508
4509         /* Not first descriptor, buffer is always zero */
4510         if (priv->sph && len)
4511                 return 0;
4512
4513         /* First descriptor, get split header length */
4514         stmmac_get_rx_header_len(priv, p, &hlen);
4515         if (priv->sph && hlen) {
4516                 priv->xstats.rx_split_hdr_pkt_n++;
4517                 return hlen;
4518         }
4519
4520         /* First descriptor, not last descriptor and not split header */
4521         if (status & rx_not_ls)
4522                 return priv->dma_buf_sz;
4523
4524         plen = stmmac_get_rx_frame_len(priv, p, coe);
4525
4526         /* First descriptor and last descriptor and not split header */
4527         return min_t(unsigned int, priv->dma_buf_sz, plen);
4528 }
4529
4530 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4531                                        struct dma_desc *p,
4532                                        int status, unsigned int len)
4533 {
4534         int coe = priv->hw->rx_csum;
4535         unsigned int plen = 0;
4536
4537         /* Not split header, buffer is not available */
4538         if (!priv->sph)
4539                 return 0;
4540
4541         /* Not last descriptor */
4542         if (status & rx_not_ls)
4543                 return priv->dma_buf_sz;
4544
4545         plen = stmmac_get_rx_frame_len(priv, p, coe);
4546
4547         /* Last descriptor */
4548         return plen - len;
4549 }
4550
4551 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4552                                 struct xdp_frame *xdpf, bool dma_map)
4553 {
4554         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4555         unsigned int entry = tx_q->cur_tx;
4556         struct dma_desc *tx_desc;
4557         dma_addr_t dma_addr;
4558         bool set_ic;
4559
4560         if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4561                 return STMMAC_XDP_CONSUMED;
4562
4563         if (likely(priv->extend_desc))
4564                 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4565         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4566                 tx_desc = &tx_q->dma_entx[entry].basic;
4567         else
4568                 tx_desc = tx_q->dma_tx + entry;
4569
4570         if (dma_map) {
4571                 dma_addr = dma_map_single(priv->device, xdpf->data,
4572                                           xdpf->len, DMA_TO_DEVICE);
4573                 if (dma_mapping_error(priv->device, dma_addr))
4574                         return STMMAC_XDP_CONSUMED;
4575
4576                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4577         } else {
4578                 struct page *page = virt_to_page(xdpf->data);
4579
4580                 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4581                            xdpf->headroom;
4582                 dma_sync_single_for_device(priv->device, dma_addr,
4583                                            xdpf->len, DMA_BIDIRECTIONAL);
4584
4585                 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4586         }
4587
4588         tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4589         tx_q->tx_skbuff_dma[entry].map_as_page = false;
4590         tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4591         tx_q->tx_skbuff_dma[entry].last_segment = true;
4592         tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4593
4594         tx_q->xdpf[entry] = xdpf;
4595
4596         stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4597
4598         stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4599                                true, priv->mode, true, true,
4600                                xdpf->len);
4601
4602         tx_q->tx_count_frames++;
4603
4604         if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4605                 set_ic = true;
4606         else
4607                 set_ic = false;
4608
4609         if (set_ic) {
4610                 tx_q->tx_count_frames = 0;
4611                 stmmac_set_tx_ic(priv, tx_desc);
4612                 priv->xstats.tx_set_ic_bit++;
4613         }
4614
4615         stmmac_enable_dma_transmission(priv, priv->ioaddr);
4616
4617         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4618         tx_q->cur_tx = entry;
4619
4620         return STMMAC_XDP_TX;
4621 }
4622
4623 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4624                                    int cpu)
4625 {
4626         int index = cpu;
4627
4628         if (unlikely(index < 0))
4629                 index = 0;
4630
4631         while (index >= priv->plat->tx_queues_to_use)
4632                 index -= priv->plat->tx_queues_to_use;
4633
4634         return index;
4635 }
4636
4637 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4638                                 struct xdp_buff *xdp)
4639 {
4640         struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4641         int cpu = smp_processor_id();
4642         struct netdev_queue *nq;
4643         int queue;
4644         int res;
4645
4646         if (unlikely(!xdpf))
4647                 return STMMAC_XDP_CONSUMED;
4648
4649         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4650         nq = netdev_get_tx_queue(priv->dev, queue);
4651
4652         __netif_tx_lock(nq, cpu);
4653         /* Avoids TX time-out as we are sharing with slow path */
4654         txq_trans_cond_update(nq);
4655
4656         res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4657         if (res == STMMAC_XDP_TX)
4658                 stmmac_flush_tx_descriptors(priv, queue);
4659
4660         __netif_tx_unlock(nq);
4661
4662         return res;
4663 }
4664
4665 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4666                                  struct bpf_prog *prog,
4667                                  struct xdp_buff *xdp)
4668 {
4669         u32 act;
4670         int res;
4671
4672         act = bpf_prog_run_xdp(prog, xdp);
4673         switch (act) {
4674         case XDP_PASS:
4675                 res = STMMAC_XDP_PASS;
4676                 break;
4677         case XDP_TX:
4678                 res = stmmac_xdp_xmit_back(priv, xdp);
4679                 break;
4680         case XDP_REDIRECT:
4681                 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4682                         res = STMMAC_XDP_CONSUMED;
4683                 else
4684                         res = STMMAC_XDP_REDIRECT;
4685                 break;
4686         default:
4687                 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4688                 fallthrough;
4689         case XDP_ABORTED:
4690                 trace_xdp_exception(priv->dev, prog, act);
4691                 fallthrough;
4692         case XDP_DROP:
4693                 res = STMMAC_XDP_CONSUMED;
4694                 break;
4695         }
4696
4697         return res;
4698 }
4699
4700 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4701                                            struct xdp_buff *xdp)
4702 {
4703         struct bpf_prog *prog;
4704         int res;
4705
4706         prog = READ_ONCE(priv->xdp_prog);
4707         if (!prog) {
4708                 res = STMMAC_XDP_PASS;
4709                 goto out;
4710         }
4711
4712         res = __stmmac_xdp_run_prog(priv, prog, xdp);
4713 out:
4714         return ERR_PTR(-res);
4715 }
4716
4717 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4718                                    int xdp_status)
4719 {
4720         int cpu = smp_processor_id();
4721         int queue;
4722
4723         queue = stmmac_xdp_get_tx_queue(priv, cpu);
4724
4725         if (xdp_status & STMMAC_XDP_TX)
4726                 stmmac_tx_timer_arm(priv, queue);
4727
4728         if (xdp_status & STMMAC_XDP_REDIRECT)
4729                 xdp_do_flush();
4730 }
4731
4732 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4733                                                struct xdp_buff *xdp)
4734 {
4735         unsigned int metasize = xdp->data - xdp->data_meta;
4736         unsigned int datasize = xdp->data_end - xdp->data;
4737         struct sk_buff *skb;
4738
4739         skb = __napi_alloc_skb(&ch->rxtx_napi,
4740                                xdp->data_end - xdp->data_hard_start,
4741                                GFP_ATOMIC | __GFP_NOWARN);
4742         if (unlikely(!skb))
4743                 return NULL;
4744
4745         skb_reserve(skb, xdp->data - xdp->data_hard_start);
4746         memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4747         if (metasize)
4748                 skb_metadata_set(skb, metasize);
4749
4750         return skb;
4751 }
4752
4753 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4754                                    struct dma_desc *p, struct dma_desc *np,
4755                                    struct xdp_buff *xdp)
4756 {
4757         struct stmmac_channel *ch = &priv->channel[queue];
4758         unsigned int len = xdp->data_end - xdp->data;
4759         enum pkt_hash_types hash_type;
4760         int coe = priv->hw->rx_csum;
4761         struct sk_buff *skb;
4762         u32 hash;
4763
4764         skb = stmmac_construct_skb_zc(ch, xdp);
4765         if (!skb) {
4766                 priv->dev->stats.rx_dropped++;
4767                 return;
4768         }
4769
4770         stmmac_get_rx_hwtstamp(priv, p, np, skb);
4771         stmmac_rx_vlan(priv->dev, skb);
4772         skb->protocol = eth_type_trans(skb, priv->dev);
4773
4774         if (unlikely(!coe))
4775                 skb_checksum_none_assert(skb);
4776         else
4777                 skb->ip_summed = CHECKSUM_UNNECESSARY;
4778
4779         if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4780                 skb_set_hash(skb, hash, hash_type);
4781
4782         skb_record_rx_queue(skb, queue);
4783         napi_gro_receive(&ch->rxtx_napi, skb);
4784
4785         priv->dev->stats.rx_packets++;
4786         priv->dev->stats.rx_bytes += len;
4787 }
4788
4789 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4790 {
4791         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4792         unsigned int entry = rx_q->dirty_rx;
4793         struct dma_desc *rx_desc = NULL;
4794         bool ret = true;
4795
4796         budget = min(budget, stmmac_rx_dirty(priv, queue));
4797
4798         while (budget-- > 0 && entry != rx_q->cur_rx) {
4799                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4800                 dma_addr_t dma_addr;
4801                 bool use_rx_wd;
4802
4803                 if (!buf->xdp) {
4804                         buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4805                         if (!buf->xdp) {
4806                                 ret = false;
4807                                 break;
4808                         }
4809                 }
4810
4811                 if (priv->extend_desc)
4812                         rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4813                 else
4814                         rx_desc = rx_q->dma_rx + entry;
4815
4816                 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4817                 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4818                 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4819                 stmmac_refill_desc3(priv, rx_q, rx_desc);
4820
4821                 rx_q->rx_count_frames++;
4822                 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4823                 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4824                         rx_q->rx_count_frames = 0;
4825
4826                 use_rx_wd = !priv->rx_coal_frames[queue];
4827                 use_rx_wd |= rx_q->rx_count_frames > 0;
4828                 if (!priv->use_riwt)
4829                         use_rx_wd = false;
4830
4831                 dma_wmb();
4832                 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4833
4834                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4835         }
4836
4837         if (rx_desc) {
4838                 rx_q->dirty_rx = entry;
4839                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4840                                      (rx_q->dirty_rx * sizeof(struct dma_desc));
4841                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4842         }
4843
4844         return ret;
4845 }
4846
4847 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4848 {
4849         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4850         unsigned int count = 0, error = 0, len = 0;
4851         int dirty = stmmac_rx_dirty(priv, queue);
4852         unsigned int next_entry = rx_q->cur_rx;
4853         unsigned int desc_size;
4854         struct bpf_prog *prog;
4855         bool failure = false;
4856         int xdp_status = 0;
4857         int status = 0;
4858
4859         if (netif_msg_rx_status(priv)) {
4860                 void *rx_head;
4861
4862                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4863                 if (priv->extend_desc) {
4864                         rx_head = (void *)rx_q->dma_erx;
4865                         desc_size = sizeof(struct dma_extended_desc);
4866                 } else {
4867                         rx_head = (void *)rx_q->dma_rx;
4868                         desc_size = sizeof(struct dma_desc);
4869                 }
4870
4871                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4872                                     rx_q->dma_rx_phy, desc_size);
4873         }
4874         while (count < limit) {
4875                 struct stmmac_rx_buffer *buf;
4876                 unsigned int buf1_len = 0;
4877                 struct dma_desc *np, *p;
4878                 int entry;
4879                 int res;
4880
4881                 if (!count && rx_q->state_saved) {
4882                         error = rx_q->state.error;
4883                         len = rx_q->state.len;
4884                 } else {
4885                         rx_q->state_saved = false;
4886                         error = 0;
4887                         len = 0;
4888                 }
4889
4890                 if (count >= limit)
4891                         break;
4892
4893 read_again:
4894                 buf1_len = 0;
4895                 entry = next_entry;
4896                 buf = &rx_q->buf_pool[entry];
4897
4898                 if (dirty >= STMMAC_RX_FILL_BATCH) {
4899                         failure = failure ||
4900                                   !stmmac_rx_refill_zc(priv, queue, dirty);
4901                         dirty = 0;
4902                 }
4903
4904                 if (priv->extend_desc)
4905                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
4906                 else
4907                         p = rx_q->dma_rx + entry;
4908
4909                 /* read the status of the incoming frame */
4910                 status = stmmac_rx_status(priv, &priv->dev->stats,
4911                                           &priv->xstats, p);
4912                 /* check if managed by the DMA otherwise go ahead */
4913                 if (unlikely(status & dma_own))
4914                         break;
4915
4916                 /* Prefetch the next RX descriptor */
4917                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4918                                                 priv->dma_rx_size);
4919                 next_entry = rx_q->cur_rx;
4920
4921                 if (priv->extend_desc)
4922                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4923                 else
4924                         np = rx_q->dma_rx + next_entry;
4925
4926                 prefetch(np);
4927
4928                 /* Ensure a valid XSK buffer before proceed */
4929                 if (!buf->xdp)
4930                         break;
4931
4932                 if (priv->extend_desc)
4933                         stmmac_rx_extended_status(priv, &priv->dev->stats,
4934                                                   &priv->xstats,
4935                                                   rx_q->dma_erx + entry);
4936                 if (unlikely(status == discard_frame)) {
4937                         xsk_buff_free(buf->xdp);
4938                         buf->xdp = NULL;
4939                         dirty++;
4940                         error = 1;
4941                         if (!priv->hwts_rx_en)
4942                                 priv->dev->stats.rx_errors++;
4943                 }
4944
4945                 if (unlikely(error && (status & rx_not_ls)))
4946                         goto read_again;
4947                 if (unlikely(error)) {
4948                         count++;
4949                         continue;
4950                 }
4951
4952                 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
4953                 if (likely(status & rx_not_ls)) {
4954                         xsk_buff_free(buf->xdp);
4955                         buf->xdp = NULL;
4956                         dirty++;
4957                         count++;
4958                         goto read_again;
4959                 }
4960
4961                 /* XDP ZC Frame only support primary buffers for now */
4962                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4963                 len += buf1_len;
4964
4965                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4966                  * Type frames (LLC/LLC-SNAP)
4967                  *
4968                  * llc_snap is never checked in GMAC >= 4, so this ACS
4969                  * feature is always disabled and packets need to be
4970                  * stripped manually.
4971                  */
4972                 if (likely(!(status & rx_not_ls)) &&
4973                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4974                      unlikely(status != llc_snap))) {
4975                         buf1_len -= ETH_FCS_LEN;
4976                         len -= ETH_FCS_LEN;
4977                 }
4978
4979                 /* RX buffer is good and fit into a XSK pool buffer */
4980                 buf->xdp->data_end = buf->xdp->data + buf1_len;
4981                 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
4982
4983                 prog = READ_ONCE(priv->xdp_prog);
4984                 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
4985
4986                 switch (res) {
4987                 case STMMAC_XDP_PASS:
4988                         stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
4989                         xsk_buff_free(buf->xdp);
4990                         break;
4991                 case STMMAC_XDP_CONSUMED:
4992                         xsk_buff_free(buf->xdp);
4993                         priv->dev->stats.rx_dropped++;
4994                         break;
4995                 case STMMAC_XDP_TX:
4996                 case STMMAC_XDP_REDIRECT:
4997                         xdp_status |= res;
4998                         break;
4999                 }
5000
5001                 buf->xdp = NULL;
5002                 dirty++;
5003                 count++;
5004         }
5005
5006         if (status & rx_not_ls) {
5007                 rx_q->state_saved = true;
5008                 rx_q->state.error = error;
5009                 rx_q->state.len = len;
5010         }
5011
5012         stmmac_finalize_xdp_rx(priv, xdp_status);
5013
5014         priv->xstats.rx_pkt_n += count;
5015         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5016
5017         if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5018                 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5019                         xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5020                 else
5021                         xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5022
5023                 return (int)count;
5024         }
5025
5026         return failure ? limit : (int)count;
5027 }
5028
5029 /**
5030  * stmmac_rx - manage the receive process
5031  * @priv: driver private structure
5032  * @limit: napi bugget
5033  * @queue: RX queue index.
5034  * Description :  this the function called by the napi poll method.
5035  * It gets all the frames inside the ring.
5036  */
5037 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5038 {
5039         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5040         struct stmmac_channel *ch = &priv->channel[queue];
5041         unsigned int count = 0, error = 0, len = 0;
5042         int status = 0, coe = priv->hw->rx_csum;
5043         unsigned int next_entry = rx_q->cur_rx;
5044         enum dma_data_direction dma_dir;
5045         unsigned int desc_size;
5046         struct sk_buff *skb = NULL;
5047         struct xdp_buff xdp;
5048         int xdp_status = 0;
5049         int buf_sz;
5050
5051         dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5052         buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5053
5054         if (netif_msg_rx_status(priv)) {
5055                 void *rx_head;
5056
5057                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5058                 if (priv->extend_desc) {
5059                         rx_head = (void *)rx_q->dma_erx;
5060                         desc_size = sizeof(struct dma_extended_desc);
5061                 } else {
5062                         rx_head = (void *)rx_q->dma_rx;
5063                         desc_size = sizeof(struct dma_desc);
5064                 }
5065
5066                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5067                                     rx_q->dma_rx_phy, desc_size);
5068         }
5069         while (count < limit) {
5070                 unsigned int buf1_len = 0, buf2_len = 0;
5071                 enum pkt_hash_types hash_type;
5072                 struct stmmac_rx_buffer *buf;
5073                 struct dma_desc *np, *p;
5074                 int entry;
5075                 u32 hash;
5076
5077                 if (!count && rx_q->state_saved) {
5078                         skb = rx_q->state.skb;
5079                         error = rx_q->state.error;
5080                         len = rx_q->state.len;
5081                 } else {
5082                         rx_q->state_saved = false;
5083                         skb = NULL;
5084                         error = 0;
5085                         len = 0;
5086                 }
5087
5088                 if (count >= limit)
5089                         break;
5090
5091 read_again:
5092                 buf1_len = 0;
5093                 buf2_len = 0;
5094                 entry = next_entry;
5095                 buf = &rx_q->buf_pool[entry];
5096
5097                 if (priv->extend_desc)
5098                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
5099                 else
5100                         p = rx_q->dma_rx + entry;
5101
5102                 /* read the status of the incoming frame */
5103                 status = stmmac_rx_status(priv, &priv->dev->stats,
5104                                 &priv->xstats, p);
5105                 /* check if managed by the DMA otherwise go ahead */
5106                 if (unlikely(status & dma_own))
5107                         break;
5108
5109                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5110                                                 priv->dma_rx_size);
5111                 next_entry = rx_q->cur_rx;
5112
5113                 if (priv->extend_desc)
5114                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5115                 else
5116                         np = rx_q->dma_rx + next_entry;
5117
5118                 prefetch(np);
5119
5120                 if (priv->extend_desc)
5121                         stmmac_rx_extended_status(priv, &priv->dev->stats,
5122                                         &priv->xstats, rx_q->dma_erx + entry);
5123                 if (unlikely(status == discard_frame)) {
5124                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5125                         buf->page = NULL;
5126                         error = 1;
5127                         if (!priv->hwts_rx_en)
5128                                 priv->dev->stats.rx_errors++;
5129                 }
5130
5131                 if (unlikely(error && (status & rx_not_ls)))
5132                         goto read_again;
5133                 if (unlikely(error)) {
5134                         dev_kfree_skb(skb);
5135                         skb = NULL;
5136                         count++;
5137                         continue;
5138                 }
5139
5140                 /* Buffer is good. Go on. */
5141
5142                 prefetch(page_address(buf->page) + buf->page_offset);
5143                 if (buf->sec_page)
5144                         prefetch(page_address(buf->sec_page));
5145
5146                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5147                 len += buf1_len;
5148                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5149                 len += buf2_len;
5150
5151                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5152                  * Type frames (LLC/LLC-SNAP)
5153                  *
5154                  * llc_snap is never checked in GMAC >= 4, so this ACS
5155                  * feature is always disabled and packets need to be
5156                  * stripped manually.
5157                  */
5158                 if (likely(!(status & rx_not_ls)) &&
5159                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5160                      unlikely(status != llc_snap))) {
5161                         if (buf2_len) {
5162                                 buf2_len -= ETH_FCS_LEN;
5163                                 len -= ETH_FCS_LEN;
5164                         } else if (buf1_len) {
5165                                 buf1_len -= ETH_FCS_LEN;
5166                                 len -= ETH_FCS_LEN;
5167                         }
5168                 }
5169
5170                 if (!skb) {
5171                         unsigned int pre_len, sync_len;
5172
5173                         dma_sync_single_for_cpu(priv->device, buf->addr,
5174                                                 buf1_len, dma_dir);
5175
5176                         xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5177                         xdp_prepare_buff(&xdp, page_address(buf->page),
5178                                          buf->page_offset, buf1_len, false);
5179
5180                         pre_len = xdp.data_end - xdp.data_hard_start -
5181                                   buf->page_offset;
5182                         skb = stmmac_xdp_run_prog(priv, &xdp);
5183                         /* Due xdp_adjust_tail: DMA sync for_device
5184                          * cover max len CPU touch
5185                          */
5186                         sync_len = xdp.data_end - xdp.data_hard_start -
5187                                    buf->page_offset;
5188                         sync_len = max(sync_len, pre_len);
5189
5190                         /* For Not XDP_PASS verdict */
5191                         if (IS_ERR(skb)) {
5192                                 unsigned int xdp_res = -PTR_ERR(skb);
5193
5194                                 if (xdp_res & STMMAC_XDP_CONSUMED) {
5195                                         page_pool_put_page(rx_q->page_pool,
5196                                                            virt_to_head_page(xdp.data),
5197                                                            sync_len, true);
5198                                         buf->page = NULL;
5199                                         priv->dev->stats.rx_dropped++;
5200
5201                                         /* Clear skb as it was set as
5202                                          * status by XDP program.
5203                                          */
5204                                         skb = NULL;
5205
5206                                         if (unlikely((status & rx_not_ls)))
5207                                                 goto read_again;
5208
5209                                         count++;
5210                                         continue;
5211                                 } else if (xdp_res & (STMMAC_XDP_TX |
5212                                                       STMMAC_XDP_REDIRECT)) {
5213                                         xdp_status |= xdp_res;
5214                                         buf->page = NULL;
5215                                         skb = NULL;
5216                                         count++;
5217                                         continue;
5218                                 }
5219                         }
5220                 }
5221
5222                 if (!skb) {
5223                         /* XDP program may expand or reduce tail */
5224                         buf1_len = xdp.data_end - xdp.data;
5225
5226                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5227                         if (!skb) {
5228                                 priv->dev->stats.rx_dropped++;
5229                                 count++;
5230                                 goto drain_data;
5231                         }
5232
5233                         /* XDP program may adjust header */
5234                         skb_copy_to_linear_data(skb, xdp.data, buf1_len);
5235                         skb_put(skb, buf1_len);
5236
5237                         /* Data payload copied into SKB, page ready for recycle */
5238                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
5239                         buf->page = NULL;
5240                 } else if (buf1_len) {
5241                         dma_sync_single_for_cpu(priv->device, buf->addr,
5242                                                 buf1_len, dma_dir);
5243                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5244                                         buf->page, buf->page_offset, buf1_len,
5245                                         priv->dma_buf_sz);
5246
5247                         /* Data payload appended into SKB */
5248                         page_pool_release_page(rx_q->page_pool, buf->page);
5249                         buf->page = NULL;
5250                 }
5251
5252                 if (buf2_len) {
5253                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5254                                                 buf2_len, dma_dir);
5255                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5256                                         buf->sec_page, 0, buf2_len,
5257                                         priv->dma_buf_sz);
5258
5259                         /* Data payload appended into SKB */
5260                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
5261                         buf->sec_page = NULL;
5262                 }
5263
5264 drain_data:
5265                 if (likely(status & rx_not_ls))
5266                         goto read_again;
5267                 if (!skb)
5268                         continue;
5269
5270                 /* Got entire packet into SKB. Finish it. */
5271
5272                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5273                 stmmac_rx_vlan(priv->dev, skb);
5274                 skb->protocol = eth_type_trans(skb, priv->dev);
5275
5276                 if (unlikely(!coe))
5277                         skb_checksum_none_assert(skb);
5278                 else
5279                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5280
5281                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5282                         skb_set_hash(skb, hash, hash_type);
5283
5284                 skb_record_rx_queue(skb, queue);
5285                 napi_gro_receive(&ch->rx_napi, skb);
5286                 skb = NULL;
5287
5288                 priv->dev->stats.rx_packets++;
5289                 priv->dev->stats.rx_bytes += len;
5290                 count++;
5291         }
5292
5293         if (status & rx_not_ls || skb) {
5294                 rx_q->state_saved = true;
5295                 rx_q->state.skb = skb;
5296                 rx_q->state.error = error;
5297                 rx_q->state.len = len;
5298         }
5299
5300         stmmac_finalize_xdp_rx(priv, xdp_status);
5301
5302         stmmac_rx_refill(priv, queue);
5303
5304         priv->xstats.rx_pkt_n += count;
5305         priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5306
5307         return count;
5308 }
5309
5310 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5311 {
5312         struct stmmac_channel *ch =
5313                 container_of(napi, struct stmmac_channel, rx_napi);
5314         struct stmmac_priv *priv = ch->priv_data;
5315         u32 chan = ch->index;
5316         int work_done;
5317
5318         priv->xstats.napi_poll++;
5319
5320         work_done = stmmac_rx(priv, budget, chan);
5321         if (work_done < budget && napi_complete_done(napi, work_done)) {
5322                 unsigned long flags;
5323
5324                 spin_lock_irqsave(&ch->lock, flags);
5325                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5326                 spin_unlock_irqrestore(&ch->lock, flags);
5327         }
5328
5329         return work_done;
5330 }
5331
5332 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5333 {
5334         struct stmmac_channel *ch =
5335                 container_of(napi, struct stmmac_channel, tx_napi);
5336         struct stmmac_priv *priv = ch->priv_data;
5337         u32 chan = ch->index;
5338         int work_done;
5339
5340         priv->xstats.napi_poll++;
5341
5342         work_done = stmmac_tx_clean(priv, budget, chan);
5343         work_done = min(work_done, budget);
5344
5345         if (work_done < budget && napi_complete_done(napi, work_done)) {
5346                 unsigned long flags;
5347
5348                 spin_lock_irqsave(&ch->lock, flags);
5349                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5350                 spin_unlock_irqrestore(&ch->lock, flags);
5351         }
5352
5353         return work_done;
5354 }
5355
5356 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5357 {
5358         struct stmmac_channel *ch =
5359                 container_of(napi, struct stmmac_channel, rxtx_napi);
5360         struct stmmac_priv *priv = ch->priv_data;
5361         int rx_done, tx_done, rxtx_done;
5362         u32 chan = ch->index;
5363
5364         priv->xstats.napi_poll++;
5365
5366         tx_done = stmmac_tx_clean(priv, budget, chan);
5367         tx_done = min(tx_done, budget);
5368
5369         rx_done = stmmac_rx_zc(priv, budget, chan);
5370
5371         rxtx_done = max(tx_done, rx_done);
5372
5373         /* If either TX or RX work is not complete, return budget
5374          * and keep pooling
5375          */
5376         if (rxtx_done >= budget)
5377                 return budget;
5378
5379         /* all work done, exit the polling mode */
5380         if (napi_complete_done(napi, rxtx_done)) {
5381                 unsigned long flags;
5382
5383                 spin_lock_irqsave(&ch->lock, flags);
5384                 /* Both RX and TX work done are compelte,
5385                  * so enable both RX & TX IRQs.
5386                  */
5387                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5388                 spin_unlock_irqrestore(&ch->lock, flags);
5389         }
5390
5391         return min(rxtx_done, budget - 1);
5392 }
5393
5394 /**
5395  *  stmmac_tx_timeout
5396  *  @dev : Pointer to net device structure
5397  *  @txqueue: the index of the hanging transmit queue
5398  *  Description: this function is called when a packet transmission fails to
5399  *   complete within a reasonable time. The driver will mark the error in the
5400  *   netdev structure and arrange for the device to be reset to a sane state
5401  *   in order to transmit a new packet.
5402  */
5403 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5404 {
5405         struct stmmac_priv *priv = netdev_priv(dev);
5406
5407         stmmac_global_err(priv);
5408 }
5409
5410 /**
5411  *  stmmac_set_rx_mode - entry point for multicast addressing
5412  *  @dev : pointer to the device structure
5413  *  Description:
5414  *  This function is a driver entry point which gets called by the kernel
5415  *  whenever multicast addresses must be enabled/disabled.
5416  *  Return value:
5417  *  void.
5418  */
5419 static void stmmac_set_rx_mode(struct net_device *dev)
5420 {
5421         struct stmmac_priv *priv = netdev_priv(dev);
5422
5423         stmmac_set_filter(priv, priv->hw, dev);
5424 }
5425
5426 /**
5427  *  stmmac_change_mtu - entry point to change MTU size for the device.
5428  *  @dev : device pointer.
5429  *  @new_mtu : the new MTU size for the device.
5430  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5431  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5432  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5433  *  Return value:
5434  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5435  *  file on failure.
5436  */
5437 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5438 {
5439         struct stmmac_priv *priv = netdev_priv(dev);
5440         int txfifosz = priv->plat->tx_fifo_size;
5441         const int mtu = new_mtu;
5442
5443         if (txfifosz == 0)
5444                 txfifosz = priv->dma_cap.tx_fifo_size;
5445
5446         txfifosz /= priv->plat->tx_queues_to_use;
5447
5448         if (netif_running(dev)) {
5449                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
5450                 return -EBUSY;
5451         }
5452
5453         if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5454                 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5455                 return -EINVAL;
5456         }
5457
5458         new_mtu = STMMAC_ALIGN(new_mtu);
5459
5460         /* If condition true, FIFO is too small or MTU too large */
5461         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5462                 return -EINVAL;
5463
5464         dev->mtu = mtu;
5465
5466         netdev_update_features(dev);
5467
5468         return 0;
5469 }
5470
5471 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5472                                              netdev_features_t features)
5473 {
5474         struct stmmac_priv *priv = netdev_priv(dev);
5475
5476         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5477                 features &= ~NETIF_F_RXCSUM;
5478
5479         if (!priv->plat->tx_coe)
5480                 features &= ~NETIF_F_CSUM_MASK;
5481
5482         /* Some GMAC devices have a bugged Jumbo frame support that
5483          * needs to have the Tx COE disabled for oversized frames
5484          * (due to limited buffer sizes). In this case we disable
5485          * the TX csum insertion in the TDES and not use SF.
5486          */
5487         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5488                 features &= ~NETIF_F_CSUM_MASK;
5489
5490         /* Disable tso if asked by ethtool */
5491         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5492                 if (features & NETIF_F_TSO)
5493                         priv->tso = true;
5494                 else
5495                         priv->tso = false;
5496         }
5497
5498         return features;
5499 }
5500
5501 static int stmmac_set_features(struct net_device *netdev,
5502                                netdev_features_t features)
5503 {
5504         struct stmmac_priv *priv = netdev_priv(netdev);
5505
5506         /* Keep the COE Type in case of csum is supporting */
5507         if (features & NETIF_F_RXCSUM)
5508                 priv->hw->rx_csum = priv->plat->rx_coe;
5509         else
5510                 priv->hw->rx_csum = 0;
5511         /* No check needed because rx_coe has been set before and it will be
5512          * fixed in case of issue.
5513          */
5514         stmmac_rx_ipc(priv, priv->hw);
5515
5516         if (priv->sph_cap) {
5517                 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5518                 u32 chan;
5519
5520                 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5521                         stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5522         }
5523
5524         return 0;
5525 }
5526
5527 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5528 {
5529         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5530         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5531         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5532         bool *hs_enable = &fpe_cfg->hs_enable;
5533
5534         if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5535                 return;
5536
5537         /* If LP has sent verify mPacket, LP is FPE capable */
5538         if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5539                 if (*lp_state < FPE_STATE_CAPABLE)
5540                         *lp_state = FPE_STATE_CAPABLE;
5541
5542                 /* If user has requested FPE enable, quickly response */
5543                 if (*hs_enable)
5544                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5545                                                 MPACKET_RESPONSE);
5546         }
5547
5548         /* If Local has sent verify mPacket, Local is FPE capable */
5549         if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5550                 if (*lo_state < FPE_STATE_CAPABLE)
5551                         *lo_state = FPE_STATE_CAPABLE;
5552         }
5553
5554         /* If LP has sent response mPacket, LP is entering FPE ON */
5555         if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5556                 *lp_state = FPE_STATE_ENTERING_ON;
5557
5558         /* If Local has sent response mPacket, Local is entering FPE ON */
5559         if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5560                 *lo_state = FPE_STATE_ENTERING_ON;
5561
5562         if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5563             !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5564             priv->fpe_wq) {
5565                 queue_work(priv->fpe_wq, &priv->fpe_task);
5566         }
5567 }
5568
5569 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5570 {
5571         u32 rx_cnt = priv->plat->rx_queues_to_use;
5572         u32 tx_cnt = priv->plat->tx_queues_to_use;
5573         u32 queues_count;
5574         u32 queue;
5575         bool xmac;
5576
5577         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5578         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5579
5580         if (priv->irq_wake)
5581                 pm_wakeup_event(priv->device, 0);
5582
5583         if (priv->dma_cap.estsel)
5584                 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5585                                       &priv->xstats, tx_cnt);
5586
5587         if (priv->dma_cap.fpesel) {
5588                 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5589                                                    priv->dev);
5590
5591                 stmmac_fpe_event_status(priv, status);
5592         }
5593
5594         /* To handle GMAC own interrupts */
5595         if ((priv->plat->has_gmac) || xmac) {
5596                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5597
5598                 if (unlikely(status)) {
5599                         /* For LPI we need to save the tx status */
5600                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5601                                 priv->tx_path_in_lpi_mode = true;
5602                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5603                                 priv->tx_path_in_lpi_mode = false;
5604                 }
5605
5606                 for (queue = 0; queue < queues_count; queue++) {
5607                         status = stmmac_host_mtl_irq_status(priv, priv->hw,
5608                                                             queue);
5609                 }
5610
5611                 /* PCS link status */
5612                 if (priv->hw->pcs) {
5613                         if (priv->xstats.pcs_link)
5614                                 netif_carrier_on(priv->dev);
5615                         else
5616                                 netif_carrier_off(priv->dev);
5617                 }
5618
5619                 stmmac_timestamp_interrupt(priv, priv);
5620         }
5621 }
5622
5623 /**
5624  *  stmmac_interrupt - main ISR
5625  *  @irq: interrupt number.
5626  *  @dev_id: to pass the net device pointer.
5627  *  Description: this is the main driver interrupt service routine.
5628  *  It can call:
5629  *  o DMA service routine (to manage incoming frame reception and transmission
5630  *    status)
5631  *  o Core interrupts to manage: remote wake-up, management counter, LPI
5632  *    interrupts.
5633  */
5634 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5635 {
5636         struct net_device *dev = (struct net_device *)dev_id;
5637         struct stmmac_priv *priv = netdev_priv(dev);
5638
5639         /* Check if adapter is up */
5640         if (test_bit(STMMAC_DOWN, &priv->state))
5641                 return IRQ_HANDLED;
5642
5643         /* Check if a fatal error happened */
5644         if (stmmac_safety_feat_interrupt(priv))
5645                 return IRQ_HANDLED;
5646
5647         /* To handle Common interrupts */
5648         stmmac_common_interrupt(priv);
5649
5650         /* To handle DMA interrupts */
5651         stmmac_dma_interrupt(priv);
5652
5653         return IRQ_HANDLED;
5654 }
5655
5656 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5657 {
5658         struct net_device *dev = (struct net_device *)dev_id;
5659         struct stmmac_priv *priv = netdev_priv(dev);
5660
5661         if (unlikely(!dev)) {
5662                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5663                 return IRQ_NONE;
5664         }
5665
5666         /* Check if adapter is up */
5667         if (test_bit(STMMAC_DOWN, &priv->state))
5668                 return IRQ_HANDLED;
5669
5670         /* To handle Common interrupts */
5671         stmmac_common_interrupt(priv);
5672
5673         return IRQ_HANDLED;
5674 }
5675
5676 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5677 {
5678         struct net_device *dev = (struct net_device *)dev_id;
5679         struct stmmac_priv *priv = netdev_priv(dev);
5680
5681         if (unlikely(!dev)) {
5682                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5683                 return IRQ_NONE;
5684         }
5685
5686         /* Check if adapter is up */
5687         if (test_bit(STMMAC_DOWN, &priv->state))
5688                 return IRQ_HANDLED;
5689
5690         /* Check if a fatal error happened */
5691         stmmac_safety_feat_interrupt(priv);
5692
5693         return IRQ_HANDLED;
5694 }
5695
5696 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5697 {
5698         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5699         int chan = tx_q->queue_index;
5700         struct stmmac_priv *priv;
5701         int status;
5702
5703         priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5704
5705         if (unlikely(!data)) {
5706                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5707                 return IRQ_NONE;
5708         }
5709
5710         /* Check if adapter is up */
5711         if (test_bit(STMMAC_DOWN, &priv->state))
5712                 return IRQ_HANDLED;
5713
5714         status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5715
5716         if (unlikely(status & tx_hard_error_bump_tc)) {
5717                 /* Try to bump up the dma threshold on this failure */
5718                 stmmac_bump_dma_threshold(priv, chan);
5719         } else if (unlikely(status == tx_hard_error)) {
5720                 stmmac_tx_err(priv, chan);
5721         }
5722
5723         return IRQ_HANDLED;
5724 }
5725
5726 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5727 {
5728         struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5729         int chan = rx_q->queue_index;
5730         struct stmmac_priv *priv;
5731
5732         priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5733
5734         if (unlikely(!data)) {
5735                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5736                 return IRQ_NONE;
5737         }
5738
5739         /* Check if adapter is up */
5740         if (test_bit(STMMAC_DOWN, &priv->state))
5741                 return IRQ_HANDLED;
5742
5743         stmmac_napi_check(priv, chan, DMA_DIR_RX);
5744
5745         return IRQ_HANDLED;
5746 }
5747
5748 #ifdef CONFIG_NET_POLL_CONTROLLER
5749 /* Polling receive - used by NETCONSOLE and other diagnostic tools
5750  * to allow network I/O with interrupts disabled.
5751  */
5752 static void stmmac_poll_controller(struct net_device *dev)
5753 {
5754         struct stmmac_priv *priv = netdev_priv(dev);
5755         int i;
5756
5757         /* If adapter is down, do nothing */
5758         if (test_bit(STMMAC_DOWN, &priv->state))
5759                 return;
5760
5761         if (priv->plat->multi_msi_en) {
5762                 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5763                         stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5764
5765                 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5766                         stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5767         } else {
5768                 disable_irq(dev->irq);
5769                 stmmac_interrupt(dev->irq, dev);
5770                 enable_irq(dev->irq);
5771         }
5772 }
5773 #endif
5774
5775 /**
5776  *  stmmac_ioctl - Entry point for the Ioctl
5777  *  @dev: Device pointer.
5778  *  @rq: An IOCTL specefic structure, that can contain a pointer to
5779  *  a proprietary structure used to pass information to the driver.
5780  *  @cmd: IOCTL command
5781  *  Description:
5782  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
5783  */
5784 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5785 {
5786         struct stmmac_priv *priv = netdev_priv (dev);
5787         int ret = -EOPNOTSUPP;
5788
5789         if (!netif_running(dev))
5790                 return -EINVAL;
5791
5792         switch (cmd) {
5793         case SIOCGMIIPHY:
5794         case SIOCGMIIREG:
5795         case SIOCSMIIREG:
5796                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
5797                 break;
5798         case SIOCSHWTSTAMP:
5799                 ret = stmmac_hwtstamp_set(dev, rq);
5800                 break;
5801         case SIOCGHWTSTAMP:
5802                 ret = stmmac_hwtstamp_get(dev, rq);
5803                 break;
5804         default:
5805                 break;
5806         }
5807
5808         return ret;
5809 }
5810
5811 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5812                                     void *cb_priv)
5813 {
5814         struct stmmac_priv *priv = cb_priv;
5815         int ret = -EOPNOTSUPP;
5816
5817         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5818                 return ret;
5819
5820         __stmmac_disable_all_queues(priv);
5821
5822         switch (type) {
5823         case TC_SETUP_CLSU32:
5824                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5825                 break;
5826         case TC_SETUP_CLSFLOWER:
5827                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
5828                 break;
5829         default:
5830                 break;
5831         }
5832
5833         stmmac_enable_all_queues(priv);
5834         return ret;
5835 }
5836
5837 static LIST_HEAD(stmmac_block_cb_list);
5838
5839 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5840                            void *type_data)
5841 {
5842         struct stmmac_priv *priv = netdev_priv(ndev);
5843
5844         switch (type) {
5845         case TC_SETUP_BLOCK:
5846                 return flow_block_cb_setup_simple(type_data,
5847                                                   &stmmac_block_cb_list,
5848                                                   stmmac_setup_tc_block_cb,
5849                                                   priv, priv, true);
5850         case TC_SETUP_QDISC_CBS:
5851                 return stmmac_tc_setup_cbs(priv, priv, type_data);
5852         case TC_SETUP_QDISC_TAPRIO:
5853                 return stmmac_tc_setup_taprio(priv, priv, type_data);
5854         case TC_SETUP_QDISC_ETF:
5855                 return stmmac_tc_setup_etf(priv, priv, type_data);
5856         default:
5857                 return -EOPNOTSUPP;
5858         }
5859 }
5860
5861 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5862                                struct net_device *sb_dev)
5863 {
5864         int gso = skb_shinfo(skb)->gso_type;
5865
5866         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
5867                 /*
5868                  * There is no way to determine the number of TSO/USO
5869                  * capable Queues. Let's use always the Queue 0
5870                  * because if TSO/USO is supported then at least this
5871                  * one will be capable.
5872                  */
5873                 return 0;
5874         }
5875
5876         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5877 }
5878
5879 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5880 {
5881         struct stmmac_priv *priv = netdev_priv(ndev);
5882         int ret = 0;
5883
5884         ret = pm_runtime_get_sync(priv->device);
5885         if (ret < 0) {
5886                 pm_runtime_put_noidle(priv->device);
5887                 return ret;
5888         }
5889
5890         ret = eth_mac_addr(ndev, addr);
5891         if (ret)
5892                 goto set_mac_error;
5893
5894         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
5895
5896 set_mac_error:
5897         pm_runtime_put(priv->device);
5898
5899         return ret;
5900 }
5901
5902 #ifdef CONFIG_DEBUG_FS
5903 static struct dentry *stmmac_fs_dir;
5904
5905 static void sysfs_display_ring(void *head, int size, int extend_desc,
5906                                struct seq_file *seq, dma_addr_t dma_phy_addr)
5907 {
5908         int i;
5909         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5910         struct dma_desc *p = (struct dma_desc *)head;
5911         dma_addr_t dma_addr;
5912
5913         for (i = 0; i < size; i++) {
5914                 if (extend_desc) {
5915                         dma_addr = dma_phy_addr + i * sizeof(*ep);
5916                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5917                                    i, &dma_addr,
5918                                    le32_to_cpu(ep->basic.des0),
5919                                    le32_to_cpu(ep->basic.des1),
5920                                    le32_to_cpu(ep->basic.des2),
5921                                    le32_to_cpu(ep->basic.des3));
5922                         ep++;
5923                 } else {
5924                         dma_addr = dma_phy_addr + i * sizeof(*p);
5925                         seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5926                                    i, &dma_addr,
5927                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5928                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5929                         p++;
5930                 }
5931                 seq_printf(seq, "\n");
5932         }
5933 }
5934
5935 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
5936 {
5937         struct net_device *dev = seq->private;
5938         struct stmmac_priv *priv = netdev_priv(dev);
5939         u32 rx_count = priv->plat->rx_queues_to_use;
5940         u32 tx_count = priv->plat->tx_queues_to_use;
5941         u32 queue;
5942
5943         if ((dev->flags & IFF_UP) == 0)
5944                 return 0;
5945
5946         for (queue = 0; queue < rx_count; queue++) {
5947                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5948
5949                 seq_printf(seq, "RX Queue %d:\n", queue);
5950
5951                 if (priv->extend_desc) {
5952                         seq_printf(seq, "Extended descriptor ring:\n");
5953                         sysfs_display_ring((void *)rx_q->dma_erx,
5954                                            priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
5955                 } else {
5956                         seq_printf(seq, "Descriptor ring:\n");
5957                         sysfs_display_ring((void *)rx_q->dma_rx,
5958                                            priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
5959                 }
5960         }
5961
5962         for (queue = 0; queue < tx_count; queue++) {
5963                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5964
5965                 seq_printf(seq, "TX Queue %d:\n", queue);
5966
5967                 if (priv->extend_desc) {
5968                         seq_printf(seq, "Extended descriptor ring:\n");
5969                         sysfs_display_ring((void *)tx_q->dma_etx,
5970                                            priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
5971                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
5972                         seq_printf(seq, "Descriptor ring:\n");
5973                         sysfs_display_ring((void *)tx_q->dma_tx,
5974                                            priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
5975                 }
5976         }
5977
5978         return 0;
5979 }
5980 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
5981
5982 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
5983 {
5984         struct net_device *dev = seq->private;
5985         struct stmmac_priv *priv = netdev_priv(dev);
5986
5987         if (!priv->hw_cap_support) {
5988                 seq_printf(seq, "DMA HW features not supported\n");
5989                 return 0;
5990         }
5991
5992         seq_printf(seq, "==============================\n");
5993         seq_printf(seq, "\tDMA HW features\n");
5994         seq_printf(seq, "==============================\n");
5995
5996         seq_printf(seq, "\t10/100 Mbps: %s\n",
5997                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
5998         seq_printf(seq, "\t1000 Mbps: %s\n",
5999                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
6000         seq_printf(seq, "\tHalf duplex: %s\n",
6001                    (priv->dma_cap.half_duplex) ? "Y" : "N");
6002         seq_printf(seq, "\tHash Filter: %s\n",
6003                    (priv->dma_cap.hash_filter) ? "Y" : "N");
6004         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6005                    (priv->dma_cap.multi_addr) ? "Y" : "N");
6006         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6007                    (priv->dma_cap.pcs) ? "Y" : "N");
6008         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6009                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
6010         seq_printf(seq, "\tPMT Remote wake up: %s\n",
6011                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6012         seq_printf(seq, "\tPMT Magic Frame: %s\n",
6013                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6014         seq_printf(seq, "\tRMON module: %s\n",
6015                    (priv->dma_cap.rmon) ? "Y" : "N");
6016         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6017                    (priv->dma_cap.time_stamp) ? "Y" : "N");
6018         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6019                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
6020         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6021                    (priv->dma_cap.eee) ? "Y" : "N");
6022         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6023         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6024                    (priv->dma_cap.tx_coe) ? "Y" : "N");
6025         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6026                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6027                            (priv->dma_cap.rx_coe) ? "Y" : "N");
6028         } else {
6029                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6030                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6031                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6032                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6033         }
6034         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6035                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6036         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6037                    priv->dma_cap.number_rx_channel);
6038         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6039                    priv->dma_cap.number_tx_channel);
6040         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6041                    priv->dma_cap.number_rx_queues);
6042         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6043                    priv->dma_cap.number_tx_queues);
6044         seq_printf(seq, "\tEnhanced descriptors: %s\n",
6045                    (priv->dma_cap.enh_desc) ? "Y" : "N");
6046         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6047         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6048         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6049         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6050         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6051                    priv->dma_cap.pps_out_num);
6052         seq_printf(seq, "\tSafety Features: %s\n",
6053                    priv->dma_cap.asp ? "Y" : "N");
6054         seq_printf(seq, "\tFlexible RX Parser: %s\n",
6055                    priv->dma_cap.frpsel ? "Y" : "N");
6056         seq_printf(seq, "\tEnhanced Addressing: %d\n",
6057                    priv->dma_cap.addr64);
6058         seq_printf(seq, "\tReceive Side Scaling: %s\n",
6059                    priv->dma_cap.rssen ? "Y" : "N");
6060         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6061                    priv->dma_cap.vlhash ? "Y" : "N");
6062         seq_printf(seq, "\tSplit Header: %s\n",
6063                    priv->dma_cap.sphen ? "Y" : "N");
6064         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6065                    priv->dma_cap.vlins ? "Y" : "N");
6066         seq_printf(seq, "\tDouble VLAN: %s\n",
6067                    priv->dma_cap.dvlan ? "Y" : "N");
6068         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6069                    priv->dma_cap.l3l4fnum);
6070         seq_printf(seq, "\tARP Offloading: %s\n",
6071                    priv->dma_cap.arpoffsel ? "Y" : "N");
6072         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6073                    priv->dma_cap.estsel ? "Y" : "N");
6074         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6075                    priv->dma_cap.fpesel ? "Y" : "N");
6076         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6077                    priv->dma_cap.tbssel ? "Y" : "N");
6078         return 0;
6079 }
6080 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6081
6082 /* Use network device events to rename debugfs file entries.
6083  */
6084 static int stmmac_device_event(struct notifier_block *unused,
6085                                unsigned long event, void *ptr)
6086 {
6087         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6088         struct stmmac_priv *priv = netdev_priv(dev);
6089
6090         if (dev->netdev_ops != &stmmac_netdev_ops)
6091                 goto done;
6092
6093         switch (event) {
6094         case NETDEV_CHANGENAME:
6095                 if (priv->dbgfs_dir)
6096                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6097                                                          priv->dbgfs_dir,
6098                                                          stmmac_fs_dir,
6099                                                          dev->name);
6100                 break;
6101         }
6102 done:
6103         return NOTIFY_DONE;
6104 }
6105
6106 static struct notifier_block stmmac_notifier = {
6107         .notifier_call = stmmac_device_event,
6108 };
6109
6110 static void stmmac_init_fs(struct net_device *dev)
6111 {
6112         struct stmmac_priv *priv = netdev_priv(dev);
6113
6114         rtnl_lock();
6115
6116         /* Create per netdev entries */
6117         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6118
6119         /* Entry to report DMA RX/TX rings */
6120         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6121                             &stmmac_rings_status_fops);
6122
6123         /* Entry to report the DMA HW features */
6124         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6125                             &stmmac_dma_cap_fops);
6126
6127         rtnl_unlock();
6128 }
6129
6130 static void stmmac_exit_fs(struct net_device *dev)
6131 {
6132         struct stmmac_priv *priv = netdev_priv(dev);
6133
6134         debugfs_remove_recursive(priv->dbgfs_dir);
6135 }
6136 #endif /* CONFIG_DEBUG_FS */
6137
6138 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6139 {
6140         unsigned char *data = (unsigned char *)&vid_le;
6141         unsigned char data_byte = 0;
6142         u32 crc = ~0x0;
6143         u32 temp = 0;
6144         int i, bits;
6145
6146         bits = get_bitmask_order(VLAN_VID_MASK);
6147         for (i = 0; i < bits; i++) {
6148                 if ((i % 8) == 0)
6149                         data_byte = data[i / 8];
6150
6151                 temp = ((crc & 1) ^ data_byte) & 1;
6152                 crc >>= 1;
6153                 data_byte >>= 1;
6154
6155                 if (temp)
6156                         crc ^= 0xedb88320;
6157         }
6158
6159         return crc;
6160 }
6161
6162 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6163 {
6164         u32 crc, hash = 0;
6165         __le16 pmatch = 0;
6166         int count = 0;
6167         u16 vid = 0;
6168
6169         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6170                 __le16 vid_le = cpu_to_le16(vid);
6171                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6172                 hash |= (1 << crc);
6173                 count++;
6174         }
6175
6176         if (!priv->dma_cap.vlhash) {
6177                 if (count > 2) /* VID = 0 always passes filter */
6178                         return -EOPNOTSUPP;
6179
6180                 pmatch = cpu_to_le16(vid);
6181                 hash = 0;
6182         }
6183
6184         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6185 }
6186
6187 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6188 {
6189         struct stmmac_priv *priv = netdev_priv(ndev);
6190         bool is_double = false;
6191         int ret;
6192
6193         if (be16_to_cpu(proto) == ETH_P_8021AD)
6194                 is_double = true;
6195
6196         set_bit(vid, priv->active_vlans);
6197         ret = stmmac_vlan_update(priv, is_double);
6198         if (ret) {
6199                 clear_bit(vid, priv->active_vlans);
6200                 return ret;
6201         }
6202
6203         if (priv->hw->num_vlan) {
6204                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6205                 if (ret)
6206                         return ret;
6207         }
6208
6209         return 0;
6210 }
6211
6212 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6213 {
6214         struct stmmac_priv *priv = netdev_priv(ndev);
6215         bool is_double = false;
6216         int ret;
6217
6218         ret = pm_runtime_get_sync(priv->device);
6219         if (ret < 0) {
6220                 pm_runtime_put_noidle(priv->device);
6221                 return ret;
6222         }
6223
6224         if (be16_to_cpu(proto) == ETH_P_8021AD)
6225                 is_double = true;
6226
6227         clear_bit(vid, priv->active_vlans);
6228
6229         if (priv->hw->num_vlan) {
6230                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6231                 if (ret)
6232                         goto del_vlan_error;
6233         }
6234
6235         ret = stmmac_vlan_update(priv, is_double);
6236
6237 del_vlan_error:
6238         pm_runtime_put(priv->device);
6239
6240         return ret;
6241 }
6242
6243 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6244 {
6245         struct stmmac_priv *priv = netdev_priv(dev);
6246
6247         switch (bpf->command) {
6248         case XDP_SETUP_PROG:
6249                 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6250         case XDP_SETUP_XSK_POOL:
6251                 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6252                                              bpf->xsk.queue_id);
6253         default:
6254                 return -EOPNOTSUPP;
6255         }
6256 }
6257
6258 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6259                            struct xdp_frame **frames, u32 flags)
6260 {
6261         struct stmmac_priv *priv = netdev_priv(dev);
6262         int cpu = smp_processor_id();
6263         struct netdev_queue *nq;
6264         int i, nxmit = 0;
6265         int queue;
6266
6267         if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6268                 return -ENETDOWN;
6269
6270         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6271                 return -EINVAL;
6272
6273         queue = stmmac_xdp_get_tx_queue(priv, cpu);
6274         nq = netdev_get_tx_queue(priv->dev, queue);
6275
6276         __netif_tx_lock(nq, cpu);
6277         /* Avoids TX time-out as we are sharing with slow path */
6278         txq_trans_cond_update(nq);
6279
6280         for (i = 0; i < num_frames; i++) {
6281                 int res;
6282
6283                 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6284                 if (res == STMMAC_XDP_CONSUMED)
6285                         break;
6286
6287                 nxmit++;
6288         }
6289
6290         if (flags & XDP_XMIT_FLUSH) {
6291                 stmmac_flush_tx_descriptors(priv, queue);
6292                 stmmac_tx_timer_arm(priv, queue);
6293         }
6294
6295         __netif_tx_unlock(nq);
6296
6297         return nxmit;
6298 }
6299
6300 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6301 {
6302         struct stmmac_channel *ch = &priv->channel[queue];
6303         unsigned long flags;
6304
6305         spin_lock_irqsave(&ch->lock, flags);
6306         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6307         spin_unlock_irqrestore(&ch->lock, flags);
6308
6309         stmmac_stop_rx_dma(priv, queue);
6310         __free_dma_rx_desc_resources(priv, queue);
6311 }
6312
6313 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6314 {
6315         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6316         struct stmmac_channel *ch = &priv->channel[queue];
6317         unsigned long flags;
6318         u32 buf_size;
6319         int ret;
6320
6321         ret = __alloc_dma_rx_desc_resources(priv, queue);
6322         if (ret) {
6323                 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6324                 return;
6325         }
6326
6327         ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6328         if (ret) {
6329                 __free_dma_rx_desc_resources(priv, queue);
6330                 netdev_err(priv->dev, "Failed to init RX desc.\n");
6331                 return;
6332         }
6333
6334         stmmac_clear_rx_descriptors(priv, queue);
6335
6336         stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6337                             rx_q->dma_rx_phy, rx_q->queue_index);
6338
6339         rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6340                              sizeof(struct dma_desc));
6341         stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6342                                rx_q->rx_tail_addr, rx_q->queue_index);
6343
6344         if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6345                 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6346                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6347                                       buf_size,
6348                                       rx_q->queue_index);
6349         } else {
6350                 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6351                                       priv->dma_buf_sz,
6352                                       rx_q->queue_index);
6353         }
6354
6355         stmmac_start_rx_dma(priv, queue);
6356
6357         spin_lock_irqsave(&ch->lock, flags);
6358         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6359         spin_unlock_irqrestore(&ch->lock, flags);
6360 }
6361
6362 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6363 {
6364         struct stmmac_channel *ch = &priv->channel[queue];
6365         unsigned long flags;
6366
6367         spin_lock_irqsave(&ch->lock, flags);
6368         stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6369         spin_unlock_irqrestore(&ch->lock, flags);
6370
6371         stmmac_stop_tx_dma(priv, queue);
6372         __free_dma_tx_desc_resources(priv, queue);
6373 }
6374
6375 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6376 {
6377         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6378         struct stmmac_channel *ch = &priv->channel[queue];
6379         unsigned long flags;
6380         int ret;
6381
6382         ret = __alloc_dma_tx_desc_resources(priv, queue);
6383         if (ret) {
6384                 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6385                 return;
6386         }
6387
6388         ret = __init_dma_tx_desc_rings(priv, queue);
6389         if (ret) {
6390                 __free_dma_tx_desc_resources(priv, queue);
6391                 netdev_err(priv->dev, "Failed to init TX desc.\n");
6392                 return;
6393         }
6394
6395         stmmac_clear_tx_descriptors(priv, queue);
6396
6397         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6398                             tx_q->dma_tx_phy, tx_q->queue_index);
6399
6400         if (tx_q->tbs & STMMAC_TBS_AVAIL)
6401                 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6402
6403         tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6404         stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6405                                tx_q->tx_tail_addr, tx_q->queue_index);
6406
6407         stmmac_start_tx_dma(priv, queue);
6408
6409         spin_lock_irqsave(&ch->lock, flags);
6410         stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6411         spin_unlock_irqrestore(&ch->lock, flags);
6412 }
6413
6414 void stmmac_xdp_release(struct net_device *dev)
6415 {
6416         struct stmmac_priv *priv = netdev_priv(dev);
6417         u32 chan;
6418
6419         /* Disable NAPI process */
6420         stmmac_disable_all_queues(priv);
6421
6422         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6423                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6424
6425         /* Free the IRQ lines */
6426         stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6427
6428         /* Stop TX/RX DMA channels */
6429         stmmac_stop_all_dma(priv);
6430
6431         /* Release and free the Rx/Tx resources */
6432         free_dma_desc_resources(priv);
6433
6434         /* Disable the MAC Rx/Tx */
6435         stmmac_mac_set(priv, priv->ioaddr, false);
6436
6437         /* set trans_start so we don't get spurious
6438          * watchdogs during reset
6439          */
6440         netif_trans_update(dev);
6441         netif_carrier_off(dev);
6442 }
6443
6444 int stmmac_xdp_open(struct net_device *dev)
6445 {
6446         struct stmmac_priv *priv = netdev_priv(dev);
6447         u32 rx_cnt = priv->plat->rx_queues_to_use;
6448         u32 tx_cnt = priv->plat->tx_queues_to_use;
6449         u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6450         struct stmmac_rx_queue *rx_q;
6451         struct stmmac_tx_queue *tx_q;
6452         u32 buf_size;
6453         bool sph_en;
6454         u32 chan;
6455         int ret;
6456
6457         ret = alloc_dma_desc_resources(priv);
6458         if (ret < 0) {
6459                 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6460                            __func__);
6461                 goto dma_desc_error;
6462         }
6463
6464         ret = init_dma_desc_rings(dev, GFP_KERNEL);
6465         if (ret < 0) {
6466                 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6467                            __func__);
6468                 goto init_error;
6469         }
6470
6471         /* DMA CSR Channel configuration */
6472         for (chan = 0; chan < dma_csr_ch; chan++)
6473                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6474
6475         /* Adjust Split header */
6476         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6477
6478         /* DMA RX Channel Configuration */
6479         for (chan = 0; chan < rx_cnt; chan++) {
6480                 rx_q = &priv->rx_queue[chan];
6481
6482                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6483                                     rx_q->dma_rx_phy, chan);
6484
6485                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6486                                      (rx_q->buf_alloc_num *
6487                                       sizeof(struct dma_desc));
6488                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6489                                        rx_q->rx_tail_addr, chan);
6490
6491                 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6492                         buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6493                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6494                                               buf_size,
6495                                               rx_q->queue_index);
6496                 } else {
6497                         stmmac_set_dma_bfsize(priv, priv->ioaddr,
6498                                               priv->dma_buf_sz,
6499                                               rx_q->queue_index);
6500                 }
6501
6502                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6503         }
6504
6505         /* DMA TX Channel Configuration */
6506         for (chan = 0; chan < tx_cnt; chan++) {
6507                 tx_q = &priv->tx_queue[chan];
6508
6509                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6510                                     tx_q->dma_tx_phy, chan);
6511
6512                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6513                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6514                                        tx_q->tx_tail_addr, chan);
6515
6516                 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6517                 tx_q->txtimer.function = stmmac_tx_timer;
6518         }
6519
6520         /* Enable the MAC Rx/Tx */
6521         stmmac_mac_set(priv, priv->ioaddr, true);
6522
6523         /* Start Rx & Tx DMA Channels */
6524         stmmac_start_all_dma(priv);
6525
6526         ret = stmmac_request_irq(dev);
6527         if (ret)
6528                 goto irq_error;
6529
6530         /* Enable NAPI process*/
6531         stmmac_enable_all_queues(priv);
6532         netif_carrier_on(dev);
6533         netif_tx_start_all_queues(dev);
6534
6535         return 0;
6536
6537 irq_error:
6538         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6539                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6540
6541         stmmac_hw_teardown(dev);
6542 init_error:
6543         free_dma_desc_resources(priv);
6544 dma_desc_error:
6545         return ret;
6546 }
6547
6548 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6549 {
6550         struct stmmac_priv *priv = netdev_priv(dev);
6551         struct stmmac_rx_queue *rx_q;
6552         struct stmmac_tx_queue *tx_q;
6553         struct stmmac_channel *ch;
6554
6555         if (test_bit(STMMAC_DOWN, &priv->state) ||
6556             !netif_carrier_ok(priv->dev))
6557                 return -ENETDOWN;
6558
6559         if (!stmmac_xdp_is_enabled(priv))
6560                 return -ENXIO;
6561
6562         if (queue >= priv->plat->rx_queues_to_use ||
6563             queue >= priv->plat->tx_queues_to_use)
6564                 return -EINVAL;
6565
6566         rx_q = &priv->rx_queue[queue];
6567         tx_q = &priv->tx_queue[queue];
6568         ch = &priv->channel[queue];
6569
6570         if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6571                 return -ENXIO;
6572
6573         if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6574                 /* EQoS does not have per-DMA channel SW interrupt,
6575                  * so we schedule RX Napi straight-away.
6576                  */
6577                 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6578                         __napi_schedule(&ch->rxtx_napi);
6579         }
6580
6581         return 0;
6582 }
6583
6584 static const struct net_device_ops stmmac_netdev_ops = {
6585         .ndo_open = stmmac_open,
6586         .ndo_start_xmit = stmmac_xmit,
6587         .ndo_stop = stmmac_release,
6588         .ndo_change_mtu = stmmac_change_mtu,
6589         .ndo_fix_features = stmmac_fix_features,
6590         .ndo_set_features = stmmac_set_features,
6591         .ndo_set_rx_mode = stmmac_set_rx_mode,
6592         .ndo_tx_timeout = stmmac_tx_timeout,
6593         .ndo_eth_ioctl = stmmac_ioctl,
6594         .ndo_setup_tc = stmmac_setup_tc,
6595         .ndo_select_queue = stmmac_select_queue,
6596 #ifdef CONFIG_NET_POLL_CONTROLLER
6597         .ndo_poll_controller = stmmac_poll_controller,
6598 #endif
6599         .ndo_set_mac_address = stmmac_set_mac_address,
6600         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6601         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
6602         .ndo_bpf = stmmac_bpf,
6603         .ndo_xdp_xmit = stmmac_xdp_xmit,
6604         .ndo_xsk_wakeup = stmmac_xsk_wakeup,
6605 };
6606
6607 static void stmmac_reset_subtask(struct stmmac_priv *priv)
6608 {
6609         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6610                 return;
6611         if (test_bit(STMMAC_DOWN, &priv->state))
6612                 return;
6613
6614         netdev_err(priv->dev, "Reset adapter.\n");
6615
6616         rtnl_lock();
6617         netif_trans_update(priv->dev);
6618         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6619                 usleep_range(1000, 2000);
6620
6621         set_bit(STMMAC_DOWN, &priv->state);
6622         dev_close(priv->dev);
6623         dev_open(priv->dev, NULL);
6624         clear_bit(STMMAC_DOWN, &priv->state);
6625         clear_bit(STMMAC_RESETING, &priv->state);
6626         rtnl_unlock();
6627 }
6628
6629 static void stmmac_service_task(struct work_struct *work)
6630 {
6631         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6632                         service_task);
6633
6634         stmmac_reset_subtask(priv);
6635         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6636 }
6637
6638 /**
6639  *  stmmac_hw_init - Init the MAC device
6640  *  @priv: driver private structure
6641  *  Description: this function is to configure the MAC device according to
6642  *  some platform parameters or the HW capability register. It prepares the
6643  *  driver to use either ring or chain modes and to setup either enhanced or
6644  *  normal descriptors.
6645  */
6646 static int stmmac_hw_init(struct stmmac_priv *priv)
6647 {
6648         int ret;
6649
6650         /* dwmac-sun8i only work in chain mode */
6651         if (priv->plat->has_sun8i)
6652                 chain_mode = 1;
6653         priv->chain_mode = chain_mode;
6654
6655         /* Initialize HW Interface */
6656         ret = stmmac_hwif_init(priv);
6657         if (ret)
6658                 return ret;
6659
6660         /* Get the HW capability (new GMAC newer than 3.50a) */
6661         priv->hw_cap_support = stmmac_get_hw_features(priv);
6662         if (priv->hw_cap_support) {
6663                 dev_info(priv->device, "DMA HW capability register supported\n");
6664
6665                 /* We can override some gmac/dma configuration fields: e.g.
6666                  * enh_desc, tx_coe (e.g. that are passed through the
6667                  * platform) with the values from the HW capability
6668                  * register (if supported).
6669                  */
6670                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
6671                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6672                                 !priv->plat->use_phy_wol;
6673                 priv->hw->pmt = priv->plat->pmt;
6674                 if (priv->dma_cap.hash_tb_sz) {
6675                         priv->hw->multicast_filter_bins =
6676                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
6677                         priv->hw->mcast_bits_log2 =
6678                                         ilog2(priv->hw->multicast_filter_bins);
6679                 }
6680
6681                 /* TXCOE doesn't work in thresh DMA mode */
6682                 if (priv->plat->force_thresh_dma_mode)
6683                         priv->plat->tx_coe = 0;
6684                 else
6685                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
6686
6687                 /* In case of GMAC4 rx_coe is from HW cap register. */
6688                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
6689
6690                 if (priv->dma_cap.rx_coe_type2)
6691                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6692                 else if (priv->dma_cap.rx_coe_type1)
6693                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6694
6695         } else {
6696                 dev_info(priv->device, "No HW DMA feature register supported\n");
6697         }
6698
6699         if (priv->plat->rx_coe) {
6700                 priv->hw->rx_csum = priv->plat->rx_coe;
6701                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
6702                 if (priv->synopsys_id < DWMAC_CORE_4_00)
6703                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
6704         }
6705         if (priv->plat->tx_coe)
6706                 dev_info(priv->device, "TX Checksum insertion supported\n");
6707
6708         if (priv->plat->pmt) {
6709                 dev_info(priv->device, "Wake-Up On Lan supported\n");
6710                 device_set_wakeup_capable(priv->device, 1);
6711         }
6712
6713         if (priv->dma_cap.tsoen)
6714                 dev_info(priv->device, "TSO supported\n");
6715
6716         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6717         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6718
6719         /* Run HW quirks, if any */
6720         if (priv->hwif_quirks) {
6721                 ret = priv->hwif_quirks(priv);
6722                 if (ret)
6723                         return ret;
6724         }
6725
6726         /* Rx Watchdog is available in the COREs newer than the 3.40.
6727          * In some case, for example on bugged HW this feature
6728          * has to be disable and this can be done by passing the
6729          * riwt_off field from the platform.
6730          */
6731         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6732             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6733                 priv->use_riwt = 1;
6734                 dev_info(priv->device,
6735                          "Enable RX Mitigation via HW Watchdog Timer\n");
6736         }
6737
6738         return 0;
6739 }
6740
6741 static void stmmac_napi_add(struct net_device *dev)
6742 {
6743         struct stmmac_priv *priv = netdev_priv(dev);
6744         u32 queue, maxq;
6745
6746         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6747
6748         for (queue = 0; queue < maxq; queue++) {
6749                 struct stmmac_channel *ch = &priv->channel[queue];
6750
6751                 ch->priv_data = priv;
6752                 ch->index = queue;
6753                 spin_lock_init(&ch->lock);
6754
6755                 if (queue < priv->plat->rx_queues_to_use) {
6756                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6757                                        NAPI_POLL_WEIGHT);
6758                 }
6759                 if (queue < priv->plat->tx_queues_to_use) {
6760                         netif_tx_napi_add(dev, &ch->tx_napi,
6761                                           stmmac_napi_poll_tx,
6762                                           NAPI_POLL_WEIGHT);
6763                 }
6764                 if (queue < priv->plat->rx_queues_to_use &&
6765                     queue < priv->plat->tx_queues_to_use) {
6766                         netif_napi_add(dev, &ch->rxtx_napi,
6767                                        stmmac_napi_poll_rxtx,
6768                                        NAPI_POLL_WEIGHT);
6769                 }
6770         }
6771 }
6772
6773 static void stmmac_napi_del(struct net_device *dev)
6774 {
6775         struct stmmac_priv *priv = netdev_priv(dev);
6776         u32 queue, maxq;
6777
6778         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6779
6780         for (queue = 0; queue < maxq; queue++) {
6781                 struct stmmac_channel *ch = &priv->channel[queue];
6782
6783                 if (queue < priv->plat->rx_queues_to_use)
6784                         netif_napi_del(&ch->rx_napi);
6785                 if (queue < priv->plat->tx_queues_to_use)
6786                         netif_napi_del(&ch->tx_napi);
6787                 if (queue < priv->plat->rx_queues_to_use &&
6788                     queue < priv->plat->tx_queues_to_use) {
6789                         netif_napi_del(&ch->rxtx_napi);
6790                 }
6791         }
6792 }
6793
6794 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6795 {
6796         struct stmmac_priv *priv = netdev_priv(dev);
6797         int ret = 0;
6798
6799         if (netif_running(dev))
6800                 stmmac_release(dev);
6801
6802         stmmac_napi_del(dev);
6803
6804         priv->plat->rx_queues_to_use = rx_cnt;
6805         priv->plat->tx_queues_to_use = tx_cnt;
6806
6807         stmmac_napi_add(dev);
6808
6809         if (netif_running(dev))
6810                 ret = stmmac_open(dev);
6811
6812         return ret;
6813 }
6814
6815 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6816 {
6817         struct stmmac_priv *priv = netdev_priv(dev);
6818         int ret = 0;
6819
6820         if (netif_running(dev))
6821                 stmmac_release(dev);
6822
6823         priv->dma_rx_size = rx_size;
6824         priv->dma_tx_size = tx_size;
6825
6826         if (netif_running(dev))
6827                 ret = stmmac_open(dev);
6828
6829         return ret;
6830 }
6831
6832 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6833 static void stmmac_fpe_lp_task(struct work_struct *work)
6834 {
6835         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6836                                                 fpe_task);
6837         struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6838         enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6839         enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6840         bool *hs_enable = &fpe_cfg->hs_enable;
6841         bool *enable = &fpe_cfg->enable;
6842         int retries = 20;
6843
6844         while (retries-- > 0) {
6845                 /* Bail out immediately if FPE handshake is OFF */
6846                 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6847                         break;
6848
6849                 if (*lo_state == FPE_STATE_ENTERING_ON &&
6850                     *lp_state == FPE_STATE_ENTERING_ON) {
6851                         stmmac_fpe_configure(priv, priv->ioaddr,
6852                                              priv->plat->tx_queues_to_use,
6853                                              priv->plat->rx_queues_to_use,
6854                                              *enable);
6855
6856                         netdev_info(priv->dev, "configured FPE\n");
6857
6858                         *lo_state = FPE_STATE_ON;
6859                         *lp_state = FPE_STATE_ON;
6860                         netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6861                         break;
6862                 }
6863
6864                 if ((*lo_state == FPE_STATE_CAPABLE ||
6865                      *lo_state == FPE_STATE_ENTERING_ON) &&
6866                      *lp_state != FPE_STATE_ON) {
6867                         netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
6868                                     *lo_state, *lp_state);
6869                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6870                                                 MPACKET_VERIFY);
6871                 }
6872                 /* Sleep then retry */
6873                 msleep(500);
6874         }
6875
6876         clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6877 }
6878
6879 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6880 {
6881         if (priv->plat->fpe_cfg->hs_enable != enable) {
6882                 if (enable) {
6883                         stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6884                                                 MPACKET_VERIFY);
6885                 } else {
6886                         priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6887                         priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6888                 }
6889
6890                 priv->plat->fpe_cfg->hs_enable = enable;
6891         }
6892 }
6893
6894 /**
6895  * stmmac_dvr_probe
6896  * @device: device pointer
6897  * @plat_dat: platform data pointer
6898  * @res: stmmac resource pointer
6899  * Description: this is the main probe function used to
6900  * call the alloc_etherdev, allocate the priv structure.
6901  * Return:
6902  * returns 0 on success, otherwise errno.
6903  */
6904 int stmmac_dvr_probe(struct device *device,
6905                      struct plat_stmmacenet_data *plat_dat,
6906                      struct stmmac_resources *res)
6907 {
6908         struct net_device *ndev = NULL;
6909         struct stmmac_priv *priv;
6910         u32 rxq;
6911         int i, ret = 0;
6912
6913         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6914                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
6915         if (!ndev)
6916                 return -ENOMEM;
6917
6918         SET_NETDEV_DEV(ndev, device);
6919
6920         priv = netdev_priv(ndev);
6921         priv->device = device;
6922         priv->dev = ndev;
6923
6924         stmmac_set_ethtool_ops(ndev);
6925         priv->pause = pause;
6926         priv->plat = plat_dat;
6927         priv->ioaddr = res->addr;
6928         priv->dev->base_addr = (unsigned long)res->addr;
6929         priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
6930
6931         priv->dev->irq = res->irq;
6932         priv->wol_irq = res->wol_irq;
6933         priv->lpi_irq = res->lpi_irq;
6934         priv->sfty_ce_irq = res->sfty_ce_irq;
6935         priv->sfty_ue_irq = res->sfty_ue_irq;
6936         for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
6937                 priv->rx_irq[i] = res->rx_irq[i];
6938         for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
6939                 priv->tx_irq[i] = res->tx_irq[i];
6940
6941         if (!is_zero_ether_addr(res->mac))
6942                 eth_hw_addr_set(priv->dev, res->mac);
6943
6944         dev_set_drvdata(device, priv->dev);
6945
6946         /* Verify driver arguments */
6947         stmmac_verify_args();
6948
6949         priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
6950         if (!priv->af_xdp_zc_qps)
6951                 return -ENOMEM;
6952
6953         /* Allocate workqueue */
6954         priv->wq = create_singlethread_workqueue("stmmac_wq");
6955         if (!priv->wq) {
6956                 dev_err(priv->device, "failed to create workqueue\n");
6957                 return -ENOMEM;
6958         }
6959
6960         INIT_WORK(&priv->service_task, stmmac_service_task);
6961
6962         /* Initialize Link Partner FPE workqueue */
6963         INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
6964
6965         /* Override with kernel parameters if supplied XXX CRS XXX
6966          * this needs to have multiple instances
6967          */
6968         if ((phyaddr >= 0) && (phyaddr <= 31))
6969                 priv->plat->phy_addr = phyaddr;
6970
6971         if (priv->plat->stmmac_rst) {
6972                 ret = reset_control_assert(priv->plat->stmmac_rst);
6973                 reset_control_deassert(priv->plat->stmmac_rst);
6974                 /* Some reset controllers have only reset callback instead of
6975                  * assert + deassert callbacks pair.
6976                  */
6977                 if (ret == -ENOTSUPP)
6978                         reset_control_reset(priv->plat->stmmac_rst);
6979         }
6980
6981         ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
6982         if (ret == -ENOTSUPP)
6983                 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
6984                         ERR_PTR(ret));
6985
6986         /* Init MAC and get the capabilities */
6987         ret = stmmac_hw_init(priv);
6988         if (ret)
6989                 goto error_hw_init;
6990
6991         /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
6992          */
6993         if (priv->synopsys_id < DWMAC_CORE_5_20)
6994                 priv->plat->dma_cfg->dche = false;
6995
6996         stmmac_check_ether_addr(priv);
6997
6998         ndev->netdev_ops = &stmmac_netdev_ops;
6999
7000         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7001                             NETIF_F_RXCSUM;
7002
7003         ret = stmmac_tc_init(priv, priv);
7004         if (!ret) {
7005                 ndev->hw_features |= NETIF_F_HW_TC;
7006         }
7007
7008         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
7009                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7010                 if (priv->plat->has_gmac4)
7011                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7012                 priv->tso = true;
7013                 dev_info(priv->device, "TSO feature enabled\n");
7014         }
7015
7016         if (priv->dma_cap.sphen) {
7017                 ndev->hw_features |= NETIF_F_GRO;
7018                 priv->sph_cap = true;
7019                 priv->sph = priv->sph_cap;
7020                 dev_info(priv->device, "SPH feature enabled\n");
7021         }
7022
7023         /* The current IP register MAC_HW_Feature1[ADDR64] only define
7024          * 32/40/64 bit width, but some SOC support others like i.MX8MP
7025          * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
7026          * So overwrite dma_cap.addr64 according to HW real design.
7027          */
7028         if (priv->plat->addr64)
7029                 priv->dma_cap.addr64 = priv->plat->addr64;
7030
7031         if (priv->dma_cap.addr64) {
7032                 ret = dma_set_mask_and_coherent(device,
7033                                 DMA_BIT_MASK(priv->dma_cap.addr64));
7034                 if (!ret) {
7035                         dev_info(priv->device, "Using %d bits DMA width\n",
7036                                  priv->dma_cap.addr64);
7037
7038                         /*
7039                          * If more than 32 bits can be addressed, make sure to
7040                          * enable enhanced addressing mode.
7041                          */
7042                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7043                                 priv->plat->dma_cfg->eame = true;
7044                 } else {
7045                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7046                         if (ret) {
7047                                 dev_err(priv->device, "Failed to set DMA Mask\n");
7048                                 goto error_hw_init;
7049                         }
7050
7051                         priv->dma_cap.addr64 = 32;
7052                 }
7053         }
7054
7055         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7056         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7057 #ifdef STMMAC_VLAN_TAG_USED
7058         /* Both mac100 and gmac support receive VLAN tag detection */
7059         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7060         if (priv->dma_cap.vlhash) {
7061                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7062                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7063         }
7064         if (priv->dma_cap.vlins) {
7065                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7066                 if (priv->dma_cap.dvlan)
7067                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7068         }
7069 #endif
7070         priv->msg_enable = netif_msg_init(debug, default_msg_level);
7071
7072         /* Initialize RSS */
7073         rxq = priv->plat->rx_queues_to_use;
7074         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7075         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7076                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7077
7078         if (priv->dma_cap.rssen && priv->plat->rss_en)
7079                 ndev->features |= NETIF_F_RXHASH;
7080
7081         /* MTU range: 46 - hw-specific max */
7082         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7083         if (priv->plat->has_xgmac)
7084                 ndev->max_mtu = XGMAC_JUMBO_LEN;
7085         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7086                 ndev->max_mtu = JUMBO_LEN;
7087         else
7088                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7089         /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7090          * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7091          */
7092         if ((priv->plat->maxmtu < ndev->max_mtu) &&
7093             (priv->plat->maxmtu >= ndev->min_mtu))
7094                 ndev->max_mtu = priv->plat->maxmtu;
7095         else if (priv->plat->maxmtu < ndev->min_mtu)
7096                 dev_warn(priv->device,
7097                          "%s: warning: maxmtu having invalid value (%d)\n",
7098                          __func__, priv->plat->maxmtu);
7099
7100         if (flow_ctrl)
7101                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
7102
7103         /* Setup channels NAPI */
7104         stmmac_napi_add(ndev);
7105
7106         mutex_init(&priv->lock);
7107
7108         /* If a specific clk_csr value is passed from the platform
7109          * this means that the CSR Clock Range selection cannot be
7110          * changed at run-time and it is fixed. Viceversa the driver'll try to
7111          * set the MDC clock dynamically according to the csr actual
7112          * clock input.
7113          */
7114         if (priv->plat->clk_csr >= 0)
7115                 priv->clk_csr = priv->plat->clk_csr;
7116         else
7117                 stmmac_clk_csr_set(priv);
7118
7119         stmmac_check_pcs_mode(priv);
7120
7121         pm_runtime_get_noresume(device);
7122         pm_runtime_set_active(device);
7123         if (!pm_runtime_enabled(device))
7124                 pm_runtime_enable(device);
7125
7126         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7127             priv->hw->pcs != STMMAC_PCS_RTBI) {
7128                 /* MDIO bus Registration */
7129                 ret = stmmac_mdio_register(ndev);
7130                 if (ret < 0) {
7131                         dev_err(priv->device,
7132                                 "%s: MDIO bus (id: %d) registration failed",
7133                                 __func__, priv->plat->bus_id);
7134                         goto error_mdio_register;
7135                 }
7136         }
7137
7138         if (priv->plat->speed_mode_2500)
7139                 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7140
7141         if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7142                 ret = stmmac_xpcs_setup(priv->mii);
7143                 if (ret)
7144                         goto error_xpcs_setup;
7145         }
7146
7147         ret = stmmac_phy_setup(priv);
7148         if (ret) {
7149                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7150                 goto error_phy_setup;
7151         }
7152
7153         ret = register_netdev(ndev);
7154         if (ret) {
7155                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7156                         __func__, ret);
7157                 goto error_netdev_register;
7158         }
7159
7160         if (priv->plat->serdes_powerup) {
7161                 ret = priv->plat->serdes_powerup(ndev,
7162                                                  priv->plat->bsp_priv);
7163
7164                 if (ret < 0)
7165                         goto error_serdes_powerup;
7166         }
7167
7168 #ifdef CONFIG_DEBUG_FS
7169         stmmac_init_fs(ndev);
7170 #endif
7171
7172         if (priv->plat->dump_debug_regs)
7173                 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7174
7175         /* Let pm_runtime_put() disable the clocks.
7176          * If CONFIG_PM is not enabled, the clocks will stay powered.
7177          */
7178         pm_runtime_put(device);
7179
7180         return ret;
7181
7182 error_serdes_powerup:
7183         unregister_netdev(ndev);
7184 error_netdev_register:
7185         phylink_destroy(priv->phylink);
7186 error_xpcs_setup:
7187 error_phy_setup:
7188         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7189             priv->hw->pcs != STMMAC_PCS_RTBI)
7190                 stmmac_mdio_unregister(ndev);
7191 error_mdio_register:
7192         stmmac_napi_del(ndev);
7193 error_hw_init:
7194         destroy_workqueue(priv->wq);
7195         bitmap_free(priv->af_xdp_zc_qps);
7196
7197         return ret;
7198 }
7199 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7200
7201 /**
7202  * stmmac_dvr_remove
7203  * @dev: device pointer
7204  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7205  * changes the link status, releases the DMA descriptor rings.
7206  */
7207 int stmmac_dvr_remove(struct device *dev)
7208 {
7209         struct net_device *ndev = dev_get_drvdata(dev);
7210         struct stmmac_priv *priv = netdev_priv(ndev);
7211
7212         netdev_info(priv->dev, "%s: removing driver", __func__);
7213
7214         stmmac_stop_all_dma(priv);
7215         stmmac_mac_set(priv, priv->ioaddr, false);
7216         netif_carrier_off(ndev);
7217         unregister_netdev(ndev);
7218
7219         /* Serdes power down needs to happen after VLAN filter
7220          * is deleted that is triggered by unregister_netdev().
7221          */
7222         if (priv->plat->serdes_powerdown)
7223                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7224
7225 #ifdef CONFIG_DEBUG_FS
7226         stmmac_exit_fs(ndev);
7227 #endif
7228         phylink_destroy(priv->phylink);
7229         if (priv->plat->stmmac_rst)
7230                 reset_control_assert(priv->plat->stmmac_rst);
7231         reset_control_assert(priv->plat->stmmac_ahb_rst);
7232         pm_runtime_put(dev);
7233         pm_runtime_disable(dev);
7234         if (priv->hw->pcs != STMMAC_PCS_TBI &&
7235             priv->hw->pcs != STMMAC_PCS_RTBI)
7236                 stmmac_mdio_unregister(ndev);
7237         destroy_workqueue(priv->wq);
7238         mutex_destroy(&priv->lock);
7239         bitmap_free(priv->af_xdp_zc_qps);
7240
7241         return 0;
7242 }
7243 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7244
7245 /**
7246  * stmmac_suspend - suspend callback
7247  * @dev: device pointer
7248  * Description: this is the function to suspend the device and it is called
7249  * by the platform driver to stop the network queue, release the resources,
7250  * program the PMT register (for WoL), clean and release driver resources.
7251  */
7252 int stmmac_suspend(struct device *dev)
7253 {
7254         struct net_device *ndev = dev_get_drvdata(dev);
7255         struct stmmac_priv *priv = netdev_priv(ndev);
7256         u32 chan;
7257
7258         if (!ndev || !netif_running(ndev))
7259                 return 0;
7260
7261         mutex_lock(&priv->lock);
7262
7263         netif_device_detach(ndev);
7264
7265         stmmac_disable_all_queues(priv);
7266
7267         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7268                 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
7269
7270         if (priv->eee_enabled) {
7271                 priv->tx_path_in_lpi_mode = false;
7272                 del_timer_sync(&priv->eee_ctrl_timer);
7273         }
7274
7275         /* Stop TX/RX DMA */
7276         stmmac_stop_all_dma(priv);
7277
7278         if (priv->plat->serdes_powerdown)
7279                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7280
7281         /* Enable Power down mode by programming the PMT regs */
7282         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7283                 stmmac_pmt(priv, priv->hw, priv->wolopts);
7284                 priv->irq_wake = 1;
7285         } else {
7286                 stmmac_mac_set(priv, priv->ioaddr, false);
7287                 pinctrl_pm_select_sleep_state(priv->device);
7288         }
7289
7290         mutex_unlock(&priv->lock);
7291
7292         rtnl_lock();
7293         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7294                 phylink_suspend(priv->phylink, true);
7295         } else {
7296                 if (device_may_wakeup(priv->device))
7297                         phylink_speed_down(priv->phylink, false);
7298                 phylink_suspend(priv->phylink, false);
7299         }
7300         rtnl_unlock();
7301
7302         if (priv->dma_cap.fpesel) {
7303                 /* Disable FPE */
7304                 stmmac_fpe_configure(priv, priv->ioaddr,
7305                                      priv->plat->tx_queues_to_use,
7306                                      priv->plat->rx_queues_to_use, false);
7307
7308                 stmmac_fpe_handshake(priv, false);
7309                 stmmac_fpe_stop_wq(priv);
7310         }
7311
7312         priv->speed = SPEED_UNKNOWN;
7313         return 0;
7314 }
7315 EXPORT_SYMBOL_GPL(stmmac_suspend);
7316
7317 /**
7318  * stmmac_reset_queues_param - reset queue parameters
7319  * @priv: device pointer
7320  */
7321 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7322 {
7323         u32 rx_cnt = priv->plat->rx_queues_to_use;
7324         u32 tx_cnt = priv->plat->tx_queues_to_use;
7325         u32 queue;
7326
7327         for (queue = 0; queue < rx_cnt; queue++) {
7328                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7329
7330                 rx_q->cur_rx = 0;
7331                 rx_q->dirty_rx = 0;
7332         }
7333
7334         for (queue = 0; queue < tx_cnt; queue++) {
7335                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7336
7337                 tx_q->cur_tx = 0;
7338                 tx_q->dirty_tx = 0;
7339                 tx_q->mss = 0;
7340
7341                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7342         }
7343 }
7344
7345 /**
7346  * stmmac_resume - resume callback
7347  * @dev: device pointer
7348  * Description: when resume this function is invoked to setup the DMA and CORE
7349  * in a usable state.
7350  */
7351 int stmmac_resume(struct device *dev)
7352 {
7353         struct net_device *ndev = dev_get_drvdata(dev);
7354         struct stmmac_priv *priv = netdev_priv(ndev);
7355         int ret;
7356
7357         if (!netif_running(ndev))
7358                 return 0;
7359
7360         /* Power Down bit, into the PM register, is cleared
7361          * automatically as soon as a magic packet or a Wake-up frame
7362          * is received. Anyway, it's better to manually clear
7363          * this bit because it can generate problems while resuming
7364          * from another devices (e.g. serial console).
7365          */
7366         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7367                 mutex_lock(&priv->lock);
7368                 stmmac_pmt(priv, priv->hw, 0);
7369                 mutex_unlock(&priv->lock);
7370                 priv->irq_wake = 0;
7371         } else {
7372                 pinctrl_pm_select_default_state(priv->device);
7373                 /* reset the phy so that it's ready */
7374                 if (priv->mii)
7375                         stmmac_mdio_reset(priv->mii);
7376         }
7377
7378         if (priv->plat->serdes_powerup) {
7379                 ret = priv->plat->serdes_powerup(ndev,
7380                                                  priv->plat->bsp_priv);
7381
7382                 if (ret < 0)
7383                         return ret;
7384         }
7385
7386         rtnl_lock();
7387         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7388                 phylink_resume(priv->phylink);
7389         } else {
7390                 phylink_resume(priv->phylink);
7391                 if (device_may_wakeup(priv->device))
7392                         phylink_speed_up(priv->phylink);
7393         }
7394         rtnl_unlock();
7395
7396         rtnl_lock();
7397         mutex_lock(&priv->lock);
7398
7399         stmmac_reset_queues_param(priv);
7400
7401         stmmac_free_tx_skbufs(priv);
7402         stmmac_clear_descriptors(priv);
7403
7404         stmmac_hw_setup(ndev, false);
7405         stmmac_init_coalesce(priv);
7406         stmmac_set_rx_mode(ndev);
7407
7408         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7409
7410         stmmac_enable_all_queues(priv);
7411
7412         mutex_unlock(&priv->lock);
7413         rtnl_unlock();
7414
7415         netif_device_attach(ndev);
7416
7417         return 0;
7418 }
7419 EXPORT_SYMBOL_GPL(stmmac_resume);
7420
7421 #ifndef MODULE
7422 static int __init stmmac_cmdline_opt(char *str)
7423 {
7424         char *opt;
7425
7426         if (!str || !*str)
7427                 return -EINVAL;
7428         while ((opt = strsep(&str, ",")) != NULL) {
7429                 if (!strncmp(opt, "debug:", 6)) {
7430                         if (kstrtoint(opt + 6, 0, &debug))
7431                                 goto err;
7432                 } else if (!strncmp(opt, "phyaddr:", 8)) {
7433                         if (kstrtoint(opt + 8, 0, &phyaddr))
7434                                 goto err;
7435                 } else if (!strncmp(opt, "buf_sz:", 7)) {
7436                         if (kstrtoint(opt + 7, 0, &buf_sz))
7437                                 goto err;
7438                 } else if (!strncmp(opt, "tc:", 3)) {
7439                         if (kstrtoint(opt + 3, 0, &tc))
7440                                 goto err;
7441                 } else if (!strncmp(opt, "watchdog:", 9)) {
7442                         if (kstrtoint(opt + 9, 0, &watchdog))
7443                                 goto err;
7444                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7445                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
7446                                 goto err;
7447                 } else if (!strncmp(opt, "pause:", 6)) {
7448                         if (kstrtoint(opt + 6, 0, &pause))
7449                                 goto err;
7450                 } else if (!strncmp(opt, "eee_timer:", 10)) {
7451                         if (kstrtoint(opt + 10, 0, &eee_timer))
7452                                 goto err;
7453                 } else if (!strncmp(opt, "chain_mode:", 11)) {
7454                         if (kstrtoint(opt + 11, 0, &chain_mode))
7455                                 goto err;
7456                 }
7457         }
7458         return 0;
7459
7460 err:
7461         pr_err("%s: ERROR broken module parameter conversion", __func__);
7462         return -EINVAL;
7463 }
7464
7465 __setup("stmmaceth=", stmmac_cmdline_opt);
7466 #endif /* MODULE */
7467
7468 static int __init stmmac_init(void)
7469 {
7470 #ifdef CONFIG_DEBUG_FS
7471         /* Create debugfs main directory if it doesn't exist yet */
7472         if (!stmmac_fs_dir)
7473                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7474         register_netdevice_notifier(&stmmac_notifier);
7475 #endif
7476
7477         return 0;
7478 }
7479
7480 static void __exit stmmac_exit(void)
7481 {
7482 #ifdef CONFIG_DEBUG_FS
7483         unregister_netdevice_notifier(&stmmac_notifier);
7484         debugfs_remove_recursive(stmmac_fs_dir);
7485 #endif
7486 }
7487
7488 module_init(stmmac_init)
7489 module_exit(stmmac_exit)
7490
7491 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7492 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7493 MODULE_LICENSE("GPL");