linux-2.6-microblaze.git: drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5
6         Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11   Documentation available at:
12         http://www.stlinux.com
13   Support available at:
14         https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif /* CONFIG_DEBUG_FS */
37 #include <linux/net_tstamp.h>
38 #include <linux/phylink.h>
39 #include <linux/udp.h>
40 #include <net/pkt_cls.h>
41 #include "stmmac_ptp.h"
42 #include "stmmac.h"
43 #include <linux/reset.h>
44 #include <linux/of_mdio.h>
45 #include "dwmac1000.h"
46 #include "dwxgmac2.h"
47 #include "hwif.h"
48
49 #define STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
50 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
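/*
 * Illustrative example (assuming 64-byte cache lines, not from the original
 * source): STMMAC_ALIGN(1522) first rounds up to the cache line,
 * ALIGN(1522, 64) = 1536, and then to a 16-byte boundary,
 * ALIGN(1536, 16) = 1536.
 */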
51
52 /* Module parameters */
53 #define TX_TIMEO        5000
54 static int watchdog = TX_TIMEO;
55 module_param(watchdog, int, 0644);
56 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
57
58 static int debug = -1;
59 module_param(debug, int, 0644);
60 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
61
62 static int phyaddr = -1;
63 module_param(phyaddr, int, 0444);
64 MODULE_PARM_DESC(phyaddr, "Physical device address");
65
66 #define STMMAC_TX_THRESH(x)     ((x)->dma_tx_size / 4)
67 #define STMMAC_RX_THRESH(x)     ((x)->dma_rx_size / 4)
68
69 static int flow_ctrl = FLOW_AUTO;
70 module_param(flow_ctrl, int, 0644);
71 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
72
73 static int pause = PAUSE_TIME;
74 module_param(pause, int, 0644);
75 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
76
77 #define TC_DEFAULT 64
78 static int tc = TC_DEFAULT;
79 module_param(tc, int, 0644);
80 MODULE_PARM_DESC(tc, "DMA threshold control value");
81
82 #define DEFAULT_BUFSIZE 1536
83 static int buf_sz = DEFAULT_BUFSIZE;
84 module_param(buf_sz, int, 0644);
85 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
86
87 #define STMMAC_RX_COPYBREAK     256
88
89 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
90                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
91                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
92
93 #define STMMAC_DEFAULT_LPI_TIMER        1000
94 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
95 module_param(eee_timer, int, 0644);
96 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
97 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
98
99 /* By default the driver uses ring mode to manage tx and rx descriptors,
100  * but the user can force use of chain mode instead of ring mode.
101  */
102 static unsigned int chain_mode;
103 module_param(chain_mode, int, 0444);
104 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
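/*
 * Usage sketch (assuming the driver is built as the "stmmac" module): the
 * parameters above can be set at load time, e.g.
 *
 *	modprobe stmmac chain_mode=1 buf_sz=4096 eee_timer=2000
 *
 * or, when built in, on the kernel command line as stmmac.chain_mode=1 etc.
 */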
105
106 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
107
108 #ifdef CONFIG_DEBUG_FS
109 static const struct net_device_ops stmmac_netdev_ops;
110 static void stmmac_init_fs(struct net_device *dev);
111 static void stmmac_exit_fs(struct net_device *dev);
112 #endif
113
114 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
115
116 /**
117  * stmmac_verify_args - verify the driver parameters.
118  * Description: it checks the driver parameters and sets a default value in
119  * case of errors.
120  */
121 static void stmmac_verify_args(void)
122 {
123         if (unlikely(watchdog < 0))
124                 watchdog = TX_TIMEO;
125         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
126                 buf_sz = DEFAULT_BUFSIZE;
127         if (unlikely(flow_ctrl > 1))
128                 flow_ctrl = FLOW_AUTO;
129         else if (likely(flow_ctrl < 0))
130                 flow_ctrl = FLOW_OFF;
131         if (unlikely((pause < 0) || (pause > 0xffff)))
132                 pause = PAUSE_TIME;
133         if (eee_timer < 0)
134                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
135 }
136
137 /**
138  * stmmac_disable_all_queues - Disable all queues
139  * @priv: driver private structure
140  */
141 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
142 {
143         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
144         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
145         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
146         u32 queue;
147
148         for (queue = 0; queue < maxq; queue++) {
149                 struct stmmac_channel *ch = &priv->channel[queue];
150
151                 if (queue < rx_queues_cnt)
152                         napi_disable(&ch->rx_napi);
153                 if (queue < tx_queues_cnt)
154                         napi_disable(&ch->tx_napi);
155         }
156 }
157
158 /**
159  * stmmac_enable_all_queues - Enable all queues
160  * @priv: driver private structure
161  */
162 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
163 {
164         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
165         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
166         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
167         u32 queue;
168
169         for (queue = 0; queue < maxq; queue++) {
170                 struct stmmac_channel *ch = &priv->channel[queue];
171
172                 if (queue < rx_queues_cnt)
173                         napi_enable(&ch->rx_napi);
174                 if (queue < tx_queues_cnt)
175                         napi_enable(&ch->tx_napi);
176         }
177 }
178
179 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
180 {
181         if (!test_bit(STMMAC_DOWN, &priv->state) &&
182             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
183                 queue_work(priv->wq, &priv->service_task);
184 }
185
186 static void stmmac_global_err(struct stmmac_priv *priv)
187 {
188         netif_carrier_off(priv->dev);
189         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
190         stmmac_service_event_schedule(priv);
191 }
192
193 /**
194  * stmmac_clk_csr_set - dynamically set the MDC clock
195  * @priv: driver private structure
196  * Description: this is to dynamically set the MDC clock according to the csr
197  * clock input.
198  * Note:
199  *      If a specific clk_csr value is passed from the platform
200  *      this means that the CSR Clock Range selection cannot be
201  *      changed at run-time and it is fixed (as reported in the driver
202  *      documentation). Otherwise, the driver will try to set the MDC
203  *      clock dynamically according to the actual clock input.
204  */
205 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
206 {
207         u32 clk_rate;
208
209         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
210
211         /* Platform provided default clk_csr would be assumed valid
212          * for all other cases except for the below mentioned ones.
213          * For values higher than the IEEE 802.3 specified frequency
214          * we cannot estimate the proper divider, as the frequency of
215          * clk_csr_i is not known. So we do not change the default
216          * divider.
217          */
218         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
219                 if (clk_rate < CSR_F_35M)
220                         priv->clk_csr = STMMAC_CSR_20_35M;
221                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
222                         priv->clk_csr = STMMAC_CSR_35_60M;
223                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
224                         priv->clk_csr = STMMAC_CSR_60_100M;
225                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
226                         priv->clk_csr = STMMAC_CSR_100_150M;
227                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
228                         priv->clk_csr = STMMAC_CSR_150_250M;
229                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
230                         priv->clk_csr = STMMAC_CSR_250_300M;
231         }
232
233         if (priv->plat->has_sun8i) {
234                 if (clk_rate > 160000000)
235                         priv->clk_csr = 0x03;
236                 else if (clk_rate > 80000000)
237                         priv->clk_csr = 0x02;
238                 else if (clk_rate > 40000000)
239                         priv->clk_csr = 0x01;
240                 else
241                         priv->clk_csr = 0;
242         }
243
244         if (priv->plat->has_xgmac) {
245                 if (clk_rate > 400000000)
246                         priv->clk_csr = 0x5;
247                 else if (clk_rate > 350000000)
248                         priv->clk_csr = 0x4;
249                 else if (clk_rate > 300000000)
250                         priv->clk_csr = 0x3;
251                 else if (clk_rate > 250000000)
252                         priv->clk_csr = 0x2;
253                 else if (clk_rate > 150000000)
254                         priv->clk_csr = 0x1;
255                 else
256                         priv->clk_csr = 0x0;
257         }
258 }
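/*
 * Example (illustrative): with no fixed clk_csr provided by the platform and
 * a 75 MHz csr clock, the 60-100 MHz range above is selected, i.e.
 * priv->clk_csr = STMMAC_CSR_60_100M.
 */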
259
260 static void print_pkt(unsigned char *buf, int len)
261 {
262         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
263         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
264 }
265
266 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
267 {
268         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
269         u32 avail;
270
271         if (tx_q->dirty_tx > tx_q->cur_tx)
272                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
273         else
274                 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
275
276         return avail;
277 }
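/*
 * Worked example (illustrative values, not from the original source): with a
 * TX ring of 512 descriptors, cur_tx = 510 and dirty_tx = 5 gives
 * avail = 512 - 510 + 5 - 1 = 6, while dirty_tx = 510 and cur_tx = 5 (after a
 * wrap) gives avail = 510 - 5 - 1 = 504. The "- 1" keeps one slot unused so a
 * completely full ring can be told apart from an empty one.
 */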
278
279 /**
280  * stmmac_rx_dirty - Get RX queue dirty
281  * @priv: driver private structure
282  * @queue: RX queue index
283  */
284 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
285 {
286         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
287         u32 dirty;
288
289         if (rx_q->dirty_rx <= rx_q->cur_rx)
290                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
291         else
292                 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
293
294         return dirty;
295 }
296
297 /**
298  * stmmac_enable_eee_mode - check and enter LPI mode
299  * @priv: driver private structure
300  * Description: this function verifies that all TX queues are idle and then
301  * enters LPI mode when EEE is enabled.
302  */
303 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
304 {
305         u32 tx_cnt = priv->plat->tx_queues_to_use;
306         u32 queue;
307
308         /* check if all TX queues have the work finished */
309         for (queue = 0; queue < tx_cnt; queue++) {
310                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
311
312                 if (tx_q->dirty_tx != tx_q->cur_tx)
313                         return; /* still unfinished work */
314         }
315
316         /* Check and enter in LPI mode */
317         if (!priv->tx_path_in_lpi_mode)
318                 stmmac_set_eee_mode(priv, priv->hw,
319                                 priv->plat->en_tx_lpi_clockgating);
320 }
321
322 /**
323  * stmmac_disable_eee_mode - disable and exit from LPI mode
324  * @priv: driver private structure
325  * Description: this function exits and disables EEE when the LPI state is
326  * active. It is called from the xmit path.
327  */
328 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
329 {
330         stmmac_reset_eee_mode(priv, priv->hw);
331         del_timer_sync(&priv->eee_ctrl_timer);
332         priv->tx_path_in_lpi_mode = false;
333 }
334
335 /**
336  * stmmac_eee_ctrl_timer - EEE TX SW timer.
337  * @t:  timer_list struct containing private info
338  * Description:
339  *  if there is no data transfer and if we are not in LPI state,
340  *  then the MAC transmitter can be moved to the LPI state.
341  */
342 static void stmmac_eee_ctrl_timer(struct timer_list *t)
343 {
344         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
345
346         stmmac_enable_eee_mode(priv);
347         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
348 }
349
350 /**
351  * stmmac_eee_init - init EEE
352  * @priv: driver private structure
353  * Description:
354  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
355  *  can also manage EEE, this function enables the LPI state and starts the
356  *  related timer.
357  */
358 bool stmmac_eee_init(struct stmmac_priv *priv)
359 {
360         int eee_tw_timer = priv->eee_tw_timer;
361
362         /* Using PCS we cannot deal with the phy registers at this stage
363          * so we do not support extra features like EEE.
364          */
365         if (priv->hw->pcs == STMMAC_PCS_TBI ||
366             priv->hw->pcs == STMMAC_PCS_RTBI)
367                 return false;
368
369         /* Check if MAC core supports the EEE feature. */
370         if (!priv->dma_cap.eee)
371                 return false;
372
373         mutex_lock(&priv->lock);
374
375         /* Check if it needs to be deactivated */
376         if (!priv->eee_active) {
377                 if (priv->eee_enabled) {
378                         netdev_dbg(priv->dev, "disable EEE\n");
379                         del_timer_sync(&priv->eee_ctrl_timer);
380                         stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
381                 }
382                 mutex_unlock(&priv->lock);
383                 return false;
384         }
385
386         if (priv->eee_active && !priv->eee_enabled) {
387                 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
388                 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
389                                      eee_tw_timer);
390         }
391
392         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
393
394         mutex_unlock(&priv->lock);
395         netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
396         return true;
397 }
398
399 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
400  * @priv: driver private structure
401  * @p : descriptor pointer
402  * @skb : the socket buffer
403  * Description:
404  * This function will read the timestamp from the descriptor and pass it to
405  * the stack. It also performs some sanity checks.
406  */
407 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
408                                    struct dma_desc *p, struct sk_buff *skb)
409 {
410         struct skb_shared_hwtstamps shhwtstamp;
411         bool found = false;
412         u64 ns = 0;
413
414         if (!priv->hwts_tx_en)
415                 return;
416
417         /* exit if skb doesn't support hw tstamp */
418         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
419                 return;
420
421         /* check tx tstamp status */
422         if (stmmac_get_tx_timestamp_status(priv, p)) {
423                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
424                 found = true;
425         } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
426                 found = true;
427         }
428
429         if (found) {
430                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
431                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
432
433                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
434                 /* pass tstamp to stack */
435                 skb_tstamp_tx(skb, &shhwtstamp);
436         }
437 }
438
439 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
440  * @priv: driver private structure
441  * @p : descriptor pointer
442  * @np : next descriptor pointer
443  * @skb : the socket buffer
444  * Description:
445  * This function will read the received packet's timestamp from the descriptor
446  * and pass it to the stack. It also performs some sanity checks.
447  */
448 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
449                                    struct dma_desc *np, struct sk_buff *skb)
450 {
451         struct skb_shared_hwtstamps *shhwtstamp = NULL;
452         struct dma_desc *desc = p;
453         u64 ns = 0;
454
455         if (!priv->hwts_rx_en)
456                 return;
457         /* For GMAC4, the valid timestamp is from CTX next desc. */
458         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
459                 desc = np;
460
461         /* Check if timestamp is available */
462         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
463                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
464                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
465                 shhwtstamp = skb_hwtstamps(skb);
466                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
467                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
468         } else  {
469                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
470         }
471 }
472
473 /**
474  *  stmmac_hwtstamp_set - control hardware timestamping.
475  *  @dev: device pointer.
476  *  @ifr: An IOCTL specific structure, that can contain a pointer to
477  *  a proprietary structure used to pass information to the driver.
478  *  Description:
479  *  This function configures the MAC to enable/disable both outgoing(TX)
480  *  and incoming(RX) packets time stamping based on user input.
481  *  Return Value:
482  *  0 on success and an appropriate negative error code on failure.
483  */
484 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
485 {
486         struct stmmac_priv *priv = netdev_priv(dev);
487         struct hwtstamp_config config;
488         struct timespec64 now;
489         u64 temp = 0;
490         u32 ptp_v2 = 0;
491         u32 tstamp_all = 0;
492         u32 ptp_over_ipv4_udp = 0;
493         u32 ptp_over_ipv6_udp = 0;
494         u32 ptp_over_ethernet = 0;
495         u32 snap_type_sel = 0;
496         u32 ts_master_en = 0;
497         u32 ts_event_en = 0;
498         u32 sec_inc = 0;
499         u32 value = 0;
500         bool xmac;
501
502         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
503
504         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
505                 netdev_alert(priv->dev, "No support for HW time stamping\n");
506                 priv->hwts_tx_en = 0;
507                 priv->hwts_rx_en = 0;
508
509                 return -EOPNOTSUPP;
510         }
511
512         if (copy_from_user(&config, ifr->ifr_data,
513                            sizeof(config)))
514                 return -EFAULT;
515
516         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
517                    __func__, config.flags, config.tx_type, config.rx_filter);
518
519         /* reserved for future extensions */
520         if (config.flags)
521                 return -EINVAL;
522
523         if (config.tx_type != HWTSTAMP_TX_OFF &&
524             config.tx_type != HWTSTAMP_TX_ON)
525                 return -ERANGE;
526
527         if (priv->adv_ts) {
528                 switch (config.rx_filter) {
529                 case HWTSTAMP_FILTER_NONE:
530                         /* time stamp no incoming packet at all */
531                         config.rx_filter = HWTSTAMP_FILTER_NONE;
532                         break;
533
534                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
535                         /* PTP v1, UDP, any kind of event packet */
536                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
537                         /* 'xmac' hardware can support Sync, Pdelay_Req and
538                          * Pdelay_resp by setting bit14 and bits17/16 to 01.
539                          * This leaves Delay_Req timestamps out.
540                          * Enable all events *and* general purpose message
541                          * timestamping
542                          */
543                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
544                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
545                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
546                         break;
547
548                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
549                         /* PTP v1, UDP, Sync packet */
550                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
551                         /* take time stamp for SYNC messages only */
552                         ts_event_en = PTP_TCR_TSEVNTENA;
553
554                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
555                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
556                         break;
557
558                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
559                         /* PTP v1, UDP, Delay_req packet */
560                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
561                         /* take time stamp for Delay_Req messages only */
562                         ts_master_en = PTP_TCR_TSMSTRENA;
563                         ts_event_en = PTP_TCR_TSEVNTENA;
564
565                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
566                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
567                         break;
568
569                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
570                         /* PTP v2, UDP, any kind of event packet */
571                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
572                         ptp_v2 = PTP_TCR_TSVER2ENA;
573                         /* take time stamp for all event messages */
574                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
575
576                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
577                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
578                         break;
579
580                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
581                         /* PTP v2, UDP, Sync packet */
582                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
583                         ptp_v2 = PTP_TCR_TSVER2ENA;
584                         /* take time stamp for SYNC messages only */
585                         ts_event_en = PTP_TCR_TSEVNTENA;
586
587                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
588                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
589                         break;
590
591                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
592                         /* PTP v2, UDP, Delay_req packet */
593                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
594                         ptp_v2 = PTP_TCR_TSVER2ENA;
595                         /* take time stamp for Delay_Req messages only */
596                         ts_master_en = PTP_TCR_TSMSTRENA;
597                         ts_event_en = PTP_TCR_TSEVNTENA;
598
599                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
600                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
601                         break;
602
603                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
604                         /* PTP v2/802.AS1 any layer, any kind of event packet */
605                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
606                         ptp_v2 = PTP_TCR_TSVER2ENA;
607                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
608                         if (priv->synopsys_id != DWMAC_CORE_5_10)
609                                 ts_event_en = PTP_TCR_TSEVNTENA;
610                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
611                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
612                         ptp_over_ethernet = PTP_TCR_TSIPENA;
613                         break;
614
615                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
616                         /* PTP v2/802.AS1, any layer, Sync packet */
617                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
618                         ptp_v2 = PTP_TCR_TSVER2ENA;
619                         /* take time stamp for SYNC messages only */
620                         ts_event_en = PTP_TCR_TSEVNTENA;
621
622                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
623                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
624                         ptp_over_ethernet = PTP_TCR_TSIPENA;
625                         break;
626
627                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
628                         /* PTP v2/802.AS1, any layer, Delay_req packet */
629                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
630                         ptp_v2 = PTP_TCR_TSVER2ENA;
631                         /* take time stamp for Delay_Req messages only */
632                         ts_master_en = PTP_TCR_TSMSTRENA;
633                         ts_event_en = PTP_TCR_TSEVNTENA;
634
635                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
636                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
637                         ptp_over_ethernet = PTP_TCR_TSIPENA;
638                         break;
639
640                 case HWTSTAMP_FILTER_NTP_ALL:
641                 case HWTSTAMP_FILTER_ALL:
642                         /* time stamp any incoming packet */
643                         config.rx_filter = HWTSTAMP_FILTER_ALL;
644                         tstamp_all = PTP_TCR_TSENALL;
645                         break;
646
647                 default:
648                         return -ERANGE;
649                 }
650         } else {
651                 switch (config.rx_filter) {
652                 case HWTSTAMP_FILTER_NONE:
653                         config.rx_filter = HWTSTAMP_FILTER_NONE;
654                         break;
655                 default:
656                         /* PTP v1, UDP, any kind of event packet */
657                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
658                         break;
659                 }
660         }
661         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
662         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
663
664         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
665                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
666         else {
667                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
668                          tstamp_all | ptp_v2 | ptp_over_ethernet |
669                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
670                          ts_master_en | snap_type_sel);
671                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
672
673                 /* program Sub Second Increment reg */
674                 stmmac_config_sub_second_increment(priv,
675                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
676                                 xmac, &sec_inc);
677                 temp = div_u64(1000000000ULL, sec_inc);
678
679                 /* Store sub second increment and flags for later use */
680                 priv->sub_second_inc = sec_inc;
681                 priv->systime_flags = value;
682
683                 /* calculate default addend value:
684                  * the formula is:
685                  * addend = (2^32)/freq_div_ratio;
686                  * where, freq_div_ratio = 1e9ns/sec_inc
687                  */
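                /* Worked example with illustrative numbers (not from the
                 * original source): for clk_ptp_rate = 62.5 MHz and
                 * sec_inc = 20 ns, temp = 1e9 / 20 = 50000000 and
                 * addend = (50000000 << 32) / 62500000, roughly 0.8 * 2^32
                 * (about 0xCCCCCCCC), i.e. the accumulator overflows and the
                 * counter advances by sec_inc on about 4 of every 5 ptp
                 * clock cycles.
                 */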
688                 temp = (u64)(temp << 32);
689                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
690                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
691
692                 /* initialize system time */
693                 ktime_get_real_ts64(&now);
694
695                 /* lower 32 bits of tv_sec are safe until y2106 */
696                 stmmac_init_systime(priv, priv->ptpaddr,
697                                 (u32)now.tv_sec, now.tv_nsec);
698         }
699
700         memcpy(&priv->tstamp_config, &config, sizeof(config));
701
702         return copy_to_user(ifr->ifr_data, &config,
703                             sizeof(config)) ? -EFAULT : 0;
704 }
705
706 /**
707  *  stmmac_hwtstamp_get - read hardware timestamping.
708  *  @dev: device pointer.
709  *  @ifr: An IOCTL specific structure, that can contain a pointer to
710  *  a proprietary structure used to pass information to the driver.
711  *  Description:
712  *  This function obtains the current hardware timestamping settings
713  *  as requested.
714  */
715 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
716 {
717         struct stmmac_priv *priv = netdev_priv(dev);
718         struct hwtstamp_config *config = &priv->tstamp_config;
719
720         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
721                 return -EOPNOTSUPP;
722
723         return copy_to_user(ifr->ifr_data, config,
724                             sizeof(*config)) ? -EFAULT : 0;
725 }
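/*
 * Illustrative userspace sketch (not part of the driver) showing how the two
 * handlers above are reached through the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls;
 * the interface name "eth0" and the filter choice are assumptions:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/sockios.h>
 *	#include <linux/net_tstamp.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	 // ends up in stmmac_hwtstamp_set()
 */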
726
727 /**
728  * stmmac_init_ptp - init PTP
729  * @priv: driver private structure
730  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
731  * This is done by looking at the HW cap. register.
732  * This function also registers the ptp driver.
733  */
734 static int stmmac_init_ptp(struct stmmac_priv *priv)
735 {
736         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
737
738         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
739                 return -EOPNOTSUPP;
740
741         priv->adv_ts = 0;
742         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
743         if (xmac && priv->dma_cap.atime_stamp)
744                 priv->adv_ts = 1;
745         /* Dwmac 3.x core with extend_desc can support adv_ts */
746         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
747                 priv->adv_ts = 1;
748
749         if (priv->dma_cap.time_stamp)
750                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
751
752         if (priv->adv_ts)
753                 netdev_info(priv->dev,
754                             "IEEE 1588-2008 Advanced Timestamp supported\n");
755
756         priv->hwts_tx_en = 0;
757         priv->hwts_rx_en = 0;
758
759         stmmac_ptp_register(priv);
760
761         return 0;
762 }
763
764 static void stmmac_release_ptp(struct stmmac_priv *priv)
765 {
766         clk_disable_unprepare(priv->plat->clk_ptp_ref);
767         stmmac_ptp_unregister(priv);
768 }
769
770 /**
771  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
772  *  @priv: driver private structure
773  *  @duplex: duplex passed to the next function
774  *  Description: It is used for configuring the flow control in all queues
775  */
776 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
777 {
778         u32 tx_cnt = priv->plat->tx_queues_to_use;
779
780         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
781                         priv->pause, tx_cnt);
782 }
783
784 static void stmmac_validate(struct phylink_config *config,
785                             unsigned long *supported,
786                             struct phylink_link_state *state)
787 {
788         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
789         __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
790         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
791         int tx_cnt = priv->plat->tx_queues_to_use;
792         int max_speed = priv->plat->max_speed;
793
794         phylink_set(mac_supported, 10baseT_Half);
795         phylink_set(mac_supported, 10baseT_Full);
796         phylink_set(mac_supported, 100baseT_Half);
797         phylink_set(mac_supported, 100baseT_Full);
798         phylink_set(mac_supported, 1000baseT_Half);
799         phylink_set(mac_supported, 1000baseT_Full);
800         phylink_set(mac_supported, 1000baseKX_Full);
801
802         phylink_set(mac_supported, Autoneg);
803         phylink_set(mac_supported, Pause);
804         phylink_set(mac_supported, Asym_Pause);
805         phylink_set_port_modes(mac_supported);
806
807         /* Cut down 1G if asked to */
808         if ((max_speed > 0) && (max_speed < 1000)) {
809                 phylink_set(mask, 1000baseT_Full);
810                 phylink_set(mask, 1000baseX_Full);
811         } else if (priv->plat->has_xgmac) {
812                 if (!max_speed || (max_speed >= 2500)) {
813                         phylink_set(mac_supported, 2500baseT_Full);
814                         phylink_set(mac_supported, 2500baseX_Full);
815                 }
816                 if (!max_speed || (max_speed >= 5000)) {
817                         phylink_set(mac_supported, 5000baseT_Full);
818                 }
819                 if (!max_speed || (max_speed >= 10000)) {
820                         phylink_set(mac_supported, 10000baseSR_Full);
821                         phylink_set(mac_supported, 10000baseLR_Full);
822                         phylink_set(mac_supported, 10000baseER_Full);
823                         phylink_set(mac_supported, 10000baseLRM_Full);
824                         phylink_set(mac_supported, 10000baseT_Full);
825                         phylink_set(mac_supported, 10000baseKX4_Full);
826                         phylink_set(mac_supported, 10000baseKR_Full);
827                 }
828                 if (!max_speed || (max_speed >= 25000)) {
829                         phylink_set(mac_supported, 25000baseCR_Full);
830                         phylink_set(mac_supported, 25000baseKR_Full);
831                         phylink_set(mac_supported, 25000baseSR_Full);
832                 }
833                 if (!max_speed || (max_speed >= 40000)) {
834                         phylink_set(mac_supported, 40000baseKR4_Full);
835                         phylink_set(mac_supported, 40000baseCR4_Full);
836                         phylink_set(mac_supported, 40000baseSR4_Full);
837                         phylink_set(mac_supported, 40000baseLR4_Full);
838                 }
839                 if (!max_speed || (max_speed >= 50000)) {
840                         phylink_set(mac_supported, 50000baseCR2_Full);
841                         phylink_set(mac_supported, 50000baseKR2_Full);
842                         phylink_set(mac_supported, 50000baseSR2_Full);
843                         phylink_set(mac_supported, 50000baseKR_Full);
844                         phylink_set(mac_supported, 50000baseSR_Full);
845                         phylink_set(mac_supported, 50000baseCR_Full);
846                         phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
847                         phylink_set(mac_supported, 50000baseDR_Full);
848                 }
849                 if (!max_speed || (max_speed >= 100000)) {
850                         phylink_set(mac_supported, 100000baseKR4_Full);
851                         phylink_set(mac_supported, 100000baseSR4_Full);
852                         phylink_set(mac_supported, 100000baseCR4_Full);
853                         phylink_set(mac_supported, 100000baseLR4_ER4_Full);
854                         phylink_set(mac_supported, 100000baseKR2_Full);
855                         phylink_set(mac_supported, 100000baseSR2_Full);
856                         phylink_set(mac_supported, 100000baseCR2_Full);
857                         phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
858                         phylink_set(mac_supported, 100000baseDR2_Full);
859                 }
860         }
861
862         /* Half-Duplex can only work with a single queue */
863         if (tx_cnt > 1) {
864                 phylink_set(mask, 10baseT_Half);
865                 phylink_set(mask, 100baseT_Half);
866                 phylink_set(mask, 1000baseT_Half);
867         }
868
869         linkmode_and(supported, supported, mac_supported);
870         linkmode_andnot(supported, supported, mask);
871
872         linkmode_and(state->advertising, state->advertising, mac_supported);
873         linkmode_andnot(state->advertising, state->advertising, mask);
874
875         /* If PCS is supported, check which modes it supports. */
876         stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
877 }
878
879 static void stmmac_mac_pcs_get_state(struct phylink_config *config,
880                                      struct phylink_link_state *state)
881 {
882         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
883
884         state->link = 0;
885         stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
886 }
887
888 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
889                               const struct phylink_link_state *state)
890 {
891         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
892
893         stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
894 }
895
896 static void stmmac_mac_an_restart(struct phylink_config *config)
897 {
898         /* Not Supported */
899 }
900
901 static void stmmac_mac_link_down(struct phylink_config *config,
902                                  unsigned int mode, phy_interface_t interface)
903 {
904         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
905
906         stmmac_mac_set(priv, priv->ioaddr, false);
907         priv->eee_active = false;
908         priv->tx_lpi_enabled = false;
909         stmmac_eee_init(priv);
910         stmmac_set_eee_pls(priv, priv->hw, false);
911 }
912
913 static void stmmac_mac_link_up(struct phylink_config *config,
914                                struct phy_device *phy,
915                                unsigned int mode, phy_interface_t interface,
916                                int speed, int duplex,
917                                bool tx_pause, bool rx_pause)
918 {
919         struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
920         u32 ctrl;
921
922         stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
923
924         ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
925         ctrl &= ~priv->hw->link.speed_mask;
926
927         if (interface == PHY_INTERFACE_MODE_USXGMII) {
928                 switch (speed) {
929                 case SPEED_10000:
930                         ctrl |= priv->hw->link.xgmii.speed10000;
931                         break;
932                 case SPEED_5000:
933                         ctrl |= priv->hw->link.xgmii.speed5000;
934                         break;
935                 case SPEED_2500:
936                         ctrl |= priv->hw->link.xgmii.speed2500;
937                         break;
938                 default:
939                         return;
940                 }
941         } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
942                 switch (speed) {
943                 case SPEED_100000:
944                         ctrl |= priv->hw->link.xlgmii.speed100000;
945                         break;
946                 case SPEED_50000:
947                         ctrl |= priv->hw->link.xlgmii.speed50000;
948                         break;
949                 case SPEED_40000:
950                         ctrl |= priv->hw->link.xlgmii.speed40000;
951                         break;
952                 case SPEED_25000:
953                         ctrl |= priv->hw->link.xlgmii.speed25000;
954                         break;
955                 case SPEED_10000:
956                         ctrl |= priv->hw->link.xgmii.speed10000;
957                         break;
958                 case SPEED_2500:
959                         ctrl |= priv->hw->link.speed2500;
960                         break;
961                 case SPEED_1000:
962                         ctrl |= priv->hw->link.speed1000;
963                         break;
964                 default:
965                         return;
966                 }
967         } else {
968                 switch (speed) {
969                 case SPEED_2500:
970                         ctrl |= priv->hw->link.speed2500;
971                         break;
972                 case SPEED_1000:
973                         ctrl |= priv->hw->link.speed1000;
974                         break;
975                 case SPEED_100:
976                         ctrl |= priv->hw->link.speed100;
977                         break;
978                 case SPEED_10:
979                         ctrl |= priv->hw->link.speed10;
980                         break;
981                 default:
982                         return;
983                 }
984         }
985
986         priv->speed = speed;
987
988         if (priv->plat->fix_mac_speed)
989                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
990
991         if (!duplex)
992                 ctrl &= ~priv->hw->link.duplex;
993         else
994                 ctrl |= priv->hw->link.duplex;
995
996         /* Flow Control operation */
997         if (tx_pause && rx_pause)
998                 stmmac_mac_flow_ctrl(priv, duplex);
999
1000         writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1001
1002         stmmac_mac_set(priv, priv->ioaddr, true);
1003         if (phy && priv->dma_cap.eee) {
1004                 priv->eee_active = phy_init_eee(phy, 1) >= 0;
1005                 priv->eee_enabled = stmmac_eee_init(priv);
1006                 priv->tx_lpi_enabled = priv->eee_enabled;
1007                 stmmac_set_eee_pls(priv, priv->hw, true);
1008         }
1009 }
1010
1011 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1012         .validate = stmmac_validate,
1013         .mac_pcs_get_state = stmmac_mac_pcs_get_state,
1014         .mac_config = stmmac_mac_config,
1015         .mac_an_restart = stmmac_mac_an_restart,
1016         .mac_link_down = stmmac_mac_link_down,
1017         .mac_link_up = stmmac_mac_link_up,
1018 };
1019
1020 /**
1021  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1022  * @priv: driver private structure
1023  * Description: this verifies if the HW supports the Physical Coding
1024  * Sublayer (PCS) interface, which can be used when the MAC is
1025  * configured for the TBI, RTBI, or SGMII PHY interface.
1026  */
1027 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1028 {
1029         int interface = priv->plat->interface;
1030
1031         if (priv->dma_cap.pcs) {
1032                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1033                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1034                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1035                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1036                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1037                         priv->hw->pcs = STMMAC_PCS_RGMII;
1038                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1039                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1040                         priv->hw->pcs = STMMAC_PCS_SGMII;
1041                 }
1042         }
1043 }
1044
1045 /**
1046  * stmmac_init_phy - PHY initialization
1047  * @dev: net device structure
1048  * Description: it initializes the driver's PHY state, and attaches the PHY
1049  * to the mac driver.
1050  *  Return value:
1051  *  0 on success
1052  */
1053 static int stmmac_init_phy(struct net_device *dev)
1054 {
1055         struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1056         struct stmmac_priv *priv = netdev_priv(dev);
1057         struct device_node *node;
1058         int ret;
1059
1060         node = priv->plat->phylink_node;
1061
1062         if (node)
1063                 ret = phylink_of_phy_connect(priv->phylink, node, 0);
1064
1065         /* Some DT bindings do not set up the PHY handle. Let's try to
1066          * parse it manually.
1067          */
1068         if (!node || ret) {
1069                 int addr = priv->plat->phy_addr;
1070                 struct phy_device *phydev;
1071
1072                 phydev = mdiobus_get_phy(priv->mii, addr);
1073                 if (!phydev) {
1074                         netdev_err(priv->dev, "no phy at addr %d\n", addr);
1075                         return -ENODEV;
1076                 }
1077
1078                 ret = phylink_connect_phy(priv->phylink, phydev);
1079         }
1080
1081         phylink_ethtool_get_wol(priv->phylink, &wol);
1082         device_set_wakeup_capable(priv->device, !!wol.supported);
1083
1084         return ret;
1085 }
1086
1087 static int stmmac_phy_setup(struct stmmac_priv *priv)
1088 {
1089         struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
1090         int mode = priv->plat->phy_interface;
1091         struct phylink *phylink;
1092
1093         priv->phylink_config.dev = &priv->dev->dev;
1094         priv->phylink_config.type = PHYLINK_NETDEV;
1095         priv->phylink_config.pcs_poll = true;
1096
1097         if (!fwnode)
1098                 fwnode = dev_fwnode(priv->device);
1099
1100         phylink = phylink_create(&priv->phylink_config, fwnode,
1101                                  mode, &stmmac_phylink_mac_ops);
1102         if (IS_ERR(phylink))
1103                 return PTR_ERR(phylink);
1104
1105         priv->phylink = phylink;
1106         return 0;
1107 }
1108
1109 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1110 {
1111         u32 rx_cnt = priv->plat->rx_queues_to_use;
1112         void *head_rx;
1113         u32 queue;
1114
1115         /* Display RX rings */
1116         for (queue = 0; queue < rx_cnt; queue++) {
1117                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1118
1119                 pr_info("\tRX Queue %u rings\n", queue);
1120
1121                 if (priv->extend_desc)
1122                         head_rx = (void *)rx_q->dma_erx;
1123                 else
1124                         head_rx = (void *)rx_q->dma_rx;
1125
1126                 /* Display RX ring */
1127                 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
1128         }
1129 }
1130
1131 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1132 {
1133         u32 tx_cnt = priv->plat->tx_queues_to_use;
1134         void *head_tx;
1135         u32 queue;
1136
1137         /* Display TX rings */
1138         for (queue = 0; queue < tx_cnt; queue++) {
1139                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1140
1141                 pr_info("\tTX Queue %d rings\n", queue);
1142
1143                 if (priv->extend_desc)
1144                         head_tx = (void *)tx_q->dma_etx;
1145                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1146                         head_tx = (void *)tx_q->dma_entx;
1147                 else
1148                         head_tx = (void *)tx_q->dma_tx;
1149
1150                 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
1151         }
1152 }
1153
1154 static void stmmac_display_rings(struct stmmac_priv *priv)
1155 {
1156         /* Display RX ring */
1157         stmmac_display_rx_rings(priv);
1158
1159         /* Display TX ring */
1160         stmmac_display_tx_rings(priv);
1161 }
1162
1163 static int stmmac_set_bfsize(int mtu, int bufsize)
1164 {
1165         int ret = bufsize;
1166
1167         if (mtu >= BUF_SIZE_8KiB)
1168                 ret = BUF_SIZE_16KiB;
1169         else if (mtu >= BUF_SIZE_4KiB)
1170                 ret = BUF_SIZE_8KiB;
1171         else if (mtu >= BUF_SIZE_2KiB)
1172                 ret = BUF_SIZE_4KiB;
1173         else if (mtu > DEFAULT_BUFSIZE)
1174                 ret = BUF_SIZE_2KiB;
1175         else
1176                 ret = DEFAULT_BUFSIZE;
1177
1178         return ret;
1179 }
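/*
 * Example (illustrative): a standard 1500-byte MTU keeps DEFAULT_BUFSIZE
 * (1536), while a 3000-byte MTU falls in the >= BUF_SIZE_2KiB range and
 * selects BUF_SIZE_4KiB.
 */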
1180
1181 /**
1182  * stmmac_clear_rx_descriptors - clear RX descriptors
1183  * @priv: driver private structure
1184  * @queue: RX queue index
1185  * Description: this function is called to clear the RX descriptors
1186  * whether basic or extended descriptors are in use.
1187  */
1188 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1189 {
1190         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1191         int i;
1192
1193         /* Clear the RX descriptors */
1194         for (i = 0; i < priv->dma_rx_size; i++)
1195                 if (priv->extend_desc)
1196                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1197                                         priv->use_riwt, priv->mode,
1198                                         (i == priv->dma_rx_size - 1),
1199                                         priv->dma_buf_sz);
1200                 else
1201                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1202                                         priv->use_riwt, priv->mode,
1203                                         (i == priv->dma_rx_size - 1),
1204                                         priv->dma_buf_sz);
1205 }
1206
1207 /**
1208  * stmmac_clear_tx_descriptors - clear tx descriptors
1209  * @priv: driver private structure
1210  * @queue: TX queue index.
1211  * Description: this function is called to clear the TX descriptors
1212  * whether basic or extended descriptors are in use.
1213  */
1214 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1215 {
1216         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1217         int i;
1218
1219         /* Clear the TX descriptors */
1220         for (i = 0; i < priv->dma_tx_size; i++) {
1221                 int last = (i == (priv->dma_tx_size - 1));
1222                 struct dma_desc *p;
1223
1224                 if (priv->extend_desc)
1225                         p = &tx_q->dma_etx[i].basic;
1226                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1227                         p = &tx_q->dma_entx[i].basic;
1228                 else
1229                         p = &tx_q->dma_tx[i];
1230
1231                 stmmac_init_tx_desc(priv, p, priv->mode, last);
1232         }
1233 }
1234
1235 /**
1236  * stmmac_clear_descriptors - clear descriptors
1237  * @priv: driver private structure
1238  * Description: this function is called to clear the TX and RX descriptors
1239  * whether basic or extended descriptors are in use.
1240  */
1241 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1242 {
1243         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1244         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1245         u32 queue;
1246
1247         /* Clear the RX descriptors */
1248         for (queue = 0; queue < rx_queue_cnt; queue++)
1249                 stmmac_clear_rx_descriptors(priv, queue);
1250
1251         /* Clear the TX descriptors */
1252         for (queue = 0; queue < tx_queue_cnt; queue++)
1253                 stmmac_clear_tx_descriptors(priv, queue);
1254 }
1255
1256 /**
1257  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1258  * @priv: driver private structure
1259  * @p: descriptor pointer
1260  * @i: descriptor index
1261  * @flags: gfp flag
1262  * @queue: RX queue index
1263  * Description: this function is called to allocate a receive buffer, perform
1264  * the DMA mapping and init the descriptor.
1265  */
1266 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1267                                   int i, gfp_t flags, u32 queue)
1268 {
1269         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1270         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1271
1272         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
1273         if (!buf->page)
1274                 return -ENOMEM;
1275
1276         if (priv->sph) {
1277                 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
1278                 if (!buf->sec_page)
1279                         return -ENOMEM;
1280
1281                 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1282                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
1283         } else {
1284                 buf->sec_page = NULL;
1285         }
1286
1287         buf->addr = page_pool_get_dma_addr(buf->page);
1288         stmmac_set_desc_addr(priv, p, buf->addr);
1289         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1290                 stmmac_init_desc3(priv, p);
1291
1292         return 0;
1293 }
1294
1295 /**
1296  * stmmac_free_rx_buffer - free RX dma buffers
1297  * @priv: private structure
1298  * @queue: RX queue index
1299  * @i: buffer index.
1300  */
1301 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1302 {
1303         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1304         struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1305
1306         if (buf->page)
1307                 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1308         buf->page = NULL;
1309
1310         if (buf->sec_page)
1311                 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1312         buf->sec_page = NULL;
1313 }
1314
1315 /**
1316  * stmmac_free_tx_buffer - free TX dma buffers
1317  * @priv: private structure
1318  * @queue: TX queue index
1319  * @i: buffer index.
1320  */
1321 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1322 {
1323         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1324
1325         if (tx_q->tx_skbuff_dma[i].buf) {
1326                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1327                         dma_unmap_page(priv->device,
1328                                        tx_q->tx_skbuff_dma[i].buf,
1329                                        tx_q->tx_skbuff_dma[i].len,
1330                                        DMA_TO_DEVICE);
1331                 else
1332                         dma_unmap_single(priv->device,
1333                                          tx_q->tx_skbuff_dma[i].buf,
1334                                          tx_q->tx_skbuff_dma[i].len,
1335                                          DMA_TO_DEVICE);
1336         }
1337
1338         if (tx_q->tx_skbuff[i]) {
1339                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1340                 tx_q->tx_skbuff[i] = NULL;
1341                 tx_q->tx_skbuff_dma[i].buf = 0;
1342                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1343         }
1344 }
1345
1346 /**
1347  * init_dma_rx_desc_rings - init the RX descriptor rings
1348  * @dev: net device structure
1349  * @flags: gfp flag.
1350  * Description: this function initializes the DMA RX descriptors
1351  * and allocates the socket buffers. It supports the chained and ring
1352  * modes.
1353  */
1354 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1355 {
1356         struct stmmac_priv *priv = netdev_priv(dev);
1357         u32 rx_count = priv->plat->rx_queues_to_use;
1358         int ret = -ENOMEM;
1359         int queue;
1360         int i;
1361
1362         /* RX INITIALIZATION */
1363         netif_dbg(priv, probe, priv->dev,
1364                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1365
1366         for (queue = 0; queue < rx_count; queue++) {
1367                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1368
1369                 netif_dbg(priv, probe, priv->dev,
1370                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1371                           (u32)rx_q->dma_rx_phy);
1372
1373                 stmmac_clear_rx_descriptors(priv, queue);
1374
1375                 for (i = 0; i < priv->dma_rx_size; i++) {
1376                         struct dma_desc *p;
1377
1378                         if (priv->extend_desc)
1379                                 p = &((rx_q->dma_erx + i)->basic);
1380                         else
1381                                 p = rx_q->dma_rx + i;
1382
1383                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1384                                                      queue);
1385                         if (ret)
1386                                 goto err_init_rx_buffers;
1387                 }
1388
1389                 rx_q->cur_rx = 0;
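                /* i == dma_rx_size here, so dirty_rx starts at 0: the whole
                 * ring has been filled and handed to the DMA.
                 */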
1390                 rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1391
1392                 /* Setup the chained descriptor addresses */
1393                 if (priv->mode == STMMAC_CHAIN_MODE) {
1394                         if (priv->extend_desc)
1395                                 stmmac_mode_init(priv, rx_q->dma_erx,
1396                                                  rx_q->dma_rx_phy,
1397                                                  priv->dma_rx_size, 1);
1398                         else
1399                                 stmmac_mode_init(priv, rx_q->dma_rx,
1400                                                  rx_q->dma_rx_phy,
1401                                                  priv->dma_rx_size, 0);
1402                 }
1403         }
1404
1405         return 0;
1406
1407 err_init_rx_buffers:
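        /* Unwind on failure: free the buffers initialized so far in the
         * failing queue, then all buffers of the fully initialized queues.
         */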
1408         while (queue >= 0) {
1409                 while (--i >= 0)
1410                         stmmac_free_rx_buffer(priv, queue, i);
1411
1412                 if (queue == 0)
1413                         break;
1414
1415                 i = priv->dma_rx_size;
1416                 queue--;
1417         }
1418
1419         return ret;
1420 }
1421
1422 /**
1423  * init_dma_tx_desc_rings - init the TX descriptor rings
1424  * @dev: net device structure.
1425  * Description: this function initializes the DMA TX descriptors
1426  * and allocates the socket buffers. It supports the chained and ring
1427  * modes.
1428  */
1429 static int init_dma_tx_desc_rings(struct net_device *dev)
1430 {
1431         struct stmmac_priv *priv = netdev_priv(dev);
1432         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1433         u32 queue;
1434         int i;
1435
1436         for (queue = 0; queue < tx_queue_cnt; queue++) {
1437                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1438
1439                 netif_dbg(priv, probe, priv->dev,
1440                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1441                          (u32)tx_q->dma_tx_phy);
1442
1443                 /* Setup the chained descriptor addresses */
1444                 if (priv->mode == STMMAC_CHAIN_MODE) {
1445                         if (priv->extend_desc)
1446                                 stmmac_mode_init(priv, tx_q->dma_etx,
1447                                                  tx_q->dma_tx_phy,
1448                                                  priv->dma_tx_size, 1);
1449                         else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1450                                 stmmac_mode_init(priv, tx_q->dma_tx,
1451                                                  tx_q->dma_tx_phy,
1452                                                  priv->dma_tx_size, 0);
1453                 }
1454
1455                 for (i = 0; i < priv->dma_tx_size; i++) {
1456                         struct dma_desc *p;
1457                         if (priv->extend_desc)
1458                                 p = &((tx_q->dma_etx + i)->basic);
1459                         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1460                                 p = &((tx_q->dma_entx + i)->basic);
1461                         else
1462                                 p = tx_q->dma_tx + i;
1463
1464                         stmmac_clear_desc(priv, p);
1465
1466                         tx_q->tx_skbuff_dma[i].buf = 0;
1467                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1468                         tx_q->tx_skbuff_dma[i].len = 0;
1469                         tx_q->tx_skbuff_dma[i].last_segment = false;
1470                         tx_q->tx_skbuff[i] = NULL;
1471                 }
1472
1473                 tx_q->dirty_tx = 0;
1474                 tx_q->cur_tx = 0;
1475                 tx_q->mss = 0;
1476
1477                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1478         }
1479
1480         return 0;
1481 }
1482
1483 /**
1484  * init_dma_desc_rings - init the RX/TX descriptor rings
1485  * @dev: net device structure
1486  * @flags: gfp flag.
1487  * Description: this function initializes the DMA RX/TX descriptors
1488  * and allocates the socket buffers. It supports the chained and ring
1489  * modes.
1490  */
1491 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1492 {
1493         struct stmmac_priv *priv = netdev_priv(dev);
1494         int ret;
1495
1496         ret = init_dma_rx_desc_rings(dev, flags);
1497         if (ret)
1498                 return ret;
1499
1500         ret = init_dma_tx_desc_rings(dev);
1501
1502         stmmac_clear_descriptors(priv);
1503
1504         if (netif_msg_hw(priv))
1505                 stmmac_display_rings(priv);
1506
1507         return ret;
1508 }
1509
1510 /**
1511  * dma_free_rx_skbufs - free RX dma buffers
1512  * @priv: private structure
1513  * @queue: RX queue index
1514  */
1515 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1516 {
1517         int i;
1518
1519         for (i = 0; i < priv->dma_rx_size; i++)
1520                 stmmac_free_rx_buffer(priv, queue, i);
1521 }
1522
1523 /**
1524  * dma_free_tx_skbufs - free TX dma buffers
1525  * @priv: private structure
1526  * @queue: TX queue index
1527  */
1528 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1529 {
1530         int i;
1531
1532         for (i = 0; i < priv->dma_tx_size; i++)
1533                 stmmac_free_tx_buffer(priv, queue, i);
1534 }
1535
1536 /**
1537  * free_dma_rx_desc_resources - free RX dma desc resources
1538  * @priv: private structure
1539  */
1540 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1541 {
1542         u32 rx_count = priv->plat->rx_queues_to_use;
1543         u32 queue;
1544
1545         /* Free RX queue resources */
1546         for (queue = 0; queue < rx_count; queue++) {
1547                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1548
1549                 /* Release the DMA RX socket buffers */
1550                 dma_free_rx_skbufs(priv, queue);
1551
1552                 /* Free DMA regions of consistent memory previously allocated */
1553                 if (!priv->extend_desc)
1554                         dma_free_coherent(priv->device, priv->dma_rx_size *
1555                                           sizeof(struct dma_desc),
1556                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1557                 else
1558                         dma_free_coherent(priv->device, priv->dma_rx_size *
1559                                           sizeof(struct dma_extended_desc),
1560                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1561
1562                 kfree(rx_q->buf_pool);
1563                 if (rx_q->page_pool)
1564                         page_pool_destroy(rx_q->page_pool);
1565         }
1566 }
1567
1568 /**
1569  * free_dma_tx_desc_resources - free TX dma desc resources
1570  * @priv: private structure
1571  */
1572 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1573 {
1574         u32 tx_count = priv->plat->tx_queues_to_use;
1575         u32 queue;
1576
1577         /* Free TX queue resources */
1578         for (queue = 0; queue < tx_count; queue++) {
1579                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1580                 size_t size;
1581                 void *addr;
1582
1583                 /* Release the DMA TX socket buffers */
1584                 dma_free_tx_skbufs(priv, queue);
1585
1586                 if (priv->extend_desc) {
1587                         size = sizeof(struct dma_extended_desc);
1588                         addr = tx_q->dma_etx;
1589                 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1590                         size = sizeof(struct dma_edesc);
1591                         addr = tx_q->dma_entx;
1592                 } else {
1593                         size = sizeof(struct dma_desc);
1594                         addr = tx_q->dma_tx;
1595                 }
1596
1597                 size *= priv->dma_tx_size;
1598
1599                 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1600
1601                 kfree(tx_q->tx_skbuff_dma);
1602                 kfree(tx_q->tx_skbuff);
1603         }
1604 }
1605
1606 /**
1607  * alloc_dma_rx_desc_resources - alloc RX resources.
1608  * @priv: private structure
1609  * Description: according to which descriptor can be used (extended or basic)
1610  * this function allocates the resources for the RX path: the per-queue
1611  * page pool, the buffer bookkeeping array and the coherent memory that
1612  * backs the RX descriptor rings.
1613  */
1614 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1615 {
1616         u32 rx_count = priv->plat->rx_queues_to_use;
1617         int ret = -ENOMEM;
1618         u32 queue;
1619
1620         /* RX queues buffers and DMA */
1621         for (queue = 0; queue < rx_count; queue++) {
1622                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1623                 struct page_pool_params pp_params = { 0 };
1624                 unsigned int num_pages;
1625
1626                 rx_q->queue_index = queue;
1627                 rx_q->priv_data = priv;
1628
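                /* One page_pool entry per RX descriptor; the page order is
                 * derived from the number of pages needed to back one
                 * dma_buf_sz sized buffer.
                 */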
1629                 pp_params.flags = PP_FLAG_DMA_MAP;
1630                 pp_params.pool_size = priv->dma_rx_size;
1631                 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1632                 pp_params.order = ilog2(num_pages);
1633                 pp_params.nid = dev_to_node(priv->device);
1634                 pp_params.dev = priv->device;
1635                 pp_params.dma_dir = DMA_FROM_DEVICE;
1636
1637                 rx_q->page_pool = page_pool_create(&pp_params);
1638                 if (IS_ERR(rx_q->page_pool)) {
1639                         ret = PTR_ERR(rx_q->page_pool);
1640                         rx_q->page_pool = NULL;
1641                         goto err_dma;
1642                 }
1643
1644                 rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1645                                          sizeof(*rx_q->buf_pool),
1646                                          GFP_KERNEL);
1647                 if (!rx_q->buf_pool)
1648                         goto err_dma;
1649
1650                 if (priv->extend_desc) {
1651                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1652                                                            priv->dma_rx_size *
1653                                                            sizeof(struct dma_extended_desc),
1654                                                            &rx_q->dma_rx_phy,
1655                                                            GFP_KERNEL);
1656                         if (!rx_q->dma_erx)
1657                                 goto err_dma;
1658
1659                 } else {
1660                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1661                                                           priv->dma_rx_size *
1662                                                           sizeof(struct dma_desc),
1663                                                           &rx_q->dma_rx_phy,
1664                                                           GFP_KERNEL);
1665                         if (!rx_q->dma_rx)
1666                                 goto err_dma;
1667                 }
1668         }
1669
1670         return 0;
1671
1672 err_dma:
1673         free_dma_rx_desc_resources(priv);
1674
1675         return ret;
1676 }
1677
1678 /**
1679  * alloc_dma_tx_desc_resources - alloc TX resources.
1680  * @priv: private structure
1681  * Description: according to which descriptor can be used (extended or basic)
1682  * this function allocates the resources for the TX path: the per-queue
1683  * skb bookkeeping arrays and the coherent memory that backs the TX
1684  * descriptor rings.
1685  */
1686 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1687 {
1688         u32 tx_count = priv->plat->tx_queues_to_use;
1689         int ret = -ENOMEM;
1690         u32 queue;
1691
1692         /* TX queues buffers and DMA */
1693         for (queue = 0; queue < tx_count; queue++) {
1694                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1695                 size_t size;
1696                 void *addr;
1697
1698                 tx_q->queue_index = queue;
1699                 tx_q->priv_data = priv;
1700
1701                 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1702                                               sizeof(*tx_q->tx_skbuff_dma),
1703                                               GFP_KERNEL);
1704                 if (!tx_q->tx_skbuff_dma)
1705                         goto err_dma;
1706
1707                 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1708                                           sizeof(struct sk_buff *),
1709                                           GFP_KERNEL);
1710                 if (!tx_q->tx_skbuff)
1711                         goto err_dma;
1712
1713                 if (priv->extend_desc)
1714                         size = sizeof(struct dma_extended_desc);
1715                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1716                         size = sizeof(struct dma_edesc);
1717                 else
1718                         size = sizeof(struct dma_desc);
1719
1720                 size *= priv->dma_tx_size;
1721
1722                 addr = dma_alloc_coherent(priv->device, size,
1723                                           &tx_q->dma_tx_phy, GFP_KERNEL);
1724                 if (!addr)
1725                         goto err_dma;
1726
1727                 if (priv->extend_desc)
1728                         tx_q->dma_etx = addr;
1729                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1730                         tx_q->dma_entx = addr;
1731                 else
1732                         tx_q->dma_tx = addr;
1733         }
1734
1735         return 0;
1736
1737 err_dma:
1738         free_dma_tx_desc_resources(priv);
1739         return ret;
1740 }
1741
1742 /**
1743  * alloc_dma_desc_resources - alloc TX/RX resources.
1744  * @priv: private structure
1745  * Description: according to which descriptor can be used (extended or basic)
1746  * this function allocates the resources for the TX and RX paths. In case of
1747  * reception, for example, it pre-allocates the RX buffers in order to
1748  * allow the zero-copy mechanism.
1749  */
1750 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1751 {
1752         /* RX Allocation */
1753         int ret = alloc_dma_rx_desc_resources(priv);
1754
1755         if (ret)
1756                 return ret;
1757
1758         ret = alloc_dma_tx_desc_resources(priv);
1759
1760         return ret;
1761 }
1762
1763 /**
1764  * free_dma_desc_resources - free dma desc resources
1765  * @priv: private structure
1766  */
1767 static void free_dma_desc_resources(struct stmmac_priv *priv)
1768 {
1769         /* Release the DMA RX socket buffers */
1770         free_dma_rx_desc_resources(priv);
1771
1772         /* Release the DMA TX socket buffers */
1773         free_dma_tx_desc_resources(priv);
1774 }
1775
1776 /**
1777  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1778  *  @priv: driver private structure
1779  *  Description: It is used for enabling the rx queues in the MAC
1780  */
1781 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1782 {
1783         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1784         int queue;
1785         u8 mode;
1786
1787         for (queue = 0; queue < rx_queues_count; queue++) {
1788                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1789                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1790         }
1791 }
1792
1793 /**
1794  * stmmac_start_rx_dma - start RX DMA channel
1795  * @priv: driver private structure
1796  * @chan: RX channel index
1797  * Description:
1798  * This starts a RX DMA channel
1799  */
1800 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1801 {
1802         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1803         stmmac_start_rx(priv, priv->ioaddr, chan);
1804 }
1805
1806 /**
1807  * stmmac_start_tx_dma - start TX DMA channel
1808  * @priv: driver private structure
1809  * @chan: TX channel index
1810  * Description:
1811  * This starts a TX DMA channel
1812  */
1813 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1814 {
1815         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1816         stmmac_start_tx(priv, priv->ioaddr, chan);
1817 }
1818
1819 /**
1820  * stmmac_stop_rx_dma - stop RX DMA channel
1821  * @priv: driver private structure
1822  * @chan: RX channel index
1823  * Description:
1824  * This stops a RX DMA channel
1825  */
1826 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1827 {
1828         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1829         stmmac_stop_rx(priv, priv->ioaddr, chan);
1830 }
1831
1832 /**
1833  * stmmac_stop_tx_dma - stop TX DMA channel
1834  * @priv: driver private structure
1835  * @chan: TX channel index
1836  * Description:
1837  * This stops a TX DMA channel
1838  */
1839 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1840 {
1841         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1842         stmmac_stop_tx(priv, priv->ioaddr, chan);
1843 }
1844
1845 /**
1846  * stmmac_start_all_dma - start all RX and TX DMA channels
1847  * @priv: driver private structure
1848  * Description:
1849  * This starts all the RX and TX DMA channels
1850  */
1851 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1852 {
1853         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1854         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1855         u32 chan = 0;
1856
1857         for (chan = 0; chan < rx_channels_count; chan++)
1858                 stmmac_start_rx_dma(priv, chan);
1859
1860         for (chan = 0; chan < tx_channels_count; chan++)
1861                 stmmac_start_tx_dma(priv, chan);
1862 }
1863
1864 /**
1865  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1866  * @priv: driver private structure
1867  * Description:
1868  * This stops the RX and TX DMA channels
1869  */
1870 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1871 {
1872         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1873         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1874         u32 chan = 0;
1875
1876         for (chan = 0; chan < rx_channels_count; chan++)
1877                 stmmac_stop_rx_dma(priv, chan);
1878
1879         for (chan = 0; chan < tx_channels_count; chan++)
1880                 stmmac_stop_tx_dma(priv, chan);
1881 }
1882
1883 /**
1884  *  stmmac_dma_operation_mode - HW DMA operation mode
1885  *  @priv: driver private structure
1886  *  Description: it is used for configuring the DMA operation mode register in
1887  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1888  */
1889 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1890 {
1891         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1892         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1893         int rxfifosz = priv->plat->rx_fifo_size;
1894         int txfifosz = priv->plat->tx_fifo_size;
1895         u32 txmode = 0;
1896         u32 rxmode = 0;
1897         u32 chan = 0;
1898         u8 qmode = 0;
1899
1900         if (rxfifosz == 0)
1901                 rxfifosz = priv->dma_cap.rx_fifo_size;
1902         if (txfifosz == 0)
1903                 txfifosz = priv->dma_cap.tx_fifo_size;
1904
1905         /* Adjust for real per queue fifo size */
1906         rxfifosz /= rx_channels_count;
1907         txfifosz /= tx_channels_count;
1908
1909         if (priv->plat->force_thresh_dma_mode) {
1910                 txmode = tc;
1911                 rxmode = tc;
1912         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1913                 /*
1914                  * In case of GMAC, SF mode can be enabled
1915                  * to perform the TX COE in HW. This depends on:
1916                  * 1) TX COE being actually supported, and
1917                  * 2) there being no buggy Jumbo frame support
1918                  *    that requires not inserting the csum in the TDES.
1919                  */
1920                 txmode = SF_DMA_MODE;
1921                 rxmode = SF_DMA_MODE;
1922                 priv->xstats.threshold = SF_DMA_MODE;
1923         } else {
1924                 txmode = tc;
1925                 rxmode = SF_DMA_MODE;
1926         }
1927
1928         /* configure all channels */
1929         for (chan = 0; chan < rx_channels_count; chan++) {
1930                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1931
1932                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1933                                 rxfifosz, qmode);
1934                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1935                                 chan);
1936         }
1937
1938         for (chan = 0; chan < tx_channels_count; chan++) {
1939                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1940
1941                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1942                                 txfifosz, qmode);
1943         }
1944 }
1945
1946 /**
1947  * stmmac_tx_clean - to manage the transmission completion
1948  * @priv: driver private structure
1949  * @budget: napi budget limiting this function's packet handling
1950  * @queue: TX queue index
1951  * Description: it reclaims the transmit resources after transmission completes.
1952  */
1953 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1954 {
1955         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1956         unsigned int bytes_compl = 0, pkts_compl = 0;
1957         unsigned int entry, count = 0;
1958
1959         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1960
1961         priv->xstats.tx_clean++;
1962
1963         entry = tx_q->dirty_tx;
1964         while ((entry != tx_q->cur_tx) && (count < budget)) {
1965                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1966                 struct dma_desc *p;
1967                 int status;
1968
1969                 if (priv->extend_desc)
1970                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1971                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1972                         p = &tx_q->dma_entx[entry].basic;
1973                 else
1974                         p = tx_q->dma_tx + entry;
1975
1976                 status = stmmac_tx_status(priv, &priv->dev->stats,
1977                                 &priv->xstats, p, priv->ioaddr);
1978                 /* Check if the descriptor is owned by the DMA */
1979                 if (unlikely(status & tx_dma_own))
1980                         break;
1981
1982                 count++;
1983
1984                 /* Make sure descriptor fields are read after reading
1985                  * the own bit.
1986                  */
1987                 dma_rmb();
1988
1989                 /* Just consider the last segment and ...*/
1990                 if (likely(!(status & tx_not_ls))) {
1991                         /* ... verify the status error condition */
1992                         if (unlikely(status & tx_err)) {
1993                                 priv->dev->stats.tx_errors++;
1994                         } else {
1995                                 priv->dev->stats.tx_packets++;
1996                                 priv->xstats.tx_pkt_n++;
1997                         }
1998                         stmmac_get_tx_hwtstamp(priv, p, skb);
1999                 }
2000
2001                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2002                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
2003                                 dma_unmap_page(priv->device,
2004                                                tx_q->tx_skbuff_dma[entry].buf,
2005                                                tx_q->tx_skbuff_dma[entry].len,
2006                                                DMA_TO_DEVICE);
2007                         else
2008                                 dma_unmap_single(priv->device,
2009                                                  tx_q->tx_skbuff_dma[entry].buf,
2010                                                  tx_q->tx_skbuff_dma[entry].len,
2011                                                  DMA_TO_DEVICE);
2012                         tx_q->tx_skbuff_dma[entry].buf = 0;
2013                         tx_q->tx_skbuff_dma[entry].len = 0;
2014                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
2015                 }
2016
2017                 stmmac_clean_desc3(priv, tx_q, p);
2018
2019                 tx_q->tx_skbuff_dma[entry].last_segment = false;
2020                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2021
2022                 if (likely(skb != NULL)) {
2023                         pkts_compl++;
2024                         bytes_compl += skb->len;
2025                         dev_consume_skb_any(skb);
2026                         tx_q->tx_skbuff[entry] = NULL;
2027                 }
2028
2029                 stmmac_release_tx_desc(priv, p, priv->mode);
2030
2031                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2032         }
2033         tx_q->dirty_tx = entry;
2034
2035         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2036                                   pkts_compl, bytes_compl);
2037
2038         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2039                                                                 queue))) &&
2040             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2041
2042                 netif_dbg(priv, tx_done, priv->dev,
2043                           "%s: restart transmit\n", __func__);
2044                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2045         }
2046
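        /* TX activity brought the MAC out of the low power state; re-arm the
         * LPI timer so EEE mode can be re-entered once the TX path is idle.
         */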
2047         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2048                 stmmac_enable_eee_mode(priv);
2049                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2050         }
2051
2052         /* We still have pending packets, let's call for a new scheduling */
2053         if (tx_q->dirty_tx != tx_q->cur_tx)
2054                 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2055
2056         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2057
2058         return count;
2059 }
2060
2061 /**
2062  * stmmac_tx_err - to manage the tx error
2063  * @priv: driver private structure
2064  * @chan: channel index
2065  * Description: it cleans the descriptors and restarts the transmission
2066  * in case of transmission errors.
2067  */
2068 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2069 {
2070         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2071
2072         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2073
2074         stmmac_stop_tx_dma(priv, chan);
2075         dma_free_tx_skbufs(priv, chan);
2076         stmmac_clear_tx_descriptors(priv, chan);
2077         tx_q->dirty_tx = 0;
2078         tx_q->cur_tx = 0;
2079         tx_q->mss = 0;
2080         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2081         stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2082                             tx_q->dma_tx_phy, chan);
2083         stmmac_start_tx_dma(priv, chan);
2084
2085         priv->dev->stats.tx_errors++;
2086         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2087 }
2088
2089 /**
2090  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2091  *  @priv: driver private structure
2092  *  @txmode: TX operating mode
2093  *  @rxmode: RX operating mode
2094  *  @chan: channel index
2095  *  Description: it is used for configuring the DMA operation mode at
2096  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2097  *  mode.
2098  */
2099 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2100                                           u32 rxmode, u32 chan)
2101 {
2102         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2103         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2104         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2105         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2106         int rxfifosz = priv->plat->rx_fifo_size;
2107         int txfifosz = priv->plat->tx_fifo_size;
2108
2109         if (rxfifosz == 0)
2110                 rxfifosz = priv->dma_cap.rx_fifo_size;
2111         if (txfifosz == 0)
2112                 txfifosz = priv->dma_cap.tx_fifo_size;
2113
2114         /* Adjust for real per queue fifo size */
2115         rxfifosz /= rx_channels_count;
2116         txfifosz /= tx_channels_count;
2117
2118         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2119         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2120 }
2121
2122 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2123 {
2124         int ret;
2125
2126         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2127                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2128         if (ret && (ret != -EINVAL)) {
2129                 stmmac_global_err(priv);
2130                 return true;
2131         }
2132
2133         return false;
2134 }
2135
2136 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2137 {
2138         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2139                                                  &priv->xstats, chan);
2140         struct stmmac_channel *ch = &priv->channel[chan];
2141         unsigned long flags;
2142
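        /* Mask the per-channel DMA interrupt before scheduling NAPI; the
         * poll routine re-enables it once its work is done.
         */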
2143         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2144                 if (napi_schedule_prep(&ch->rx_napi)) {
2145                         spin_lock_irqsave(&ch->lock, flags);
2146                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2147                         spin_unlock_irqrestore(&ch->lock, flags);
2148                         __napi_schedule_irqoff(&ch->rx_napi);
2149                 }
2150         }
2151
2152         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2153                 if (napi_schedule_prep(&ch->tx_napi)) {
2154                         spin_lock_irqsave(&ch->lock, flags);
2155                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2156                         spin_unlock_irqrestore(&ch->lock, flags);
2157                         __napi_schedule_irqoff(&ch->tx_napi);
2158                 }
2159         }
2160
2161         return status;
2162 }
2163
2164 /**
2165  * stmmac_dma_interrupt - DMA ISR
2166  * @priv: driver private structure
2167  * Description: this is the DMA ISR. It is called by the main ISR.
2168  * It calls the dwmac dma routine and schedules the poll method when there
2169  * is work to be done.
2170  */
2171 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2172 {
2173         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2174         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2175         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2176                                 tx_channel_count : rx_channel_count;
2177         u32 chan;
2178         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2179
2180         /* Make sure we never check beyond our status buffer. */
2181         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2182                 channels_to_check = ARRAY_SIZE(status);
2183
2184         for (chan = 0; chan < channels_to_check; chan++)
2185                 status[chan] = stmmac_napi_check(priv, chan);
2186
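        /* A TX error flagged with tx_hard_error_bump_tc (e.g. a transmit
         * underflow) is mitigated by raising the DMA threshold in steps of
         * 64, up to 256; a plain TX hard error restarts the channel instead.
         */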
2187         for (chan = 0; chan < tx_channel_count; chan++) {
2188                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2189                         /* Try to bump up the dma threshold on this failure */
2190                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2191                             (tc <= 256)) {
2192                                 tc += 64;
2193                                 if (priv->plat->force_thresh_dma_mode)
2194                                         stmmac_set_dma_operation_mode(priv,
2195                                                                       tc,
2196                                                                       tc,
2197                                                                       chan);
2198                                 else
2199                                         stmmac_set_dma_operation_mode(priv,
2200                                                                     tc,
2201                                                                     SF_DMA_MODE,
2202                                                                     chan);
2203                                 priv->xstats.threshold = tc;
2204                         }
2205                 } else if (unlikely(status[chan] == tx_hard_error)) {
2206                         stmmac_tx_err(priv, chan);
2207                 }
2208         }
2209 }
2210
2211 /**
2212  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2213  * @priv: driver private structure
2214  * Description: this masks the MMC irq since the counters are managed in SW.
2215  */
2216 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2217 {
2218         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2219                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2220
2221         stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2222
2223         if (priv->dma_cap.rmon) {
2224                 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2225                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2226         } else
2227                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2228 }
2229
2230 /**
2231  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2232  * @priv: driver private structure
2233  * Description:
2234  *  newer GMAC chip generations have a register that indicates the
2235  *  presence of the optional features/functions.
2236  *  It can also be used to override the values passed through the
2237  *  platform code, which are necessary for old MAC10/100 and GMAC chips.
2238  */
2239 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2240 {
2241         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2242 }
2243
2244 /**
2245  * stmmac_check_ether_addr - check if the MAC addr is valid
2246  * @priv: driver private structure
2247  * Description:
2248  * it verifies whether the MAC address is valid; if it is not, a random
2249  * MAC address is generated
2250  */
2251 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2252 {
2253         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2254                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2255                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2256                         eth_hw_addr_random(priv->dev);
2257                 dev_info(priv->device, "device MAC address %pM\n",
2258                          priv->dev->dev_addr);
2259         }
2260 }
2261
2262 /**
2263  * stmmac_init_dma_engine - DMA init.
2264  * @priv: driver private structure
2265  * Description:
2266  * It inits the DMA invoking the specific MAC/GMAC callback.
2267  * Some DMA parameters can be passed from the platform;
2268  * in case these are not passed, a default is kept for the MAC or GMAC.
2269  */
2270 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2271 {
2272         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2273         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2274         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2275         struct stmmac_rx_queue *rx_q;
2276         struct stmmac_tx_queue *tx_q;
2277         u32 chan = 0;
2278         int atds = 0;
2279         int ret = 0;
2280
2281         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2282                 dev_err(priv->device, "Invalid DMA configuration\n");
2283                 return -EINVAL;
2284         }
2285
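        /* Extended descriptors in ring mode need the alternate (enlarged)
         * descriptor size setting in the DMA.
         */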
2286         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2287                 atds = 1;
2288
2289         ret = stmmac_reset(priv, priv->ioaddr);
2290         if (ret) {
2291                 dev_err(priv->device, "Failed to reset the dma\n");
2292                 return ret;
2293         }
2294
2295         /* DMA Configuration */
2296         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2297
2298         if (priv->plat->axi)
2299                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2300
2301         /* DMA CSR Channel configuration */
2302         for (chan = 0; chan < dma_csr_ch; chan++)
2303                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2304
2305         /* DMA RX Channel Configuration */
2306         for (chan = 0; chan < rx_channels_count; chan++) {
2307                 rx_q = &priv->rx_queue[chan];
2308
2309                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2310                                     rx_q->dma_rx_phy, chan);
2311
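                /* The whole RX ring is owned by the DMA at init, so the tail
                 * pointer is set past the last descriptor.
                 */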
2312                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2313                                      (priv->dma_rx_size *
2314                                       sizeof(struct dma_desc));
2315                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2316                                        rx_q->rx_tail_addr, chan);
2317         }
2318
2319         /* DMA TX Channel Configuration */
2320         for (chan = 0; chan < tx_channels_count; chan++) {
2321                 tx_q = &priv->tx_queue[chan];
2322
2323                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2324                                     tx_q->dma_tx_phy, chan);
2325
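                /* No TX descriptors are queued yet, so the tail pointer
                 * starts at the base of the ring.
                 */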
2326                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2327                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2328                                        tx_q->tx_tail_addr, chan);
2329         }
2330
2331         return ret;
2332 }
2333
2334 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2335 {
2336         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2337
2338         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2339 }
2340
2341 /**
2342  * stmmac_tx_timer - mitigation sw timer for tx.
2343  * @t: data pointer
2344  * Description:
2345  * This is the timer handler; it schedules the TX NAPI which runs stmmac_tx_clean.
2346  */
2347 static void stmmac_tx_timer(struct timer_list *t)
2348 {
2349         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2350         struct stmmac_priv *priv = tx_q->priv_data;
2351         struct stmmac_channel *ch;
2352
2353         ch = &priv->channel[tx_q->queue_index];
2354
2355         if (likely(napi_schedule_prep(&ch->tx_napi))) {
2356                 unsigned long flags;
2357
2358                 spin_lock_irqsave(&ch->lock, flags);
2359                 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2360                 spin_unlock_irqrestore(&ch->lock, flags);
2361                 __napi_schedule(&ch->tx_napi);
2362         }
2363 }
2364
2365 /**
2366  * stmmac_init_coalesce - init mitigation options.
2367  * @priv: driver private structure
2368  * Description:
2369  * This inits the coalesce parameters: i.e. timer rate,
2370  * timer handler and default threshold used for enabling the
2371  * interrupt on completion bit.
2372  */
2373 static void stmmac_init_coalesce(struct stmmac_priv *priv)
2374 {
2375         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2376         u32 chan;
2377
2378         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2379         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2380         priv->rx_coal_frames = STMMAC_RX_FRAMES;
2381
2382         for (chan = 0; chan < tx_channel_count; chan++) {
2383                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2384
2385                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2386         }
2387 }
2388
2389 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2390 {
2391         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2392         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2393         u32 chan;
2394
2395         /* set TX ring length */
2396         for (chan = 0; chan < tx_channels_count; chan++)
2397                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2398                                        (priv->dma_tx_size - 1), chan);
2399
2400         /* set RX ring length */
2401         for (chan = 0; chan < rx_channels_count; chan++)
2402                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2403                                        (priv->dma_rx_size - 1), chan);
2404 }
2405
2406 /**
2407  *  stmmac_set_tx_queue_weight - Set TX queue weight
2408  *  @priv: driver private structure
2409  *  Description: It is used for setting the TX queue weights
2410  */
2411 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2412 {
2413         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2414         u32 weight;
2415         u32 queue;
2416
2417         for (queue = 0; queue < tx_queues_count; queue++) {
2418                 weight = priv->plat->tx_queues_cfg[queue].weight;
2419                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2420         }
2421 }
2422
2423 /**
2424  *  stmmac_configure_cbs - Configure CBS in TX queue
2425  *  @priv: driver private structure
2426  *  Description: It is used for configuring CBS in AVB TX queues
2427  */
2428 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2429 {
2430         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2431         u32 mode_to_use;
2432         u32 queue;
2433
2434         /* queue 0 is reserved for legacy traffic */
2435         for (queue = 1; queue < tx_queues_count; queue++) {
2436                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2437                 if (mode_to_use == MTL_QUEUE_DCB)
2438                         continue;
2439
2440                 stmmac_config_cbs(priv, priv->hw,
2441                                 priv->plat->tx_queues_cfg[queue].send_slope,
2442                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2443                                 priv->plat->tx_queues_cfg[queue].high_credit,
2444                                 priv->plat->tx_queues_cfg[queue].low_credit,
2445                                 queue);
2446         }
2447 }
2448
2449 /**
2450  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2451  *  @priv: driver private structure
2452  *  Description: It is used for mapping RX queues to RX dma channels
2453  */
2454 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2455 {
2456         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2457         u32 queue;
2458         u32 chan;
2459
2460         for (queue = 0; queue < rx_queues_count; queue++) {
2461                 chan = priv->plat->rx_queues_cfg[queue].chan;
2462                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2463         }
2464 }
2465
2466 /**
2467  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2468  *  @priv: driver private structure
2469  *  Description: It is used for configuring the RX Queue Priority
2470  */
2471 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2472 {
2473         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2474         u32 queue;
2475         u32 prio;
2476
2477         for (queue = 0; queue < rx_queues_count; queue++) {
2478                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2479                         continue;
2480
2481                 prio = priv->plat->rx_queues_cfg[queue].prio;
2482                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2483         }
2484 }
2485
2486 /**
2487  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2488  *  @priv: driver private structure
2489  *  Description: It is used for configuring the TX Queue Priority
2490  */
2491 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2492 {
2493         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2494         u32 queue;
2495         u32 prio;
2496
2497         for (queue = 0; queue < tx_queues_count; queue++) {
2498                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2499                         continue;
2500
2501                 prio = priv->plat->tx_queues_cfg[queue].prio;
2502                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2503         }
2504 }
2505
2506 /**
2507  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2508  *  @priv: driver private structure
2509  *  Description: It is used for configuring the RX queue routing
2510  */
2511 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2512 {
2513         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2514         u32 queue;
2515         u8 packet;
2516
2517         for (queue = 0; queue < rx_queues_count; queue++) {
2518                 /* no specific packet type routing specified for the queue */
2519                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2520                         continue;
2521
2522                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2523                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2524         }
2525 }
2526
2527 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2528 {
2529         if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2530                 priv->rss.enable = false;
2531                 return;
2532         }
2533
2534         if (priv->dev->features & NETIF_F_RXHASH)
2535                 priv->rss.enable = true;
2536         else
2537                 priv->rss.enable = false;
2538
2539         stmmac_rss_configure(priv, priv->hw, &priv->rss,
2540                              priv->plat->rx_queues_to_use);
2541 }
2542
2543 /**
2544  *  stmmac_mtl_configuration - Configure MTL
2545  *  @priv: driver private structure
2546  *  Description: It is used for configuring MTL
2547  */
2548 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2549 {
2550         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2551         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2552
2553         if (tx_queues_count > 1)
2554                 stmmac_set_tx_queue_weight(priv);
2555
2556         /* Configure MTL RX algorithms */
2557         if (rx_queues_count > 1)
2558                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2559                                 priv->plat->rx_sched_algorithm);
2560
2561         /* Configure MTL TX algorithms */
2562         if (tx_queues_count > 1)
2563                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2564                                 priv->plat->tx_sched_algorithm);
2565
2566         /* Configure CBS in AVB TX queues */
2567         if (tx_queues_count > 1)
2568                 stmmac_configure_cbs(priv);
2569
2570         /* Map RX MTL to DMA channels */
2571         stmmac_rx_queue_dma_chan_map(priv);
2572
2573         /* Enable MAC RX Queues */
2574         stmmac_mac_enable_rx_queues(priv);
2575
2576         /* Set RX priorities */
2577         if (rx_queues_count > 1)
2578                 stmmac_mac_config_rx_queues_prio(priv);
2579
2580         /* Set TX priorities */
2581         if (tx_queues_count > 1)
2582                 stmmac_mac_config_tx_queues_prio(priv);
2583
2584         /* Set RX routing */
2585         if (rx_queues_count > 1)
2586                 stmmac_mac_config_rx_queues_routing(priv);
2587
2588         /* Receive Side Scaling */
2589         if (rx_queues_count > 1)
2590                 stmmac_mac_config_rss(priv);
2591 }
2592
2593 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2594 {
2595         if (priv->dma_cap.asp) {
2596                 netdev_info(priv->dev, "Enabling Safety Features\n");
2597                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2598         } else {
2599                 netdev_info(priv->dev, "No Safety Features support found\n");
2600         }
2601 }
2602
2603 /**
2604  * stmmac_hw_setup - setup mac in a usable state.
2605  *  @dev : pointer to the device structure.
2606  *  @init_ptp: initialize PTP if set
2607  *  Description:
2608  *  this is the main function to setup the HW in a usable state: the
2609  *  dma engine is reset, the core registers are configured (e.g. AXI,
2610  *  Checksum features, timers) and the DMA is made ready to start
2611  *  receiving and transmitting.
2612  *  Return value:
2613  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2614  *  file on failure.
2615  */
2616 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2617 {
2618         struct stmmac_priv *priv = netdev_priv(dev);
2619         u32 rx_cnt = priv->plat->rx_queues_to_use;
2620         u32 tx_cnt = priv->plat->tx_queues_to_use;
2621         u32 chan;
2622         int ret;
2623
2624         /* DMA initialization and SW reset */
2625         ret = stmmac_init_dma_engine(priv);
2626         if (ret < 0) {
2627                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2628                            __func__);
2629                 return ret;
2630         }
2631
2632         /* Copy the MAC addr into the HW  */
2633         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2634
2635         /* PS and related bits will be programmed according to the speed */
2636         if (priv->hw->pcs) {
2637                 int speed = priv->plat->mac_port_sel_speed;
2638
2639                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2640                     (speed == SPEED_1000)) {
2641                         priv->hw->ps = speed;
2642                 } else {
2643                         dev_warn(priv->device, "invalid port speed\n");
2644                         priv->hw->ps = 0;
2645                 }
2646         }
2647
2648         /* Initialize the MAC Core */
2649         stmmac_core_init(priv, priv->hw, dev);
2650
2651         /* Initialize MTL*/
2652         stmmac_mtl_configuration(priv);
2653
2654         /* Initialize Safety Features */
2655         stmmac_safety_feat_configuration(priv);
2656
2657         ret = stmmac_rx_ipc(priv, priv->hw);
2658         if (!ret) {
2659                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2660                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2661                 priv->hw->rx_csum = 0;
2662         }
2663
2664         /* Enable the MAC Rx/Tx */
2665         stmmac_mac_set(priv, priv->ioaddr, true);
2666
2667         /* Set the HW DMA mode and the COE */
2668         stmmac_dma_operation_mode(priv);
2669
2670         stmmac_mmc_setup(priv);
2671
2672         if (init_ptp) {
2673                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2674                 if (ret < 0)
2675                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2676
2677                 ret = stmmac_init_ptp(priv);
2678                 if (ret == -EOPNOTSUPP)
2679                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2680                 else if (ret)
2681                         netdev_warn(priv->dev, "PTP init failed\n");
2682         }
2683
2684         priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2685
2686         /* Convert the timer from msec to usec */
2687         if (!priv->tx_lpi_timer)
2688                 priv->tx_lpi_timer = eee_timer * 1000;
2689
2690         if (priv->use_riwt) {
2691                 if (!priv->rx_riwt)
2692                         priv->rx_riwt = DEF_DMA_RIWT;
2693
2694                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);
2695         }
2696
2697         if (priv->hw->pcs)
2698                 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2699
2700         /* set TX and RX rings length */
2701         stmmac_set_rings_length(priv);
2702
2703         /* Enable TSO */
2704         if (priv->tso) {
2705                 for (chan = 0; chan < tx_cnt; chan++)
2706                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2707         }
2708
2709         /* Enable Split Header */
2710         if (priv->sph && priv->hw->rx_csum) {
2711                 for (chan = 0; chan < rx_cnt; chan++)
2712                         stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
2713         }
2714
2715         /* VLAN Tag Insertion */
2716         if (priv->dma_cap.vlins)
2717                 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2718
2719         /* TBS */
2720         for (chan = 0; chan < tx_cnt; chan++) {
2721                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2722                 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2723
2724                 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2725         }
2726
2727         /* Configure real RX and TX queues */
2728         netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2729         netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2730
2731         /* Start the ball rolling... */
2732         stmmac_start_all_dma(priv);
2733
2734         return 0;
2735 }
2736
2737 static void stmmac_hw_teardown(struct net_device *dev)
2738 {
2739         struct stmmac_priv *priv = netdev_priv(dev);
2740
2741         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2742 }
2743
2744 /**
2745  *  stmmac_open - open entry point of the driver
2746  *  @dev : pointer to the device structure.
2747  *  Description:
2748  *  This function is the open entry point of the driver.
2749  *  Return value:
2750  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2751  *  file on failure.
2752  */
2753 static int stmmac_open(struct net_device *dev)
2754 {
2755         struct stmmac_priv *priv = netdev_priv(dev);
2756         int bfsize = 0;
2757         u32 chan;
2758         int ret;
2759
2760         if (priv->hw->pcs != STMMAC_PCS_TBI &&
2761             priv->hw->pcs != STMMAC_PCS_RTBI &&
2762             priv->hw->xpcs == NULL) {
2763                 ret = stmmac_init_phy(dev);
2764                 if (ret) {
2765                         netdev_err(priv->dev,
2766                                    "%s: Cannot attach to PHY (error: %d)\n",
2767                                    __func__, ret);
2768                         return ret;
2769                 }
2770         }
2771
2772         /* Extra statistics */
2773         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2774         priv->xstats.threshold = tc;
2775
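        /* Select the RX DMA buffer size: cores handling jumbo frames in ring
         * mode may need 16KiB buffers, otherwise the size is derived from the
         * MTU.
         */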
2776         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
2777         if (bfsize < 0)
2778                 bfsize = 0;
2779
2780         if (bfsize < BUF_SIZE_16KiB)
2781                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
2782
2783         priv->dma_buf_sz = bfsize;
2784         buf_sz = bfsize;
2785
2786         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2787
2788         if (!priv->dma_tx_size)
2789                 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
2790         if (!priv->dma_rx_size)
2791                 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
2792
2793         /* Check TBS availability early: the TX descriptor layout depends on it */
2794         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
2795                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2796                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
2797
2798                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
2799                 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
2800                         tx_q->tbs &= ~STMMAC_TBS_AVAIL;
2801         }
2802
2803         ret = alloc_dma_desc_resources(priv);
2804         if (ret < 0) {
2805                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2806                            __func__);
2807                 goto dma_desc_error;
2808         }
2809
2810         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2811         if (ret < 0) {
2812                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2813                            __func__);
2814                 goto init_error;
2815         }
2816
2817         ret = stmmac_hw_setup(dev, true);
2818         if (ret < 0) {
2819                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2820                 goto init_error;
2821         }
2822
2823         stmmac_init_coalesce(priv);
2824
2825         phylink_start(priv->phylink);
2826         /* We may have called phylink_speed_down before */
2827         phylink_speed_up(priv->phylink);
2828
2829         /* Request the IRQ lines */
2830         ret = request_irq(dev->irq, stmmac_interrupt,
2831                           IRQF_SHARED, dev->name, dev);
2832         if (unlikely(ret < 0)) {
2833                 netdev_err(priv->dev,
2834                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2835                            __func__, dev->irq, ret);
2836                 goto irq_error;
2837         }
2838
2839         /* Request the WoL IRQ in case another line is used for WoL */
2840         if (priv->wol_irq != dev->irq) {
2841                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2842                                   IRQF_SHARED, dev->name, dev);
2843                 if (unlikely(ret < 0)) {
2844                         netdev_err(priv->dev,
2845                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2846                                    __func__, priv->wol_irq, ret);
2847                         goto wolirq_error;
2848                 }
2849         }
2850
2851         /* Request the LPI IRQ in case a separate line is used for it */
2852         if (priv->lpi_irq > 0) {
2853                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2854                                   dev->name, dev);
2855                 if (unlikely(ret < 0)) {
2856                         netdev_err(priv->dev,
2857                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2858                                    __func__, priv->lpi_irq, ret);
2859                         goto lpiirq_error;
2860                 }
2861         }
2862
2863         stmmac_enable_all_queues(priv);
2864         netif_tx_start_all_queues(priv->dev);
2865
2866         return 0;
2867
2868 lpiirq_error:
2869         if (priv->wol_irq != dev->irq)
2870                 free_irq(priv->wol_irq, dev);
2871 wolirq_error:
2872         free_irq(dev->irq, dev);
2873 irq_error:
2874         phylink_stop(priv->phylink);
2875
2876         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2877                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2878
2879         stmmac_hw_teardown(dev);
2880 init_error:
2881         free_dma_desc_resources(priv);
2882 dma_desc_error:
2883         phylink_disconnect_phy(priv->phylink);
2884         return ret;
2885 }
2886
2887 /**
2888  *  stmmac_release - close entry point of the driver
2889  *  @dev : device pointer.
2890  *  Description:
2891  *  This is the stop entry point of the driver.
2892  */
2893 static int stmmac_release(struct net_device *dev)
2894 {
2895         struct stmmac_priv *priv = netdev_priv(dev);
2896         u32 chan;
2897
2898         if (priv->eee_enabled)
2899                 del_timer_sync(&priv->eee_ctrl_timer);
2900
2901         if (device_may_wakeup(priv->device))
2902                 phylink_speed_down(priv->phylink, false);
2903         /* Stop and disconnect the PHY */
2904         phylink_stop(priv->phylink);
2905         phylink_disconnect_phy(priv->phylink);
2906
2907         stmmac_disable_all_queues(priv);
2908
2909         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2910                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2911
2912         /* Free the IRQ lines */
2913         free_irq(dev->irq, dev);
2914         if (priv->wol_irq != dev->irq)
2915                 free_irq(priv->wol_irq, dev);
2916         if (priv->lpi_irq > 0)
2917                 free_irq(priv->lpi_irq, dev);
2918
2919         /* Stop TX/RX DMA and clear the descriptors */
2920         stmmac_stop_all_dma(priv);
2921
2922         /* Release and free the Rx/Tx resources */
2923         free_dma_desc_resources(priv);
2924
2925         /* Disable the MAC Rx/Tx */
2926         stmmac_mac_set(priv, priv->ioaddr, false);
2927
2928         netif_carrier_off(dev);
2929
2930         stmmac_release_ptp(priv);
2931
2932         return 0;
2933 }
2934
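/**
 *  stmmac_vlan_insert - request HW VLAN tag insertion via a TX descriptor
 *  @priv: driver private structure
 *  @skb: the socket buffer
 *  @tx_q: TX queue to use
 *  Description: when descriptor based VLAN insertion is available and the skb
 *  carries a VLAN tag, fill a descriptor with the tag (treated as an inner
 *  tag for 802.1AD frames), hand it to the HW and advance cur_tx.
 *  Returns true if a descriptor was consumed, false otherwise.
 */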
2935 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
2936                                struct stmmac_tx_queue *tx_q)
2937 {
2938         u16 tag = 0x0, inner_tag = 0x0;
2939         u32 inner_type = 0x0;
2940         struct dma_desc *p;
2941
2942         if (!priv->dma_cap.vlins)
2943                 return false;
2944         if (!skb_vlan_tag_present(skb))
2945                 return false;
2946         if (skb->vlan_proto == htons(ETH_P_8021AD)) {
2947                 inner_tag = skb_vlan_tag_get(skb);
2948                 inner_type = STMMAC_VLAN_INSERT;
2949         }
2950
2951         tag = skb_vlan_tag_get(skb);
2952
2953         if (tx_q->tbs & STMMAC_TBS_AVAIL)
2954                 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
2955         else
2956                 p = &tx_q->dma_tx[tx_q->cur_tx];
2957
2958         if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
2959                 return false;
2960
2961         stmmac_set_tx_owner(priv, p);
2962         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2963         return true;
2964 }
2965
2966 /**
2967  *  stmmac_tso_allocator - allocate and fill TX descriptors for a TSO payload
2968  *  @priv: driver private structure
2969  *  @des: buffer start address
2970  *  @total_len: total length to fill in descriptors
2971  *  @last_segment: condition for the last descriptor
2972  *  @queue: TX queue index
2973  *  Description:
2974  *  This function fills the descriptors and requests new descriptors according
2975  *  to the buffer length to fill.
2976  */
2977 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
2978                                  int total_len, bool last_segment, u32 queue)
2979 {
2980         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2981         struct dma_desc *desc;
2982         u32 buff_size;
2983         int tmp_len;
2984
2985         tmp_len = total_len;
2986
2987         while (tmp_len > 0) {
2988                 dma_addr_t curr_addr;
2989
2990                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
2991                                                 priv->dma_tx_size);
2992                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2993
2994                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
2995                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
2996                 else
2997                         desc = &tx_q->dma_tx[tx_q->cur_tx];
2998
2999                 curr_addr = des + (total_len - tmp_len);
3000                 if (priv->dma_cap.addr64 <= 32)
3001                         desc->des0 = cpu_to_le32(curr_addr);
3002                 else
3003                         stmmac_set_desc_addr(priv, desc, curr_addr);
3004
3005                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3006                             TSO_MAX_BUFF_SIZE : tmp_len;
3007
3008                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3009                                 0, 1,
3010                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3011                                 0, 0);
3012
3013                 tmp_len -= TSO_MAX_BUFF_SIZE;
3014         }
3015 }
3016
3017 /**
3018  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3019  *  @skb : the socket buffer
3020  *  @dev : device pointer
3021  *  Description: this is the transmit function that is called on TSO frames
3022  *  (support available on GMAC4 and newer chips).
3023  *  Diagram below shows the ring programming in case of TSO frames:
3024  *
3025  *  First Descriptor
3026  *   --------
3027  *   | DES0 |---> buffer1 = L2/L3/L4 header
3028  *   | DES1 |---> TCP Payload (can continue on next descr...)
3029  *   | DES2 |---> buffer 1 and 2 len
3030  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3031  *   --------
3032  *      |
3033  *     ...
3034  *      |
3035  *   --------
3036  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3037  *   | DES1 | --|
3038  *   | DES2 | --> buffer 1 and 2 len
3039  *   | DES3 |
3040  *   --------
3041  *
3042  * The MSS is fixed while TSO is in use, so the TDES3 context field is only programmed when the MSS changes.
3043  */
3044 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3045 {
3046         struct dma_desc *desc, *first, *mss_desc = NULL;
3047         struct stmmac_priv *priv = netdev_priv(dev);
3048         int desc_size, tmp_pay_len = 0, first_tx;
3049         int nfrags = skb_shinfo(skb)->nr_frags;
3050         u32 queue = skb_get_queue_mapping(skb);
3051         unsigned int first_entry, tx_packets;
3052         struct stmmac_tx_queue *tx_q;
3053         bool has_vlan, set_ic;
3054         u8 proto_hdr_len, hdr;
3055         u32 pay_len, mss;
3056         dma_addr_t des;
3057         int i;
3058
3059         tx_q = &priv->tx_queue[queue];
3060         first_tx = tx_q->cur_tx;
3061
3062         /* Compute header lengths */
3063         if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3064                 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3065                 hdr = sizeof(struct udphdr);
3066         } else {
3067                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3068                 hdr = tcp_hdrlen(skb);
3069         }
3070
3071         /* Descriptor availability based on the threshold should be safe enough */
3072         if (unlikely(stmmac_tx_avail(priv, queue) <
3073                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3074                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3075                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3076                                                                 queue));
3077                         /* This is a hard error, log it. */
3078                         netdev_err(priv->dev,
3079                                    "%s: Tx Ring full when queue awake\n",
3080                                    __func__);
3081                 }
3082                 return NETDEV_TX_BUSY;
3083         }
3084
3085         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3086
3087         mss = skb_shinfo(skb)->gso_size;
3088
3089         /* set new MSS value if needed */
3090         if (mss != tx_q->mss) {
3091                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3092                         mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3093                 else
3094                         mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3095
3096                 stmmac_set_mss(priv, mss_desc, mss);
3097                 tx_q->mss = mss;
3098                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3099                                                 priv->dma_tx_size);
3100                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3101         }
3102
3103         if (netif_msg_tx_queued(priv)) {
3104                 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3105                         __func__, hdr, proto_hdr_len, pay_len, mss);
3106                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3107                         skb->data_len);
3108         }
3109
3110         /* Check if VLAN can be inserted by HW */
3111         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3112
3113         first_entry = tx_q->cur_tx;
3114         WARN_ON(tx_q->tx_skbuff[first_entry]);
3115
3116         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3117                 desc = &tx_q->dma_entx[first_entry].basic;
3118         else
3119                 desc = &tx_q->dma_tx[first_entry];
3120         first = desc;
3121
3122         if (has_vlan)
3123                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3124
3125         /* first descriptor: fill Headers on Buf1 */
3126         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3127                              DMA_TO_DEVICE);
3128         if (dma_mapping_error(priv->device, des))
3129                 goto dma_map_err;
3130
3131         tx_q->tx_skbuff_dma[first_entry].buf = des;
3132         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3133
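        /* With a descriptor address width of 32 bits or less, the first
         * descriptor carries the headers in buffer 1 and the start of the
         * payload in buffer 2; otherwise the whole payload is pushed to the
         * following descriptors by stmmac_tso_allocator().
         */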
3134         if (priv->dma_cap.addr64 <= 32) {
3135                 first->des0 = cpu_to_le32(des);
3136
3137                 /* Fill start of payload in buff2 of first descriptor */
3138                 if (pay_len)
3139                         first->des1 = cpu_to_le32(des + proto_hdr_len);
3140
3141                 /* If needed take extra descriptors to fill the remaining payload */
3142                 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3143         } else {
3144                 stmmac_set_desc_addr(priv, first, des);
3145                 tmp_pay_len = pay_len;
3146                 des += proto_hdr_len;
3147                 pay_len = 0;
3148         }
3149
3150         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3151
3152         /* Prepare fragments */
3153         for (i = 0; i < nfrags; i++) {
3154                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3155
3156                 des = skb_frag_dma_map(priv->device, frag, 0,
3157                                        skb_frag_size(frag),
3158                                        DMA_TO_DEVICE);
3159                 if (dma_mapping_error(priv->device, des))
3160                         goto dma_map_err;
3161
3162                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3163                                      (i == nfrags - 1), queue);
3164
3165                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3166                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3167                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3168         }
3169
3170         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3171
3172         /* Only the last descriptor gets to point to the skb. */
3173         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3174
3175         /* Manage tx mitigation */
3176         tx_packets = (tx_q->cur_tx + 1) - first_tx;
3177         tx_q->tx_count_frames += tx_packets;
3178
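        /* Set the Interrupt-on-Completion bit when a HW TX timestamp has been
         * requested or when the TX coalesce frame threshold is crossed;
         * otherwise leave completion handling to the TX timer.
         */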
3179         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3180                 set_ic = true;
3181         else if (!priv->tx_coal_frames)
3182                 set_ic = false;
3183         else if (tx_packets > priv->tx_coal_frames)
3184                 set_ic = true;
3185         else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3186                 set_ic = true;
3187         else
3188                 set_ic = false;
3189
3190         if (set_ic) {
3191                 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3192                         desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3193                 else
3194                         desc = &tx_q->dma_tx[tx_q->cur_tx];
3195
3196                 tx_q->tx_count_frames = 0;
3197                 stmmac_set_tx_ic(priv, desc);
3198                 priv->xstats.tx_set_ic_bit++;
3199         }
3200
3201         /* We've used all the descriptors we need for this skb; now
3202          * advance cur_tx so that it references a fresh descriptor.
3203          * ndo_start_xmit will fill this descriptor the next time it's
3204          * called and stmmac_tx_clean may clean up to this descriptor.
3205          */
3206         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3207
3208         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3209                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3210                           __func__);
3211                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3212         }
3213
3214         dev->stats.tx_bytes += skb->len;
3215         priv->xstats.tx_tso_frames++;
3216         priv->xstats.tx_tso_nfrags += nfrags;
3217
3218         if (priv->sarc_type)
3219                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3220
3221         skb_tx_timestamp(skb);
3222
3223         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3224                      priv->hwts_tx_en)) {
3225                 /* declare that device is doing timestamping */
3226                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3227                 stmmac_enable_tx_timestamp(priv, first);
3228         }
3229
3230         /* Complete the first descriptor before granting the DMA */
3231         stmmac_prepare_tso_tx_desc(priv, first, 1,
3232                         proto_hdr_len,
3233                         pay_len,
3234                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3235                         hdr / 4, (skb->len - proto_hdr_len));
3236
3237         /* If context desc is used to change MSS */
3238         if (mss_desc) {
3239                 /* Make sure that first descriptor has been completely
3240                  * written, including its own bit. This is because MSS is
3241                  * actually before first descriptor, so we need to make
3242                  * sure that MSS's own bit is the last thing written.
3243                  */
3244                 dma_wmb();
3245                 stmmac_set_tx_owner(priv, mss_desc);
3246         }
3247
3248         /* The own bit must be the latest setting done when preparing the
3249          * descriptor, and then a barrier is needed to make sure that
3250          * all is coherent before granting the DMA engine.
3251          */
3252         wmb();
3253
3254         if (netif_msg_pktdata(priv)) {
3255                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3256                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3257                         tx_q->cur_tx, first, nfrags);
3258                 pr_info(">>> frame to be transmitted: ");
3259                 print_pkt(skb->data, skb_headlen(skb));
3260         }
3261
3262         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3263
3264         if (tx_q->tbs & STMMAC_TBS_AVAIL)
3265                 desc_size = sizeof(struct dma_edesc);
3266         else
3267                 desc_size = sizeof(struct dma_desc);
3268
3269         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3270         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3271         stmmac_tx_timer_arm(priv, queue);
3272
3273         return NETDEV_TX_OK;
3274
3275 dma_map_err:
3276         dev_err(priv->device, "Tx dma map failed\n");
3277         dev_kfree_skb(skb);
3278         priv->dev->stats.tx_dropped++;
3279         return NETDEV_TX_OK;
3280 }
3281
3282 /**
3283  *  stmmac_xmit - Tx entry point of the driver
3284  *  @skb : the socket buffer
3285  *  @dev : device pointer
3286  *  Description : this is the tx entry point of the driver.
3287  *  It programs the chain or the ring and supports oversized frames
3288  *  and the SG feature.
3289  */
3290 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3291 {
3292         unsigned int first_entry, tx_packets, enh_desc;
3293         struct stmmac_priv *priv = netdev_priv(dev);
3294         unsigned int nopaged_len = skb_headlen(skb);
3295         int i, csum_insertion = 0, is_jumbo = 0;
3296         u32 queue = skb_get_queue_mapping(skb);
3297         int nfrags = skb_shinfo(skb)->nr_frags;
3298         int gso = skb_shinfo(skb)->gso_type;
3299         struct dma_edesc *tbs_desc = NULL;
3300         int entry, desc_size, first_tx;
3301         struct dma_desc *desc, *first;
3302         struct stmmac_tx_queue *tx_q;
3303         bool has_vlan, set_ic;
3304         dma_addr_t des;
3305
3306         tx_q = &priv->tx_queue[queue];
3307         first_tx = tx_q->cur_tx;
3308
3309         if (priv->tx_path_in_lpi_mode)
3310                 stmmac_disable_eee_mode(priv);
3311
3312         /* Manage oversized TCP frames for GMAC4 device */
3313         if (skb_is_gso(skb) && priv->tso) {
3314                 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3315                         return stmmac_tso_xmit(skb, dev);
3316                 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3317                         return stmmac_tso_xmit(skb, dev);
3318         }
3319
3320         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3321                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3322                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3323                                                                 queue));
3324                         /* This is a hard error, log it. */
3325                         netdev_err(priv->dev,
3326                                    "%s: Tx Ring full when queue awake\n",
3327                                    __func__);
3328                 }
3329                 return NETDEV_TX_BUSY;
3330         }
3331
3332         /* Check if VLAN can be inserted by HW */
3333         has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3334
3335         entry = tx_q->cur_tx;
3336         first_entry = entry;
3337         WARN_ON(tx_q->tx_skbuff[first_entry]);
3338
3339         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3340
3341         if (likely(priv->extend_desc))
3342                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3343         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3344                 desc = &tx_q->dma_entx[entry].basic;
3345         else
3346                 desc = tx_q->dma_tx + entry;
3347
3348         first = desc;
3349
3350         if (has_vlan)
3351                 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3352
3353         enh_desc = priv->plat->enh_desc;
3354         /* To program the descriptors according to the size of the frame */
3355         if (enh_desc)
3356                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3357
3358         if (unlikely(is_jumbo)) {
3359                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3360                 if (unlikely(entry < 0) && (entry != -EINVAL))
3361                         goto dma_map_err;
3362         }
3363
3364         for (i = 0; i < nfrags; i++) {
3365                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3366                 int len = skb_frag_size(frag);
3367                 bool last_segment = (i == (nfrags - 1));
3368
3369                 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3370                 WARN_ON(tx_q->tx_skbuff[entry]);
3371
3372                 if (likely(priv->extend_desc))
3373                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3374                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3375                         desc = &tx_q->dma_entx[entry].basic;
3376                 else
3377                         desc = tx_q->dma_tx + entry;
3378
3379                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3380                                        DMA_TO_DEVICE);
3381                 if (dma_mapping_error(priv->device, des))
3382                         goto dma_map_err; /* should reuse desc w/o issues */
3383
3384                 tx_q->tx_skbuff_dma[entry].buf = des;
3385
3386                 stmmac_set_desc_addr(priv, desc, des);
3387
3388                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3389                 tx_q->tx_skbuff_dma[entry].len = len;
3390                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3391
3392                 /* Prepare the descriptor and set the own bit too */
3393                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3394                                 priv->mode, 1, last_segment, skb->len);
3395         }
3396
3397         /* Only the last descriptor gets to point to the skb. */
3398         tx_q->tx_skbuff[entry] = skb;
3399
3400         /* According to the coalesce parameter the IC bit for the latest
3401          * segment is reset and the timer re-started to clean the tx status.
3402          * This approach takes care of the fragments: desc is the first
3403          * element in case of no SG.
3404          */
3405         tx_packets = (entry + 1) - first_tx;
3406         tx_q->tx_count_frames += tx_packets;
3407
3408         if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3409                 set_ic = true;
3410         else if (!priv->tx_coal_frames)
3411                 set_ic = false;
3412         else if (tx_packets > priv->tx_coal_frames)
3413                 set_ic = true;
3414         else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets)
3415                 set_ic = true;
3416         else
3417                 set_ic = false;
3418
3419         if (set_ic) {
3420                 if (likely(priv->extend_desc))
3421                         desc = &tx_q->dma_etx[entry].basic;
3422                 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3423                         desc = &tx_q->dma_entx[entry].basic;
3424                 else
3425                         desc = &tx_q->dma_tx[entry];
3426
3427                 tx_q->tx_count_frames = 0;
3428                 stmmac_set_tx_ic(priv, desc);
3429                 priv->xstats.tx_set_ic_bit++;
3430         }
3431
3432         /* We've used all the descriptors we need for this skb; now
3433          * advance cur_tx so that it references a fresh descriptor.
3434          * ndo_start_xmit will fill this descriptor the next time it's
3435          * called and stmmac_tx_clean may clean up to this descriptor.
3436          */
3437         entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3438         tx_q->cur_tx = entry;
3439
3440         if (netif_msg_pktdata(priv)) {
3441                 netdev_dbg(priv->dev,
3442                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3443                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3444                            entry, first, nfrags);
3445
3446                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3447                 print_pkt(skb->data, skb->len);
3448         }
3449
3450         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3451                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3452                           __func__);
3453                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3454         }
3455
3456         dev->stats.tx_bytes += skb->len;
3457
3458         if (priv->sarc_type)
3459                 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3460
3461         skb_tx_timestamp(skb);
3462
3463         /* Ready to fill the first descriptor and set the OWN bit w/o any
3464          * problems because all the descriptors are actually ready to be
3465          * passed to the DMA engine.
3466          */
3467         if (likely(!is_jumbo)) {
3468                 bool last_segment = (nfrags == 0);
3469
3470                 des = dma_map_single(priv->device, skb->data,
3471                                      nopaged_len, DMA_TO_DEVICE);
3472                 if (dma_mapping_error(priv->device, des))
3473                         goto dma_map_err;
3474
3475                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3476
3477                 stmmac_set_desc_addr(priv, first, des);
3478
3479                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3480                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3481
3482                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3483                              priv->hwts_tx_en)) {
3484                         /* declare that device is doing timestamping */
3485                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3486                         stmmac_enable_tx_timestamp(priv, first);
3487                 }
3488
3489                 /* Prepare the first descriptor setting the OWN bit too */
3490                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3491                                 csum_insertion, priv->mode, 0, last_segment,
3492                                 skb->len);
3493         }
3494
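        /* TBS: program the requested launch time (skb->tstamp) into the
         * enhanced descriptor when time based scheduling is enabled.
         */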
3495         if (tx_q->tbs & STMMAC_TBS_EN) {
3496                 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3497
3498                 tbs_desc = &tx_q->dma_entx[first_entry];
3499                 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3500         }
3501
3502         stmmac_set_tx_owner(priv, first);
3503
3504         /* The own bit must be the latest setting done when preparing the
3505          * descriptor, and then a barrier is needed to make sure that
3506          * all is coherent before granting the DMA engine.
3507          */
3508         wmb();
3509
3510         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3511
3512         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3513
3514         if (likely(priv->extend_desc))
3515                 desc_size = sizeof(struct dma_extended_desc);
3516         else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3517                 desc_size = sizeof(struct dma_edesc);
3518         else
3519                 desc_size = sizeof(struct dma_desc);
3520
3521         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3522         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3523         stmmac_tx_timer_arm(priv, queue);
3524
3525         return NETDEV_TX_OK;
3526
3527 dma_map_err:
3528         netdev_err(priv->dev, "Tx DMA map failed\n");
3529         dev_kfree_skb(skb);
3530         priv->dev->stats.tx_dropped++;
3531         return NETDEV_TX_OK;
3532 }
3533
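/* Pop the VLAN tag from the received frame data and record it in the skb
 * metadata when CTAG/STAG RX acceleration is enabled for that protocol.
 */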
3534 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3535 {
3536         struct vlan_ethhdr *veth;
3537         __be16 vlan_proto;
3538         u16 vlanid;
3539
3540         veth = (struct vlan_ethhdr *)skb->data;
3541         vlan_proto = veth->h_vlan_proto;
3542
3543         if ((vlan_proto == htons(ETH_P_8021Q) &&
3544              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3545             (vlan_proto == htons(ETH_P_8021AD) &&
3546              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3547                 /* pop the vlan tag */
3548                 vlanid = ntohs(veth->h_vlan_TCI);
3549                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3550                 skb_pull(skb, VLAN_HLEN);
3551                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3552         }
3553 }
3554
3555 /**
3556  * stmmac_rx_refill - refill the used preallocated RX buffers
3557  * @priv: driver private structure
3558  * @queue: RX queue index
3559  * Description : this is to refill the RX buffers used by the zero-copy
3560  * reception process.
3561  */
3562 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3563 {
3564         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3565         int len, dirty = stmmac_rx_dirty(priv, queue);
3566         unsigned int entry = rx_q->dirty_rx;
3567
3568         len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
3569
3570         while (dirty-- > 0) {
3571                 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
3572                 struct dma_desc *p;
3573                 bool use_rx_wd;
3574
3575                 if (priv->extend_desc)
3576                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3577                 else
3578                         p = rx_q->dma_rx + entry;
3579
3580                 if (!buf->page) {
3581                         buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
3582                         if (!buf->page)
3583                                 break;
3584                 }
3585
3586                 if (priv->sph && !buf->sec_page) {
3587                         buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
3588                         if (!buf->sec_page)
3589                                 break;
3590
3591                         buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
3592
3593                         dma_sync_single_for_device(priv->device, buf->sec_addr,
3594                                                    len, DMA_FROM_DEVICE);
3595                 }
3596
3597                 buf->addr = page_pool_get_dma_addr(buf->page);
3598
3599                 /* Sync whole allocation to device. This will invalidate old
3600                  * data.
3601                  */
3602                 dma_sync_single_for_device(priv->device, buf->addr, len,
3603                                            DMA_FROM_DEVICE);
3604
3605                 stmmac_set_desc_addr(priv, p, buf->addr);
3606                 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
3607                 stmmac_refill_desc3(priv, rx_q, p);
3608
3609                 rx_q->rx_count_frames++;
3610                 rx_q->rx_count_frames += priv->rx_coal_frames;
3611                 if (rx_q->rx_count_frames > priv->rx_coal_frames)
3612                         rx_q->rx_count_frames = 0;
3613
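                /* Request the RX watchdog for this descriptor when frame
                 * coalescing is disabled or frames are still pending below the
                 * coalesce threshold, and only if RIWT is actually in use.
                 */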
3614                 use_rx_wd = !priv->rx_coal_frames;
3615                 use_rx_wd |= rx_q->rx_count_frames > 0;
3616                 if (!priv->use_riwt)
3617                         use_rx_wd = false;
3618
3619                 dma_wmb();
3620                 stmmac_set_rx_owner(priv, p, use_rx_wd);
3621
3622                 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
3623         }
3624         rx_q->dirty_rx = entry;
3625         rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3626                             (rx_q->dirty_rx * sizeof(struct dma_desc));
3627         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3628 }
3629
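/* Compute how many bytes of the current frame live in buffer 1 of this
 * descriptor: zero for non-first descriptors in SPH mode, the split header
 * length on the first descriptor, the full buffer size for non-last
 * descriptors, or the frame length capped to the buffer size otherwise.
 */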
3630 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
3631                                        struct dma_desc *p,
3632                                        int status, unsigned int len)
3633 {
3634         unsigned int plen = 0, hlen = 0;
3635         int coe = priv->hw->rx_csum;
3636
3637         /* Not first descriptor, buffer is always zero */
3638         if (priv->sph && len)
3639                 return 0;
3640
3641         /* First descriptor, get split header length */
3642         stmmac_get_rx_header_len(priv, p, &hlen);
3643         if (priv->sph && hlen) {
3644                 priv->xstats.rx_split_hdr_pkt_n++;
3645                 return hlen;
3646         }
3647
3648         /* First descriptor, not last descriptor and not split header */
3649         if (status & rx_not_ls)
3650                 return priv->dma_buf_sz;
3651
3652         plen = stmmac_get_rx_frame_len(priv, p, coe);
3653
3654         /* First descriptor and last descriptor and not split header */
3655         return min_t(unsigned int, priv->dma_buf_sz, plen);
3656 }
3657
3658 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
3659                                        struct dma_desc *p,
3660                                        int status, unsigned int len)
3661 {
3662         int coe = priv->hw->rx_csum;
3663         unsigned int plen = 0;
3664
3665         /* Not split header, buffer is not available */
3666         if (!priv->sph)
3667                 return 0;
3668
3669         /* Not last descriptor */
3670         if (status & rx_not_ls)
3671                 return priv->dma_buf_sz;
3672
3673         plen = stmmac_get_rx_frame_len(priv, p, coe);
3674
3675         /* Last descriptor */
3676         return plen - len;
3677 }
3678
3679 /**
3680  * stmmac_rx - manage the receive process
3681  * @priv: driver private structure
3682  * @limit: napi budget
3683  * @queue: RX queue index.
3684  * Description :  this is the function called by the napi poll method.
3685  * It gets all the frames inside the ring.
3686  */
3687 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3688 {
3689         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3690         struct stmmac_channel *ch = &priv->channel[queue];
3691         unsigned int count = 0, error = 0, len = 0;
3692         int status = 0, coe = priv->hw->rx_csum;
3693         unsigned int next_entry = rx_q->cur_rx;
3694         struct sk_buff *skb = NULL;
3695
3696         if (netif_msg_rx_status(priv)) {
3697                 void *rx_head;
3698
3699                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3700                 if (priv->extend_desc)
3701                         rx_head = (void *)rx_q->dma_erx;
3702                 else
3703                         rx_head = (void *)rx_q->dma_rx;
3704
3705                 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
3706         }
3707         while (count < limit) {
3708                 unsigned int buf1_len = 0, buf2_len = 0;
3709                 enum pkt_hash_types hash_type;
3710                 struct stmmac_rx_buffer *buf;
3711                 struct dma_desc *np, *p;
3712                 int entry;
3713                 u32 hash;
3714
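                /* Resume a frame left half processed when the previous NAPI
                 * poll ran out of budget or descriptors (state_saved),
                 * otherwise start from a clean per-frame state.
                 */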
3715                 if (!count && rx_q->state_saved) {
3716                         skb = rx_q->state.skb;
3717                         error = rx_q->state.error;
3718                         len = rx_q->state.len;
3719                 } else {
3720                         rx_q->state_saved = false;
3721                         skb = NULL;
3722                         error = 0;
3723                         len = 0;
3724                 }
3725
3726                 if (count >= limit)
3727                         break;
3728
3729 read_again:
3730                 buf1_len = 0;
3731                 buf2_len = 0;
3732                 entry = next_entry;
3733                 buf = &rx_q->buf_pool[entry];
3734
3735                 if (priv->extend_desc)
3736                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3737                 else
3738                         p = rx_q->dma_rx + entry;
3739
3740                 /* read the status of the incoming frame */
3741                 status = stmmac_rx_status(priv, &priv->dev->stats,
3742                                 &priv->xstats, p);
3743                 /* check if managed by the DMA otherwise go ahead */
3744                 if (unlikely(status & dma_own))
3745                         break;
3746
3747                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
3748                                                 priv->dma_rx_size);
3749                 next_entry = rx_q->cur_rx;
3750
3751                 if (priv->extend_desc)
3752                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3753                 else
3754                         np = rx_q->dma_rx + next_entry;
3755
3756                 prefetch(np);
3757
3758                 if (priv->extend_desc)
3759                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3760                                         &priv->xstats, rx_q->dma_erx + entry);
3761                 if (unlikely(status == discard_frame)) {
3762                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
3763                         buf->page = NULL;
3764                         error = 1;
3765                         if (!priv->hwts_rx_en)
3766                                 priv->dev->stats.rx_errors++;
3767                 }
3768
3769                 if (unlikely(error && (status & rx_not_ls)))
3770                         goto read_again;
3771                 if (unlikely(error)) {
3772                         dev_kfree_skb(skb);
3773                         skb = NULL;
3774                         count++;
3775                         continue;
3776                 }
3777
3778                 /* Buffer is good. Go on. */
3779
3780                 prefetch(page_address(buf->page));
3781                 if (buf->sec_page)
3782                         prefetch(page_address(buf->sec_page));
3783
3784                 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
3785                 len += buf1_len;
3786                 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
3787                 len += buf2_len;
3788
3789                 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3790                  * Type frames (LLC/LLC-SNAP)
3791                  *
3792                  * llc_snap is never checked in GMAC >= 4, so this ACS
3793                  * feature is always disabled and packets need to be
3794                  * stripped manually.
3795                  */
3796                 if (likely(!(status & rx_not_ls)) &&
3797                     (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3798                      unlikely(status != llc_snap))) {
3799                         if (buf2_len)
3800                                 buf2_len -= ETH_FCS_LEN;
3801                         else
3802                                 buf1_len -= ETH_FCS_LEN;
3803
3804                         len -= ETH_FCS_LEN;
3805                 }
3806
3807                 if (!skb) {
3808                         skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
3809                         if (!skb) {
3810                                 priv->dev->stats.rx_dropped++;
3811                                 count++;
3812                                 goto drain_data;
3813                         }
3814
3815                         dma_sync_single_for_cpu(priv->device, buf->addr,
3816                                                 buf1_len, DMA_FROM_DEVICE);
3817                         skb_copy_to_linear_data(skb, page_address(buf->page),
3818                                                 buf1_len);
3819                         skb_put(skb, buf1_len);
3820
3821                         /* Data payload copied into SKB, page ready for recycle */
3822                         page_pool_recycle_direct(rx_q->page_pool, buf->page);
3823                         buf->page = NULL;
3824                 } else if (buf1_len) {
3825                         dma_sync_single_for_cpu(priv->device, buf->addr,
3826                                                 buf1_len, DMA_FROM_DEVICE);
3827                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3828                                         buf->page, 0, buf1_len,
3829                                         priv->dma_buf_sz);
3830
3831                         /* Data payload appended into SKB */
3832                         page_pool_release_page(rx_q->page_pool, buf->page);
3833                         buf->page = NULL;
3834                 }
3835
3836                 if (buf2_len) {
3837                         dma_sync_single_for_cpu(priv->device, buf->sec_addr,
3838                                                 buf2_len, DMA_FROM_DEVICE);
3839                         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
3840                                         buf->sec_page, 0, buf2_len,
3841                                         priv->dma_buf_sz);
3842
3843                         /* Data payload appended into SKB */
3844                         page_pool_release_page(rx_q->page_pool, buf->sec_page);
3845                         buf->sec_page = NULL;
3846                 }
3847
3848 drain_data:
3849                 if (likely(status & rx_not_ls))
3850                         goto read_again;
3851                 if (!skb)
3852                         continue;
3853
3854                 /* Got entire packet into SKB. Finish it. */
3855
3856                 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3857                 stmmac_rx_vlan(priv->dev, skb);
3858                 skb->protocol = eth_type_trans(skb, priv->dev);
3859
3860                 if (unlikely(!coe))
3861                         skb_checksum_none_assert(skb);
3862                 else
3863                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3864
3865                 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
3866                         skb_set_hash(skb, hash, hash_type);
3867
3868                 skb_record_rx_queue(skb, queue);
3869                 napi_gro_receive(&ch->rx_napi, skb);
3870                 skb = NULL;
3871
3872                 priv->dev->stats.rx_packets++;
3873                 priv->dev->stats.rx_bytes += len;
3874                 count++;
3875         }
3876
3877         if (status & rx_not_ls || skb) {
3878                 rx_q->state_saved = true;
3879                 rx_q->state.skb = skb;
3880                 rx_q->state.error = error;
3881                 rx_q->state.len = len;
3882         }
3883
3884         stmmac_rx_refill(priv, queue);
3885
3886         priv->xstats.rx_pkt_n += count;
3887
3888         return count;
3889 }
3890
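/* RX NAPI poll handler: process up to @budget received frames on this
 * channel and re-enable the RX DMA interrupt once the ring is drained.
 */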
3891 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3892 {
3893         struct stmmac_channel *ch =
3894                 container_of(napi, struct stmmac_channel, rx_napi);
3895         struct stmmac_priv *priv = ch->priv_data;
3896         u32 chan = ch->index;
3897         int work_done;
3898
3899         priv->xstats.napi_poll++;
3900
3901         work_done = stmmac_rx(priv, budget, chan);
3902         if (work_done < budget && napi_complete_done(napi, work_done)) {
3903                 unsigned long flags;
3904
3905                 spin_lock_irqsave(&ch->lock, flags);
3906                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3907                 spin_unlock_irqrestore(&ch->lock, flags);
3908         }
3909
3910         return work_done;
3911 }
3912
3913 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3914 {
3915         struct stmmac_channel *ch =
3916                 container_of(napi, struct stmmac_channel, tx_napi);
3917         struct stmmac_priv *priv = ch->priv_data;
3918         u32 chan = ch->index;
3919         int work_done;
3920
3921         priv->xstats.napi_poll++;
3922
3923         work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
3924         work_done = min(work_done, budget);
3925
3926         if (work_done < budget && napi_complete_done(napi, work_done)) {
3927                 unsigned long flags;
3928
3929                 spin_lock_irqsave(&ch->lock, flags);
3930                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3931                 spin_unlock_irqrestore(&ch->lock, flags);
3932         }
3933
3934         return work_done;
3935 }
3936
3937 /**
3938  *  stmmac_tx_timeout
3939  *  @dev : Pointer to net device structure
3940  *  @txqueue: the index of the hanging transmit queue
3941  *  Description: this function is called when a packet transmission fails to
3942  *   complete within a reasonable time. The driver will mark the error in the
3943  *   netdev structure and arrange for the device to be reset to a sane state
3944  *   in order to transmit a new packet.
3945  */
3946 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
3947 {
3948         struct stmmac_priv *priv = netdev_priv(dev);
3949
3950         stmmac_global_err(priv);
3951 }
3952
3953 /**
3954  *  stmmac_set_rx_mode - entry point for multicast addressing
3955  *  @dev : pointer to the device structure
3956  *  Description:
3957  *  This function is a driver entry point which gets called by the kernel
3958  *  whenever multicast addresses must be enabled/disabled.
3959  *  Return value:
3960  *  void.
3961  */
3962 static void stmmac_set_rx_mode(struct net_device *dev)
3963 {
3964         struct stmmac_priv *priv = netdev_priv(dev);
3965
3966         stmmac_set_filter(priv, priv->hw, dev);
3967 }
3968
3969 /**
3970  *  stmmac_change_mtu - entry point to change MTU size for the device.
3971  *  @dev : device pointer.
3972  *  @new_mtu : the new MTU size for the device.
3973  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3974  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3975  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3976  *  Return value:
3977  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3978  *  file on failure.
3979  */
3980 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3981 {
3982         struct stmmac_priv *priv = netdev_priv(dev);
3983         int txfifosz = priv->plat->tx_fifo_size;
3984
3985         if (txfifosz == 0)
3986                 txfifosz = priv->dma_cap.tx_fifo_size;
3987
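        /* The TX FIFO is shared between the TX queues, so it is the per-queue
         * share that limits the maximum MTU below.
         */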
3988         txfifosz /= priv->plat->tx_queues_to_use;
3989
3990         if (netif_running(dev)) {
3991                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3992                 return -EBUSY;
3993         }
3994
3995         new_mtu = STMMAC_ALIGN(new_mtu);
3996
3997         /* If condition true, FIFO is too small or MTU too large */
3998         if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
3999                 return -EINVAL;
4000
4001         dev->mtu = new_mtu;
4002
4003         netdev_update_features(dev);
4004
4005         return 0;
4006 }
4007
4008 static netdev_features_t stmmac_fix_features(struct net_device *dev,
4009                                              netdev_features_t features)
4010 {
4011         struct stmmac_priv *priv = netdev_priv(dev);
4012
4013         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4014                 features &= ~NETIF_F_RXCSUM;
4015
4016         if (!priv->plat->tx_coe)
4017                 features &= ~NETIF_F_CSUM_MASK;
4018
4019         /* Some GMAC devices have buggy Jumbo frame support that
4020          * needs to have the Tx COE disabled for oversized frames
4021          * (due to limited buffer sizes). In this case we disable
4022          * the TX csum insertion in the TDES and do not use SF.
4023          */
4024         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4025                 features &= ~NETIF_F_CSUM_MASK;
4026
4027         /* Disable tso if asked by ethtool */
4028         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4029                 if (features & NETIF_F_TSO)
4030                         priv->tso = true;
4031                 else
4032                         priv->tso = false;
4033         }
4034
4035         return features;
4036 }
4037
4038 static int stmmac_set_features(struct net_device *netdev,
4039                                netdev_features_t features)
4040 {
4041         struct stmmac_priv *priv = netdev_priv(netdev);
4042         bool sph_en;
4043         u32 chan;
4044
4045         /* Keep the COE Type in case the RX csum is supported */
4046         if (features & NETIF_F_RXCSUM)
4047                 priv->hw->rx_csum = priv->plat->rx_coe;
4048         else
4049                 priv->hw->rx_csum = 0;
4050         /* No check needed because rx_coe has been set before and it will be
4051          * fixed in case of issue.
4052          */
4053         stmmac_rx_ipc(priv, priv->hw);
4054
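        /* Split Header depends on RX checksum offload being enabled, so
         * re-evaluate it and reprogram every RX queue accordingly.
         */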
4055         sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4056         for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4057                 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4058
4059         return 0;
4060 }
4061
4062 /**
4063  *  stmmac_interrupt - main ISR
4064  *  @irq: interrupt number.
4065  *  @dev_id: to pass the net device pointer (must be valid).
4066  *  Description: this is the main driver interrupt service routine.
4067  *  It can call:
4068  *  o DMA service routine (to manage incoming frame reception and transmission
4069  *    status)
4070  *  o Core interrupts to manage: remote wake-up, management counter, LPI
4071  *    interrupts.
4072  */
4073 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4074 {
4075         struct net_device *dev = (struct net_device *)dev_id;
4076         struct stmmac_priv *priv = netdev_priv(dev);
4077         u32 rx_cnt = priv->plat->rx_queues_to_use;
4078         u32 tx_cnt = priv->plat->tx_queues_to_use;
4079         u32 queues_count;
4080         u32 queue;
4081         bool xmac;
4082
4083         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4084         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4085
4086         if (priv->irq_wake)
4087                 pm_wakeup_event(priv->device, 0);
4088
4089         /* Check if adapter is up */
4090         if (test_bit(STMMAC_DOWN, &priv->state))
4091                 return IRQ_HANDLED;
4092         /* Check if a fatal error happened */
4093         if (stmmac_safety_feat_interrupt(priv))
4094                 return IRQ_HANDLED;
4095
4096         /* To handle the GMAC's own interrupts */
4097         if ((priv->plat->has_gmac) || xmac) {
4098                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4099                 int mtl_status;
4100
4101                 if (unlikely(status)) {
4102                         /* For LPI we need to save the tx status */
4103                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4104                                 priv->tx_path_in_lpi_mode = true;
4105                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4106                                 priv->tx_path_in_lpi_mode = false;
4107                 }
4108
4109                 for (queue = 0; queue < queues_count; queue++) {
4110                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4111
4112                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
4113                                                                 queue);
4114                         if (mtl_status != -EINVAL)
4115                                 status |= mtl_status;
4116
4117                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
4118                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
4119                                                        rx_q->rx_tail_addr,
4120                                                        queue);
4121                 }
4122
4123                 /* PCS link status */
4124                 if (priv->hw->pcs) {
4125                         if (priv->xstats.pcs_link)
4126                                 netif_carrier_on(dev);
4127                         else
4128                                 netif_carrier_off(dev);
4129                 }
4130         }
4131
4132         /* To handle DMA interrupts */
4133         stmmac_dma_interrupt(priv);
4134
4135         return IRQ_HANDLED;
4136 }
4137
4138 #ifdef CONFIG_NET_POLL_CONTROLLER
4139 /* Polling receive - used by NETCONSOLE and other diagnostic tools
4140  * to allow network I/O with interrupts disabled.
4141  */
4142 static void stmmac_poll_controller(struct net_device *dev)
4143 {
4144         disable_irq(dev->irq);
4145         stmmac_interrupt(dev->irq, dev);
4146         enable_irq(dev->irq);
4147 }
4148 #endif
4149
4150 /**
4151  *  stmmac_ioctl - Entry point for the Ioctl
4152  *  @dev: Device pointer.
4153  *  @rq: An IOCTL specific structure that can contain a pointer to
4154  *  a proprietary structure used to pass information to the driver.
4155  *  @cmd: IOCTL command
4156  *  Description:
4157  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4158  */
4159 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4160 {
4161         struct stmmac_priv *priv = netdev_priv(dev);
4162         int ret = -EOPNOTSUPP;
4163
4164         if (!netif_running(dev))
4165                 return -EINVAL;
4166
4167         switch (cmd) {
4168         case SIOCGMIIPHY:
4169         case SIOCGMIIREG:
4170         case SIOCSMIIREG:
4171                 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4172                 break;
4173         case SIOCSHWTSTAMP:
4174                 ret = stmmac_hwtstamp_set(dev, rq);
4175                 break;
4176         case SIOCGHWTSTAMP:
4177                 ret = stmmac_hwtstamp_get(dev, rq);
4178                 break;
4179         default:
4180                 break;
4181         }
4182
4183         return ret;
4184 }
4185
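/**
 *  stmmac_setup_tc_block_cb - flow block callback for tc offload
 *  @type: type of classifier offload being configured
 *  @type_data: offload specific data (e.g. the cls_u32 or flower rule)
 *  @cb_priv: driver private structure registered with the flow block
 *  Description: dispatches CLSU32 and CLSFLOWER offload requests to the
 *  tc helpers; all queues are disabled while the hardware is updated.
 */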
4186 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4187                                     void *cb_priv)
4188 {
4189         struct stmmac_priv *priv = cb_priv;
4190         int ret = -EOPNOTSUPP;
4191
4192         if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4193                 return ret;
4194
4195         stmmac_disable_all_queues(priv);
4196
4197         switch (type) {
4198         case TC_SETUP_CLSU32:
4199                 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4200                 break;
4201         case TC_SETUP_CLSFLOWER:
4202                 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4203                 break;
4204         default:
4205                 break;
4206         }
4207
4208         stmmac_enable_all_queues(priv);
4209         return ret;
4210 }
4211
4212 static LIST_HEAD(stmmac_block_cb_list);
4213
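/**
 *  stmmac_setup_tc - ndo_setup_tc entry point
 *  @ndev: device pointer
 *  @type: type of tc offload requested
 *  @type_data: offload specific data
 *  Description: routes TC_SETUP_BLOCK to the flow block callback and
 *  handles the CBS, TAPRIO and ETF qdisc offloads.
 */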
4214 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4215                            void *type_data)
4216 {
4217         struct stmmac_priv *priv = netdev_priv(ndev);
4218
4219         switch (type) {
4220         case TC_SETUP_BLOCK:
4221                 return flow_block_cb_setup_simple(type_data,
4222                                                   &stmmac_block_cb_list,
4223                                                   stmmac_setup_tc_block_cb,
4224                                                   priv, priv, true);
4225         case TC_SETUP_QDISC_CBS:
4226                 return stmmac_tc_setup_cbs(priv, priv, type_data);
4227         case TC_SETUP_QDISC_TAPRIO:
4228                 return stmmac_tc_setup_taprio(priv, priv, type_data);
4229         case TC_SETUP_QDISC_ETF:
4230                 return stmmac_tc_setup_etf(priv, priv, type_data);
4231         default:
4232                 return -EOPNOTSUPP;
4233         }
4234 }
4235
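/**
 *  stmmac_select_queue - pick the TX queue for a given skb
 *  @dev: device pointer
 *  @skb: buffer to be transmitted
 *  @sb_dev: subordinate device (unused here)
 *  Description: TSO/USO frames are always sent on Queue 0 (see the
 *  comment below); everything else uses the default queue selection.
 */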
4236 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4237                                struct net_device *sb_dev)
4238 {
4239         int gso = skb_shinfo(skb)->gso_type;
4240
4241         if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4242                 /*
4243                  * There is no way to determine the number of TSO/USO
4244                  * capable Queues. Always use Queue 0, because if
4245                  * TSO/USO is supported then at least this queue will
4246                  * be capable.
4247                  */
4248                 return 0;
4249         }
4250
4251         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4252 }
4253
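/**
 *  stmmac_set_mac_address - ndo_set_mac_address entry point
 *  @ndev: device pointer
 *  @addr: new MAC address (struct sockaddr)
 *  Description: updates the netdev address and programs it into the
 *  MAC unicast address register 0.
 */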
4254 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4255 {
4256         struct stmmac_priv *priv = netdev_priv(ndev);
4257         int ret = 0;
4258
4259         ret = eth_mac_addr(ndev, addr);
4260         if (ret)
4261                 return ret;
4262
4263         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4264
4265         return ret;
4266 }
4267
4268 #ifdef CONFIG_DEBUG_FS
4269 static struct dentry *stmmac_fs_dir;
4270
4271 static void sysfs_display_ring(void *head, int size, int extend_desc,
4272                                struct seq_file *seq)
4273 {
4274         int i;
4275         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4276         struct dma_desc *p = (struct dma_desc *)head;
4277
4278         for (i = 0; i < size; i++) {
4279                 if (extend_desc) {
4280                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4281                                    i, (unsigned int)virt_to_phys(ep),
4282                                    le32_to_cpu(ep->basic.des0),
4283                                    le32_to_cpu(ep->basic.des1),
4284                                    le32_to_cpu(ep->basic.des2),
4285                                    le32_to_cpu(ep->basic.des3));
4286                         ep++;
4287                 } else {
4288                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
4289                                    i, (unsigned int)virt_to_phys(p),
4290                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4291                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4292                         p++;
4293                 }
4294                 seq_printf(seq, "\n");
4295         }
4296 }
4297
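/* Dump the RX/TX descriptor rings of every queue in use to the
 * "descriptors_status" debugfs entry; nothing is printed while the
 * interface is down.
 */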
4298 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4299 {
4300         struct net_device *dev = seq->private;
4301         struct stmmac_priv *priv = netdev_priv(dev);
4302         u32 rx_count = priv->plat->rx_queues_to_use;
4303         u32 tx_count = priv->plat->tx_queues_to_use;
4304         u32 queue;
4305
4306         if ((dev->flags & IFF_UP) == 0)
4307                 return 0;
4308
4309         for (queue = 0; queue < rx_count; queue++) {
4310                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4311
4312                 seq_printf(seq, "RX Queue %d:\n", queue);
4313
4314                 if (priv->extend_desc) {
4315                         seq_printf(seq, "Extended descriptor ring:\n");
4316                         sysfs_display_ring((void *)rx_q->dma_erx,
4317                                            priv->dma_rx_size, 1, seq);
4318                 } else {
4319                         seq_printf(seq, "Descriptor ring:\n");
4320                         sysfs_display_ring((void *)rx_q->dma_rx,
4321                                            priv->dma_rx_size, 0, seq);
4322                 }
4323         }
4324
4325         for (queue = 0; queue < tx_count; queue++) {
4326                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4327
4328                 seq_printf(seq, "TX Queue %d:\n", queue);
4329
4330                 if (priv->extend_desc) {
4331                         seq_printf(seq, "Extended descriptor ring:\n");
4332                         sysfs_display_ring((void *)tx_q->dma_etx,
4333                                            priv->dma_tx_size, 1, seq);
4334                 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4335                         seq_printf(seq, "Descriptor ring:\n");
4336                         sysfs_display_ring((void *)tx_q->dma_tx,
4337                                            priv->dma_tx_size, 0, seq);
4338                 }
4339         }
4340
4341         return 0;
4342 }
4343 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4344
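/* Report the content of the DMA HW capability register through the
 * "dma_cap" debugfs entry.
 */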
4345 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4346 {
4347         struct net_device *dev = seq->private;
4348         struct stmmac_priv *priv = netdev_priv(dev);
4349
4350         if (!priv->hw_cap_support) {
4351                 seq_printf(seq, "DMA HW features not supported\n");
4352                 return 0;
4353         }
4354
4355         seq_printf(seq, "==============================\n");
4356         seq_printf(seq, "\tDMA HW features\n");
4357         seq_printf(seq, "==============================\n");
4358
4359         seq_printf(seq, "\t10/100 Mbps: %s\n",
4360                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4361         seq_printf(seq, "\t1000 Mbps: %s\n",
4362                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
4363         seq_printf(seq, "\tHalf duplex: %s\n",
4364                    (priv->dma_cap.half_duplex) ? "Y" : "N");
4365         seq_printf(seq, "\tHash Filter: %s\n",
4366                    (priv->dma_cap.hash_filter) ? "Y" : "N");
4367         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
4368                    (priv->dma_cap.multi_addr) ? "Y" : "N");
4369         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
4370                    (priv->dma_cap.pcs) ? "Y" : "N");
4371         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
4372                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
4373         seq_printf(seq, "\tPMT Remote wake up: %s\n",
4374                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
4375         seq_printf(seq, "\tPMT Magic Frame: %s\n",
4376                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
4377         seq_printf(seq, "\tRMON module: %s\n",
4378                    (priv->dma_cap.rmon) ? "Y" : "N");
4379         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4380                    (priv->dma_cap.time_stamp) ? "Y" : "N");
4381         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4382                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
4383         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4384                    (priv->dma_cap.eee) ? "Y" : "N");
4385         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4386         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4387                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4388         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4389                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4390                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4391         } else {
4392                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4393                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4394                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4395                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4396         }
4397         seq_printf(seq, "\tRX FIFO > 2048 bytes: %s\n",
4398                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4399         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4400                    priv->dma_cap.number_rx_channel);
4401         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4402                    priv->dma_cap.number_tx_channel);
4403         seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
4404                    priv->dma_cap.number_rx_queues);
4405         seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
4406                    priv->dma_cap.number_tx_queues);
4407         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4408                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4409         seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
4410         seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
4411         seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
4412         seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
4413         seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
4414                    priv->dma_cap.pps_out_num);
4415         seq_printf(seq, "\tSafety Features: %s\n",
4416                    priv->dma_cap.asp ? "Y" : "N");
4417         seq_printf(seq, "\tFlexible RX Parser: %s\n",
4418                    priv->dma_cap.frpsel ? "Y" : "N");
4419         seq_printf(seq, "\tEnhanced Addressing: %d\n",
4420                    priv->dma_cap.addr64);
4421         seq_printf(seq, "\tReceive Side Scaling: %s\n",
4422                    priv->dma_cap.rssen ? "Y" : "N");
4423         seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
4424                    priv->dma_cap.vlhash ? "Y" : "N");
4425         seq_printf(seq, "\tSplit Header: %s\n",
4426                    priv->dma_cap.sphen ? "Y" : "N");
4427         seq_printf(seq, "\tVLAN TX Insertion: %s\n",
4428                    priv->dma_cap.vlins ? "Y" : "N");
4429         seq_printf(seq, "\tDouble VLAN: %s\n",
4430                    priv->dma_cap.dvlan ? "Y" : "N");
4431         seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
4432                    priv->dma_cap.l3l4fnum);
4433         seq_printf(seq, "\tARP Offloading: %s\n",
4434                    priv->dma_cap.arpoffsel ? "Y" : "N");
4435         seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
4436                    priv->dma_cap.estsel ? "Y" : "N");
4437         seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
4438                    priv->dma_cap.fpesel ? "Y" : "N");
4439         seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
4440                    priv->dma_cap.tbssel ? "Y" : "N");
4441         return 0;
4442 }
4443 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4444
4445 /* Use network device events to rename debugfs file entries.
4446  */
4447 static int stmmac_device_event(struct notifier_block *unused,
4448                                unsigned long event, void *ptr)
4449 {
4450         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4451         struct stmmac_priv *priv = netdev_priv(dev);
4452
4453         if (dev->netdev_ops != &stmmac_netdev_ops)
4454                 goto done;
4455
4456         switch (event) {
4457         case NETDEV_CHANGENAME:
4458                 if (priv->dbgfs_dir)
4459                         priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
4460                                                          priv->dbgfs_dir,
4461                                                          stmmac_fs_dir,
4462                                                          dev->name);
4463                 break;
4464         }
4465 done:
4466         return NOTIFY_DONE;
4467 }
4468
4469 static struct notifier_block stmmac_notifier = {
4470         .notifier_call = stmmac_device_event,
4471 };
4472
4473 static void stmmac_init_fs(struct net_device *dev)
4474 {
4475         struct stmmac_priv *priv = netdev_priv(dev);
4476
4477         rtnl_lock();
4478
4479         /* Create per netdev entries */
4480         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4481
4482         /* Entry to report DMA RX/TX rings */
4483         debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
4484                             &stmmac_rings_status_fops);
4485
4486         /* Entry to report the DMA HW features */
4487         debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
4488                             &stmmac_dma_cap_fops);
4489
4490         rtnl_unlock();
4491 }
4492
4493 static void stmmac_exit_fs(struct net_device *dev)
4494 {
4495         struct stmmac_priv *priv = netdev_priv(dev);
4496
4497         debugfs_remove_recursive(priv->dbgfs_dir);
4498 }
4499 #endif /* CONFIG_DEBUG_FS */
4500
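/* Compute the bitwise CRC32 (polynomial 0xedb88320) over the 12 valid
 * bits of a little-endian VLAN ID, as used to build the VLAN hash
 * filter below.
 */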
4501 static u32 stmmac_vid_crc32_le(__le16 vid_le)
4502 {
4503         unsigned char *data = (unsigned char *)&vid_le;
4504         unsigned char data_byte = 0;
4505         u32 crc = ~0x0;
4506         u32 temp = 0;
4507         int i, bits;
4508
4509         bits = get_bitmask_order(VLAN_VID_MASK);
4510         for (i = 0; i < bits; i++) {
4511                 if ((i % 8) == 0)
4512                         data_byte = data[i / 8];
4513
4514                 temp = ((crc & 1) ^ data_byte) & 1;
4515                 crc >>= 1;
4516                 data_byte >>= 1;
4517
4518                 if (temp)
4519                         crc ^= 0xedb88320;
4520         }
4521
4522         return crc;
4523 }
4524
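/* Rebuild the VLAN hash filter from the active_vlans bitmap. If the
 * hardware has no VLAN hash support, fall back to a single
 * perfect-match entry; since VID 0 always passes the filter, at most
 * two VIDs can be handled in that case.
 */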
4525 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
4526 {
4527         u32 crc, hash = 0;
4528         __le16 pmatch = 0;
4529         int count = 0;
4530         u16 vid = 0;
4531
4532         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
4533                 __le16 vid_le = cpu_to_le16(vid);
4534                 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
4535                 hash |= (1 << crc);
4536                 count++;
4537         }
4538
4539         if (!priv->dma_cap.vlhash) {
4540                 if (count > 2) /* VID = 0 always passes filter */
4541                         return -EOPNOTSUPP;
4542
4543                 pmatch = cpu_to_le16(vid);
4544                 hash = 0;
4545         }
4546
4547         return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
4548 }
4549
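/* ndo_vlan_rx_add_vid callback: track the VID, update the VLAN filter
 * and, if the hardware provides per-VID filter entries, program the
 * HW RX filter as well.
 */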
4550 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
4551 {
4552         struct stmmac_priv *priv = netdev_priv(ndev);
4553         bool is_double = false;
4554         int ret;
4555
4556         if (be16_to_cpu(proto) == ETH_P_8021AD)
4557                 is_double = true;
4558
4559         set_bit(vid, priv->active_vlans);
4560         ret = stmmac_vlan_update(priv, is_double);
4561         if (ret) {
4562                 clear_bit(vid, priv->active_vlans);
4563                 return ret;
4564         }
4565
4566         if (priv->hw->num_vlan) {
4567                 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4568                 if (ret)
4569                         return ret;
4570         }
4571
4572         return 0;
4573 }
4574
4575 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
4576 {
4577         struct stmmac_priv *priv = netdev_priv(ndev);
4578         bool is_double = false;
4579         int ret;
4580
4581         if (be16_to_cpu(proto) == ETH_P_8021AD)
4582                 is_double = true;
4583
4584         clear_bit(vid, priv->active_vlans);
4585
4586         if (priv->hw->num_vlan) {
4587                 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
4588                 if (ret)
4589                         return ret;
4590         }
4591
4592         return stmmac_vlan_update(priv, is_double);
4593 }
4594
4595 static const struct net_device_ops stmmac_netdev_ops = {
4596         .ndo_open = stmmac_open,
4597         .ndo_start_xmit = stmmac_xmit,
4598         .ndo_stop = stmmac_release,
4599         .ndo_change_mtu = stmmac_change_mtu,
4600         .ndo_fix_features = stmmac_fix_features,
4601         .ndo_set_features = stmmac_set_features,
4602         .ndo_set_rx_mode = stmmac_set_rx_mode,
4603         .ndo_tx_timeout = stmmac_tx_timeout,
4604         .ndo_do_ioctl = stmmac_ioctl,
4605         .ndo_setup_tc = stmmac_setup_tc,
4606         .ndo_select_queue = stmmac_select_queue,
4607 #ifdef CONFIG_NET_POLL_CONTROLLER
4608         .ndo_poll_controller = stmmac_poll_controller,
4609 #endif
4610         .ndo_set_mac_address = stmmac_set_mac_address,
4611         .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
4612         .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
4613 };
4614
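/**
 *  stmmac_reset_subtask - reset the adapter when requested
 *  @priv: driver private structure
 *  Description: if STMMAC_RESET_REQUESTED is pending and the interface
 *  is not already down, close and reopen the device under rtnl to
 *  recover from a fatal error.
 */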
4615 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4616 {
4617         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4618                 return;
4619         if (test_bit(STMMAC_DOWN, &priv->state))
4620                 return;
4621
4622         netdev_err(priv->dev, "Reset adapter.\n");
4623
4624         rtnl_lock();
4625         netif_trans_update(priv->dev);
4626         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4627                 usleep_range(1000, 2000);
4628
4629         set_bit(STMMAC_DOWN, &priv->state);
4630         dev_close(priv->dev);
4631         dev_open(priv->dev, NULL);
4632         clear_bit(STMMAC_DOWN, &priv->state);
4633         clear_bit(STMMAC_RESETING, &priv->state);
4634         rtnl_unlock();
4635 }
4636
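/* Workqueue service task: currently it only runs the reset subtask and
 * then clears the STMMAC_SERVICE_SCHED flag.
 */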
4637 static void stmmac_service_task(struct work_struct *work)
4638 {
4639         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4640                         service_task);
4641
4642         stmmac_reset_subtask(priv);
4643         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4644 }
4645
4646 /**
4647  *  stmmac_hw_init - Init the MAC device
4648  *  @priv: driver private structure
4649  *  Description: this function is to configure the MAC device according to
4650  *  some platform parameters or the HW capability register. It prepares the
4651  *  driver to use either ring or chain modes and to setup either enhanced or
4652  *  normal descriptors.
4653  */
4654 static int stmmac_hw_init(struct stmmac_priv *priv)
4655 {
4656         int ret;
4657
4658         /* dwmac-sun8i only works in chain mode */
4659         if (priv->plat->has_sun8i)
4660                 chain_mode = 1;
4661         priv->chain_mode = chain_mode;
4662
4663         /* Initialize HW Interface */
4664         ret = stmmac_hwif_init(priv);
4665         if (ret)
4666                 return ret;
4667
4668         /* Get the HW capability (GMAC cores newer than 3.50a) */
4669         priv->hw_cap_support = stmmac_get_hw_features(priv);
4670         if (priv->hw_cap_support) {
4671                 dev_info(priv->device, "DMA HW capability register supported\n");
4672
4673                 /* We can override some gmac/dma configuration fields
4674                  * (e.g. enh_desc, tx_coe) that are passed through the
4675                  * platform with the values from the HW capability
4676                  * register (if supported).
4677                  */
4678                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4679                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4680                 priv->hw->pmt = priv->plat->pmt;
4681                 if (priv->dma_cap.hash_tb_sz) {
4682                         priv->hw->multicast_filter_bins =
4683                                         (BIT(priv->dma_cap.hash_tb_sz) << 5);
4684                         priv->hw->mcast_bits_log2 =
4685                                         ilog2(priv->hw->multicast_filter_bins);
4686                 }
4687
4688                 /* TXCOE doesn't work in thresh DMA mode */
4689                 if (priv->plat->force_thresh_dma_mode)
4690                         priv->plat->tx_coe = 0;
4691                 else
4692                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4693
4694                 /* In case of GMAC4 rx_coe is from HW cap register. */
4695                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4696
4697                 if (priv->dma_cap.rx_coe_type2)
4698                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4699                 else if (priv->dma_cap.rx_coe_type1)
4700                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4701
4702         } else {
4703                 dev_info(priv->device, "No HW DMA feature register supported\n");
4704         }
4705
4706         if (priv->plat->rx_coe) {
4707                 priv->hw->rx_csum = priv->plat->rx_coe;
4708                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4709                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4710                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4711         }
4712         if (priv->plat->tx_coe)
4713                 dev_info(priv->device, "TX Checksum insertion supported\n");
4714
4715         if (priv->plat->pmt) {
4716                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4717                 device_set_wakeup_capable(priv->device, 1);
4718         }
4719
4720         if (priv->dma_cap.tsoen)
4721                 dev_info(priv->device, "TSO supported\n");
4722
4723         priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
4724         priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
4725
4726         /* Run HW quirks, if any */
4727         if (priv->hwif_quirks) {
4728                 ret = priv->hwif_quirks(priv);
4729                 if (ret)
4730                         return ret;
4731         }
4732
4733         /* Rx Watchdog is available in cores newer than 3.40.
4734          * In some cases, for example on buggy HW, this feature
4735          * has to be disabled; this can be done by passing the
4736          * riwt_off field from the platform.
4737          */
4738         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4739             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4740                 priv->use_riwt = 1;
4741                 dev_info(priv->device,
4742                          "Enable RX Mitigation via HW Watchdog Timer\n");
4743         }
4744
4745         return 0;
4746 }
4747
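/* Register one RX and/or TX NAPI context per channel, according to the
 * number of RX/TX queues in use.
 */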
4748 static void stmmac_napi_add(struct net_device *dev)
4749 {
4750         struct stmmac_priv *priv = netdev_priv(dev);
4751         u32 queue, maxq;
4752
4753         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4754
4755         for (queue = 0; queue < maxq; queue++) {
4756                 struct stmmac_channel *ch = &priv->channel[queue];
4757
4758                 ch->priv_data = priv;
4759                 ch->index = queue;
4760                 spin_lock_init(&ch->lock);
4761
4762                 if (queue < priv->plat->rx_queues_to_use) {
4763                         netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
4764                                        NAPI_POLL_WEIGHT);
4765                 }
4766                 if (queue < priv->plat->tx_queues_to_use) {
4767                         netif_tx_napi_add(dev, &ch->tx_napi,
4768                                           stmmac_napi_poll_tx,
4769                                           NAPI_POLL_WEIGHT);
4770                 }
4771         }
4772 }
4773
4774 static void stmmac_napi_del(struct net_device *dev)
4775 {
4776         struct stmmac_priv *priv = netdev_priv(dev);
4777         u32 queue, maxq;
4778
4779         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4780
4781         for (queue = 0; queue < maxq; queue++) {
4782                 struct stmmac_channel *ch = &priv->channel[queue];
4783
4784                 if (queue < priv->plat->rx_queues_to_use)
4785                         netif_napi_del(&ch->rx_napi);
4786                 if (queue < priv->plat->tx_queues_to_use)
4787                         netif_napi_del(&ch->tx_napi);
4788         }
4789 }
4790
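/**
 *  stmmac_reinit_queues - reconfigure the number of RX/TX queues
 *  @dev: device pointer
 *  @rx_cnt: new number of RX queues
 *  @tx_cnt: new number of TX queues
 *  Description: stops the interface if it is running, re-registers the
 *  NAPI contexts for the new queue counts and reopens the interface.
 */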
4791 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
4792 {
4793         struct stmmac_priv *priv = netdev_priv(dev);
4794         int ret = 0;
4795
4796         if (netif_running(dev))
4797                 stmmac_release(dev);
4798
4799         stmmac_napi_del(dev);
4800
4801         priv->plat->rx_queues_to_use = rx_cnt;
4802         priv->plat->tx_queues_to_use = tx_cnt;
4803
4804         stmmac_napi_add(dev);
4805
4806         if (netif_running(dev))
4807                 ret = stmmac_open(dev);
4808
4809         return ret;
4810 }
4811
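/**
 *  stmmac_reinit_ringparam - reconfigure the DMA descriptor ring sizes
 *  @dev: device pointer
 *  @rx_size: new RX ring size
 *  @tx_size: new TX ring size
 *  Description: stops the interface if it is running, updates the DMA
 *  ring sizes and reopens the interface.
 */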
4812 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
4813 {
4814         struct stmmac_priv *priv = netdev_priv(dev);
4815         int ret = 0;
4816
4817         if (netif_running(dev))
4818                 stmmac_release(dev);
4819
4820         priv->dma_rx_size = rx_size;
4821         priv->dma_tx_size = tx_size;
4822
4823         if (netif_running(dev))
4824                 ret = stmmac_open(dev);
4825
4826         return ret;
4827 }
4828
4829 /**
4830  * stmmac_dvr_probe
4831  * @device: device pointer
4832  * @plat_dat: platform data pointer
4833  * @res: stmmac resource pointer
4834  * Description: this is the main probe function used to
4835  * call alloc_etherdev and allocate the priv structure.
4836  * Return:
4837  * returns 0 on success, otherwise errno.
4838  */
4839 int stmmac_dvr_probe(struct device *device,
4840                      struct plat_stmmacenet_data *plat_dat,
4841                      struct stmmac_resources *res)
4842 {
4843         struct net_device *ndev = NULL;
4844         struct stmmac_priv *priv;
4845         u32 rxq;
4846         int i, ret = 0;
4847
4848         ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
4849                                        MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
4850         if (!ndev)
4851                 return -ENOMEM;
4852
4853         SET_NETDEV_DEV(ndev, device);
4854
4855         priv = netdev_priv(ndev);
4856         priv->device = device;
4857         priv->dev = ndev;
4858
4859         stmmac_set_ethtool_ops(ndev);
4860         priv->pause = pause;
4861         priv->plat = plat_dat;
4862         priv->ioaddr = res->addr;
4863         priv->dev->base_addr = (unsigned long)res->addr;
4864
4865         priv->dev->irq = res->irq;
4866         priv->wol_irq = res->wol_irq;
4867         priv->lpi_irq = res->lpi_irq;
4868
4869         if (!IS_ERR_OR_NULL(res->mac))
4870                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4871
4872         dev_set_drvdata(device, priv->dev);
4873
4874         /* Verify driver arguments */
4875         stmmac_verify_args();
4876
4877         /* Allocate workqueue */
4878         priv->wq = create_singlethread_workqueue("stmmac_wq");
4879         if (!priv->wq) {
4880                 dev_err(priv->device, "failed to create workqueue\n");
4881                 return -ENOMEM;
4882         }
4883
4884         INIT_WORK(&priv->service_task, stmmac_service_task);
4885
4886         /* Override with kernel parameters if supplied XXX CRS XXX
4887          * this needs to have multiple instances
4888          */
4889         if ((phyaddr >= 0) && (phyaddr <= 31))
4890                 priv->plat->phy_addr = phyaddr;
4891
4892         if (priv->plat->stmmac_rst) {
4893                 ret = reset_control_assert(priv->plat->stmmac_rst);
4894                 reset_control_deassert(priv->plat->stmmac_rst);
4895                 /* Some reset controllers have only reset callback instead of
4896                  * assert + deassert callbacks pair.
4897                  */
4898                 if (ret == -ENOTSUPP)
4899                         reset_control_reset(priv->plat->stmmac_rst);
4900         }
4901
4902         /* Init MAC and get the capabilities */
4903         ret = stmmac_hw_init(priv);
4904         if (ret)
4905                 goto error_hw_init;
4906
4907         stmmac_check_ether_addr(priv);
4908
4909         ndev->netdev_ops = &stmmac_netdev_ops;
4910
4911         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4912                             NETIF_F_RXCSUM;
4913
4914         ret = stmmac_tc_init(priv, priv);
4915         if (!ret)
4916                 ndev->hw_features |= NETIF_F_HW_TC;
4918
4919         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4920                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4921                 if (priv->plat->has_gmac4)
4922                         ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4923                 priv->tso = true;
4924                 dev_info(priv->device, "TSO feature enabled\n");
4925         }
4926
4927         if (priv->dma_cap.sphen) {
4928                 ndev->hw_features |= NETIF_F_GRO;
4929                 priv->sph = true;
4930                 dev_info(priv->device, "SPH feature enabled\n");
4931         }
4932
4933         if (priv->dma_cap.addr64) {
4934                 ret = dma_set_mask_and_coherent(device,
4935                                 DMA_BIT_MASK(priv->dma_cap.addr64));
4936                 if (!ret) {
4937                         dev_info(priv->device, "Using %d bits DMA width\n",
4938                                  priv->dma_cap.addr64);
4939
4940                         /*
4941                          * If more than 32 bits can be addressed, make sure to
4942                          * enable enhanced addressing mode.
4943                          */
4944                         if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
4945                                 priv->plat->dma_cfg->eame = true;
4946                 } else {
4947                         ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
4948                         if (ret) {
4949                                 dev_err(priv->device, "Failed to set DMA Mask\n");
4950                                 goto error_hw_init;
4951                         }
4952
4953                         priv->dma_cap.addr64 = 32;
4954                 }
4955         }
4956
4957         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4958         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4959 #ifdef STMMAC_VLAN_TAG_USED
4960         /* Both mac100 and gmac support receive VLAN tag detection */
4961         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4962         if (priv->dma_cap.vlhash) {
4963                 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4964                 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
4965         }
4966         if (priv->dma_cap.vlins) {
4967                 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
4968                 if (priv->dma_cap.dvlan)
4969                         ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
4970         }
4971 #endif
4972         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4973
4974         /* Initialize RSS */
4975         rxq = priv->plat->rx_queues_to_use;
4976         netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
4977         for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
4978                 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
4979
4980         if (priv->dma_cap.rssen && priv->plat->rss_en)
4981                 ndev->features |= NETIF_F_RXHASH;
4982
4983         /* MTU range: 46 - hw-specific max */
4984         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4985         if (priv->plat->has_xgmac)
4986                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4987         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4988                 ndev->max_mtu = JUMBO_LEN;
4989         else
4990                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4991         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4992          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4993          */
4994         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4995             (priv->plat->maxmtu >= ndev->min_mtu))
4996                 ndev->max_mtu = priv->plat->maxmtu;
4997         else if (priv->plat->maxmtu < ndev->min_mtu)
4998                 dev_warn(priv->device,
4999                          "%s: warning: maxmtu having invalid value (%d)\n",
5000                          __func__, priv->plat->maxmtu);
5001
5002         if (flow_ctrl)
5003                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
5004
5005         /* Setup channels NAPI */
5006         stmmac_napi_add(ndev);
5007
5008         mutex_init(&priv->lock);
5009
5010         /* If a specific clk_csr value is passed from the platform,
5011          * the CSR Clock Range selection cannot be changed at run-time
5012          * and is fixed. Otherwise, the driver will try to set the
5013          * MDC clock dynamically according to the actual csr clock
5014          * input.
5015          */
5016         if (priv->plat->clk_csr >= 0)
5017                 priv->clk_csr = priv->plat->clk_csr;
5018         else
5019                 stmmac_clk_csr_set(priv);
5020
5021         stmmac_check_pcs_mode(priv);
5022
5023         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5024             priv->hw->pcs != STMMAC_PCS_RTBI) {
5025                 /* MDIO bus Registration */
5026                 ret = stmmac_mdio_register(ndev);
5027                 if (ret < 0) {
5028                         dev_err(priv->device,
5029                                 "%s: MDIO bus (id: %d) registration failed",
5030                                 __func__, priv->plat->bus_id);
5031                         goto error_mdio_register;
5032                 }
5033         }
5034
5035         ret = stmmac_phy_setup(priv);
5036         if (ret) {
5037                 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5038                 goto error_phy_setup;
5039         }
5040
5041         ret = register_netdev(ndev);
5042         if (ret) {
5043                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
5044                         __func__, ret);
5045                 goto error_netdev_register;
5046         }
5047
5048         if (priv->plat->serdes_powerup) {
5049                 ret = priv->plat->serdes_powerup(ndev,
5050                                                  priv->plat->bsp_priv);
5051
5052                 if (ret < 0)
5053                         goto error_serdes_powerup;
5054         }
5055
5056 #ifdef CONFIG_DEBUG_FS
5057         stmmac_init_fs(ndev);
5058 #endif
5059
5060         return ret;
5061
5062 error_serdes_powerup:
5063         unregister_netdev(ndev);
5064 error_netdev_register:
5065         phylink_destroy(priv->phylink);
5066 error_phy_setup:
5067         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5068             priv->hw->pcs != STMMAC_PCS_RTBI)
5069                 stmmac_mdio_unregister(ndev);
5070 error_mdio_register:
5071         stmmac_napi_del(ndev);
5072 error_hw_init:
5073         destroy_workqueue(priv->wq);
5074
5075         return ret;
5076 }
5077 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5078
5079 /**
5080  * stmmac_dvr_remove
5081  * @dev: device pointer
5082  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
5083  * changes the link status and releases the DMA descriptor rings.
5084  */
5085 int stmmac_dvr_remove(struct device *dev)
5086 {
5087         struct net_device *ndev = dev_get_drvdata(dev);
5088         struct stmmac_priv *priv = netdev_priv(ndev);
5089
5090         netdev_info(priv->dev, "%s: removing driver\n", __func__);
5091
5092         stmmac_stop_all_dma(priv);
5093
5094         if (priv->plat->serdes_powerdown)
5095                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5096
5097         stmmac_mac_set(priv, priv->ioaddr, false);
5098         netif_carrier_off(ndev);
5099         unregister_netdev(ndev);
5100 #ifdef CONFIG_DEBUG_FS
5101         stmmac_exit_fs(ndev);
5102 #endif
5103         phylink_destroy(priv->phylink);
5104         if (priv->plat->stmmac_rst)
5105                 reset_control_assert(priv->plat->stmmac_rst);
5106         clk_disable_unprepare(priv->plat->pclk);
5107         clk_disable_unprepare(priv->plat->stmmac_clk);
5108         if (priv->hw->pcs != STMMAC_PCS_TBI &&
5109             priv->hw->pcs != STMMAC_PCS_RTBI)
5110                 stmmac_mdio_unregister(ndev);
5111         destroy_workqueue(priv->wq);
5112         mutex_destroy(&priv->lock);
5113
5114         return 0;
5115 }
5116 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5117
5118 /**
5119  * stmmac_suspend - suspend callback
5120  * @dev: device pointer
5121  * Description: this is the function to suspend the device and it is called
5122  * by the platform driver to stop the network queue, release the resources,
5123  * program the PMT register (for WoL), clean and release driver resources.
5124  */
5125 int stmmac_suspend(struct device *dev)
5126 {
5127         struct net_device *ndev = dev_get_drvdata(dev);
5128         struct stmmac_priv *priv = netdev_priv(ndev);
5129         u32 chan;
5130
5131         if (!ndev || !netif_running(ndev))
5132                 return 0;
5133
5134         phylink_mac_change(priv->phylink, false);
5135
5136         mutex_lock(&priv->lock);
5137
5138         netif_device_detach(ndev);
5139
5140         stmmac_disable_all_queues(priv);
5141
5142         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5143                 del_timer_sync(&priv->tx_queue[chan].txtimer);
5144
5145         /* Stop TX/RX DMA */
5146         stmmac_stop_all_dma(priv);
5147
5148         if (priv->plat->serdes_powerdown)
5149                 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5150
5151         /* Enable Power down mode by programming the PMT regs */
5152         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5153                 stmmac_pmt(priv, priv->hw, priv->wolopts);
5154                 priv->irq_wake = 1;
5155         } else {
5156                 mutex_unlock(&priv->lock);
5157                 rtnl_lock();
5158                 if (device_may_wakeup(priv->device))
5159                         phylink_speed_down(priv->phylink, false);
5160                 phylink_stop(priv->phylink);
5161                 rtnl_unlock();
5162                 mutex_lock(&priv->lock);
5163
5164                 stmmac_mac_set(priv, priv->ioaddr, false);
5165                 pinctrl_pm_select_sleep_state(priv->device);
5166                 /* Disable clocks in case PWM is off */
5167                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
5168                 clk_disable_unprepare(priv->plat->pclk);
5169                 clk_disable_unprepare(priv->plat->stmmac_clk);
5170         }
5171         mutex_unlock(&priv->lock);
5172
5173         priv->speed = SPEED_UNKNOWN;
5174         return 0;
5175 }
5176 EXPORT_SYMBOL_GPL(stmmac_suspend);
5177
5178 /**
5179  * stmmac_reset_queues_param - reset queue parameters
5180  * @priv: device pointer
5181  */
5182 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5183 {
5184         u32 rx_cnt = priv->plat->rx_queues_to_use;
5185         u32 tx_cnt = priv->plat->tx_queues_to_use;
5186         u32 queue;
5187
5188         for (queue = 0; queue < rx_cnt; queue++) {
5189                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5190
5191                 rx_q->cur_rx = 0;
5192                 rx_q->dirty_rx = 0;
5193         }
5194
5195         for (queue = 0; queue < tx_cnt; queue++) {
5196                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5197
5198                 tx_q->cur_tx = 0;
5199                 tx_q->dirty_tx = 0;
5200                 tx_q->mss = 0;
5201         }
5202 }
5203
5204 /**
5205  * stmmac_resume - resume callback
5206  * @dev: device pointer
5207  * Description: on resume this function is invoked to set up the DMA and CORE
5208  * in a usable state.
5209  */
5210 int stmmac_resume(struct device *dev)
5211 {
5212         struct net_device *ndev = dev_get_drvdata(dev);
5213         struct stmmac_priv *priv = netdev_priv(ndev);
5214         int ret;
5215
5216         if (!netif_running(ndev))
5217                 return 0;
5218
5219         /* The Power Down bit in the PM register is cleared
5220          * automatically as soon as a magic packet or a Wake-up frame
5221          * is received. Anyway, it's better to manually clear
5222          * this bit because it can generate problems while resuming
5223          * from other devices (e.g. serial console).
5224          */
5225         if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5226                 mutex_lock(&priv->lock);
5227                 stmmac_pmt(priv, priv->hw, 0);
5228                 mutex_unlock(&priv->lock);
5229                 priv->irq_wake = 0;
5230         } else {
5231                 pinctrl_pm_select_default_state(priv->device);
5232                 /* enable the clk previously disabled */
5233                 clk_prepare_enable(priv->plat->stmmac_clk);
5234                 clk_prepare_enable(priv->plat->pclk);
5235                 if (priv->plat->clk_ptp_ref)
5236                         clk_prepare_enable(priv->plat->clk_ptp_ref);
5237                 /* reset the phy so that it's ready */
5238                 if (priv->mii)
5239                         stmmac_mdio_reset(priv->mii);
5240         }
5241
5242         if (priv->plat->serdes_powerup) {
5243                 ret = priv->plat->serdes_powerup(ndev,
5244                                                  priv->plat->bsp_priv);
5245
5246                 if (ret < 0)
5247                         return ret;
5248         }
5249
5250         mutex_lock(&priv->lock);
5251
5252         stmmac_reset_queues_param(priv);
5253
5254         stmmac_clear_descriptors(priv);
5255
5256         stmmac_hw_setup(ndev, false);
5257         stmmac_init_coalesce(priv);
5258         stmmac_set_rx_mode(ndev);
5259
5260         stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
5261
5262         stmmac_enable_all_queues(priv);
5263
5264         mutex_unlock(&priv->lock);
5265
5266         if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
5267                 rtnl_lock();
5268                 phylink_start(priv->phylink);
5269                 /* We may have called phylink_speed_down before */
5270                 phylink_speed_up(priv->phylink);
5271                 rtnl_unlock();
5272         }
5273
5274         phylink_mac_change(priv->phylink, true);
5275
5276         netif_device_attach(ndev);
5277
5278         return 0;
5279 }
5280 EXPORT_SYMBOL_GPL(stmmac_resume);
5281
5282 #ifndef MODULE
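/* Parse the "stmmaceth=" kernel command line, a comma-separated list of
 * option:value pairs, e.g. stmmaceth=debug:16,eee_timer:500. Each option
 * overrides the corresponding module parameter above.
 */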
5283 static int __init stmmac_cmdline_opt(char *str)
5284 {
5285         char *opt;
5286
5287         if (!str || !*str)
5288                 return -EINVAL;
5289         while ((opt = strsep(&str, ",")) != NULL) {
5290                 if (!strncmp(opt, "debug:", 6)) {
5291                         if (kstrtoint(opt + 6, 0, &debug))
5292                                 goto err;
5293                 } else if (!strncmp(opt, "phyaddr:", 8)) {
5294                         if (kstrtoint(opt + 8, 0, &phyaddr))
5295                                 goto err;
5296                 } else if (!strncmp(opt, "buf_sz:", 7)) {
5297                         if (kstrtoint(opt + 7, 0, &buf_sz))
5298                                 goto err;
5299                 } else if (!strncmp(opt, "tc:", 3)) {
5300                         if (kstrtoint(opt + 3, 0, &tc))
5301                                 goto err;
5302                 } else if (!strncmp(opt, "watchdog:", 9)) {
5303                         if (kstrtoint(opt + 9, 0, &watchdog))
5304                                 goto err;
5305                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
5306                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
5307                                 goto err;
5308                 } else if (!strncmp(opt, "pause:", 6)) {
5309                         if (kstrtoint(opt + 6, 0, &pause))
5310                                 goto err;
5311                 } else if (!strncmp(opt, "eee_timer:", 10)) {
5312                         if (kstrtoint(opt + 10, 0, &eee_timer))
5313                                 goto err;
5314                 } else if (!strncmp(opt, "chain_mode:", 11)) {
5315                         if (kstrtoint(opt + 11, 0, &chain_mode))
5316                                 goto err;
5317                 }
5318         }
5319         return 0;
5320
5321 err:
5322         pr_err("%s: ERROR broken module parameter conversion", __func__);
5323         return -EINVAL;
5324 }
5325
5326 __setup("stmmaceth=", stmmac_cmdline_opt);
5327 #endif /* MODULE */
5328
5329 static int __init stmmac_init(void)
5330 {
5331 #ifdef CONFIG_DEBUG_FS
5332         /* Create debugfs main directory if it doesn't exist yet */
5333         if (!stmmac_fs_dir)
5334                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
5335         register_netdevice_notifier(&stmmac_notifier);
5336 #endif
5337
5338         return 0;
5339 }
5340
5341 static void __exit stmmac_exit(void)
5342 {
5343 #ifdef CONFIG_DEBUG_FS
5344         unregister_netdevice_notifier(&stmmac_notifier);
5345         debugfs_remove_recursive(stmmac_fs_dir);
5346 #endif
5347 }
5348
5349 module_init(stmmac_init)
5350 module_exit(stmmac_exit)
5351
5352 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
5353 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
5354 MODULE_LICENSE("GPL");