// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

        Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
        http://www.stlinux.com
  Support available at:
        https://bugzilla.stlinux.com/
*******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/prefetch.h>
32 #include <linux/pinctrl/consumer.h>
33 #ifdef CONFIG_DEBUG_FS
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
36 #endif /* CONFIG_DEBUG_FS */
37 #include <linux/net_tstamp.h>
38 #include <net/pkt_cls.h>
39 #include "stmmac_ptp.h"
40 #include "stmmac.h"
41 #include <linux/reset.h>
42 #include <linux/of_mdio.h>
43 #include "dwmac1000.h"
44 #include "dwxgmac2.h"
45 #include "hwif.h"
46
#define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO        5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK     256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER        1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but it allows the user to force the use of chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
        if (unlikely(watchdog < 0))
                watchdog = TX_TIMEO;
        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
                buf_sz = DEFAULT_BUFSIZE;
        if (unlikely(flow_ctrl > 1))
                flow_ctrl = FLOW_AUTO;
        else if (likely(flow_ctrl < 0))
                flow_ctrl = FLOW_OFF;
        if (unlikely((pause < 0) || (pause > 0xffff)))
                pause = PAUSE_TIME;
        if (eee_timer < 0)
                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;

        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                if (queue < rx_queues_cnt)
                        napi_disable(&ch->rx_napi);
                if (queue < tx_queues_cnt)
                        napi_disable(&ch->tx_napi);
        }
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;

        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                if (queue < rx_queues_cnt)
                        napi_enable(&ch->rx_napi);
                if (queue < tx_queues_cnt)
                        napi_enable(&ch->tx_napi);
        }
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < tx_queues_cnt; queue++)
                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
        if (!test_bit(STMMAC_DOWN, &priv->state) &&
            !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
                queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
        netif_carrier_off(priv->dev);
        set_bit(STMMAC_RESET_REQUESTED, &priv->state);
        stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *      If a specific clk_csr value is passed from the platform
 *      this means that the CSR Clock Range selection cannot be
 *      changed at run-time and it is fixed (as reported in the driver
 *      documentation). Otherwise the driver will try to set the MDC
 *      clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
        u32 clk_rate;

        clk_rate = clk_get_rate(priv->plat->stmmac_clk);

        /* The platform-provided default clk_csr is assumed valid in
         * all cases except the ones mentioned below. For rates higher
         * than the IEEE 802.3 specified frequency we cannot estimate
         * the proper divider, as the frequency of clk_csr_i is not
         * known, so the default divider is left unchanged.
         */
        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
                if (clk_rate < CSR_F_35M)
                        priv->clk_csr = STMMAC_CSR_20_35M;
                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
                        priv->clk_csr = STMMAC_CSR_35_60M;
                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
                        priv->clk_csr = STMMAC_CSR_60_100M;
                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
                        priv->clk_csr = STMMAC_CSR_100_150M;
                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
                        priv->clk_csr = STMMAC_CSR_150_250M;
                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
                        priv->clk_csr = STMMAC_CSR_250_300M;
        }

        if (priv->plat->has_sun8i) {
                if (clk_rate > 160000000)
                        priv->clk_csr = 0x03;
                else if (clk_rate > 80000000)
                        priv->clk_csr = 0x02;
                else if (clk_rate > 40000000)
                        priv->clk_csr = 0x01;
                else
                        priv->clk_csr = 0;
        }

        if (priv->plat->has_xgmac) {
                if (clk_rate > 400000000)
                        priv->clk_csr = 0x5;
                else if (clk_rate > 350000000)
                        priv->clk_csr = 0x4;
                else if (clk_rate > 300000000)
                        priv->clk_csr = 0x3;
                else if (clk_rate > 250000000)
                        priv->clk_csr = 0x2;
                else if (clk_rate > 150000000)
                        priv->clk_csr = 0x1;
                else
                        priv->clk_csr = 0x0;
        }
}

static void print_pkt(unsigned char *buf, int len)
{
        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        u32 avail;

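        /* One descriptor is always kept unused (hence the "- 1") so
         * that a full ring can be told apart from an empty one, where
         * cur_tx == dirty_tx.
         */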
        if (tx_q->dirty_tx > tx_q->cur_tx)
                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
        else
                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

        return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        u32 dirty;

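        /* "dirty" counts the descriptors the driver has consumed but
         * not yet refilled and handed back to the DMA engine.
         */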
        if (rx_q->dirty_rx <= rx_q->cur_rx)
                dirty = rx_q->cur_rx - rx_q->dirty_rx;
        else
                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

        return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        struct phy_device *phydev = ndev->phydev;

        if (likely(priv->plat->fix_mac_speed))
                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies that all TX queues are idle and, if so,
 * enters the LPI mode when EEE is in use.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* check if all TX queues have the work finished */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                if (tx_q->dirty_tx != tx_q->cur_tx)
                        return; /* still unfinished work */
        }

        /* Check and enter in LPI mode */
        if (!priv->tx_path_in_lpi_mode)
                stmmac_set_eee_mode(priv, priv->hw,
                                priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state, if it is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
        stmmac_reset_eee_mode(priv, priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list pointer embedded in the driver private structure
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
        struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

        stmmac_enable_eee_mode(priv);
        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
        struct net_device *ndev = priv->dev;
        int interface = priv->plat->interface;
        bool ret = false;

        if ((interface != PHY_INTERFACE_MODE_MII) &&
            (interface != PHY_INTERFACE_MODE_GMII) &&
            !phy_interface_mode_is_rgmii(interface))
                goto out;

        /* Using PCS we cannot deal with the PHY registers at this stage,
         * so we do not support extra features like EEE.
         */
        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
            (priv->hw->pcs == STMMAC_PCS_TBI) ||
            (priv->hw->pcs == STMMAC_PCS_RTBI))
                goto out;

        /* MAC core supports the EEE feature. */
        if (priv->dma_cap.eee) {
                int tx_lpi_timer = priv->tx_lpi_timer;

                /* Check if the PHY supports EEE */
                if (phy_init_eee(ndev->phydev, 1)) {
                        /* Manage at run-time the case where EEE cannot be
                         * supported anymore (for example because the link
                         * partner caps have changed).
                         * In that case the driver disables its own timers.
                         */
                        mutex_lock(&priv->lock);
                        if (priv->eee_active) {
                                netdev_dbg(priv->dev, "disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
                                stmmac_set_eee_timer(priv, priv->hw, 0,
                                                tx_lpi_timer);
                        }
                        priv->eee_active = 0;
                        mutex_unlock(&priv->lock);
                        goto out;
                }
                /* Activate the EEE and start timers */
                mutex_lock(&priv->lock);
                if (!priv->eee_active) {
                        priv->eee_active = 1;
                        timer_setup(&priv->eee_ctrl_timer,
                                    stmmac_eee_ctrl_timer, 0);
                        mod_timer(&priv->eee_ctrl_timer,
                                  STMMAC_LPI_T(eee_timer));

                        stmmac_set_eee_timer(priv, priv->hw,
                                        STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
                }
                /* Set HW EEE according to the speed */
                stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

                ret = true;
                mutex_unlock(&priv->lock);

                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
        }
out:
        return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks, and passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
                                   struct dma_desc *p, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps shhwtstamp;
        u64 ns = 0;

        if (!priv->hwts_tx_en)
                return;

        /* exit if skb doesn't support hw tstamp */
        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
                return;

        /* check tx tstamp status */
        if (stmmac_get_tx_timestamp_status(priv, p)) {
                /* get the valid tstamp */
                stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp.hwtstamp = ns_to_ktime(ns);

                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
                /* pass tstamp to stack */
                skb_tstamp_tx(skb, &shhwtstamp);
        }
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
                                   struct dma_desc *np, struct sk_buff *skb)
{
        struct skb_shared_hwtstamps *shhwtstamp = NULL;
        struct dma_desc *desc = p;
        u64 ns = 0;

        if (!priv->hwts_rx_en)
                return;
        /* For GMAC4, the valid timestamp is from CTX next desc. */
        if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
                desc = np;

        /* Check if timestamp is available */
        if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
                stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
                shhwtstamp = skb_hwtstamps(skb);
                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
                shhwtstamp->hwtstamp = ns_to_ktime(ns);
        } else {
                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
        }
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate negative error code on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config config;
        struct timespec64 now;
        u64 temp = 0;
        u32 ptp_v2 = 0;
        u32 tstamp_all = 0;
        u32 ptp_over_ipv4_udp = 0;
        u32 ptp_over_ipv6_udp = 0;
        u32 ptp_over_ethernet = 0;
        u32 snap_type_sel = 0;
        u32 ts_master_en = 0;
        u32 ts_event_en = 0;
        u32 sec_inc = 0;
        u32 value = 0;
        bool xmac;

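        /* GMAC4/5 and XGMAC cores take the same timestamping code
         * paths below, tracked via the "xmac" flag.
         */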
        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
                netdev_alert(priv->dev, "No support for HW time stamping\n");
                priv->hwts_tx_en = 0;
                priv->hwts_rx_en = 0;

                return -EOPNOTSUPP;
        }

        if (copy_from_user(&config, ifr->ifr_data,
                           sizeof(config)))
                return -EFAULT;

        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
                   __func__, config.flags, config.tx_type, config.rx_filter);

        /* reserved for future extensions */
        if (config.flags)
                return -EINVAL;

        if (config.tx_type != HWTSTAMP_TX_OFF &&
            config.tx_type != HWTSTAMP_TX_ON)
                return -ERANGE;

        if (priv->adv_ts) {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        /* time stamp no incoming packet at all */
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        /* 'xmac' hardware can support Sync, Pdelay_Req and
                         * Pdelay_resp by setting bit14 and bits17/16 to 01.
                         * This leaves Delay_Req timestamps out.
                         * Enable all events *and* general purpose message
                         * timestamping
                         */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                        /* PTP v1, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                        /* PTP v1, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                        /* PTP v2, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for all event messages */
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                        /* PTP v2, UDP, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                        /* PTP v2, UDP, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_EVENT:
                        /* PTP v2/802.AS1 any layer, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_SYNC:
                        /* PTP v2/802.AS1, any layer, Sync packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for SYNC messages only */
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                        /* PTP v2/802.AS1, any layer, Delay_req packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        /* take time stamp for Delay_Req messages only */
                        ts_master_en = PTP_TCR_TSMSTRENA;
                        ts_event_en = PTP_TCR_TSEVNTENA;

                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
                        break;

                case HWTSTAMP_FILTER_NTP_ALL:
                case HWTSTAMP_FILTER_ALL:
                        /* time stamp any incoming packet */
                        config.rx_filter = HWTSTAMP_FILTER_ALL;
                        tstamp_all = PTP_TCR_TSENALL;
                        break;

                default:
                        return -ERANGE;
                }
        } else {
                switch (config.rx_filter) {
                case HWTSTAMP_FILTER_NONE:
                        config.rx_filter = HWTSTAMP_FILTER_NONE;
                        break;
                default:
                        /* PTP v1, UDP, any kind of event packet */
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                        break;
                }
        }
        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
                stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
        else {
                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
                         tstamp_all | ptp_v2 | ptp_over_ethernet |
                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
                         ts_master_en | snap_type_sel);
                stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

                /* program Sub Second Increment reg */
                stmmac_config_sub_second_increment(priv,
                                priv->ptpaddr, priv->plat->clk_ptp_rate,
                                xmac, &sec_inc);
                temp = div_u64(1000000000ULL, sec_inc);

                /* Store sub second increment and flags for later use */
                priv->sub_second_inc = sec_inc;
                priv->systime_flags = value;

                /* Calculate the default addend value:
                 * formula is:
                 * addend = (2^32)/freq_div_ratio;
                 * where freq_div_ratio = clk_ptp_rate/(1e9/sec_inc), i.e.
                 * the ratio between the PTP clock and the rate at which
                 * the sub-second register must be incremented.
                 */
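                /* The addend is accumulated into a 32-bit register on
                 * every clk_ptp_rate cycle; each accumulator overflow
                 * advances the timestamp by sec_inc nanoseconds. This is
                 * the fine correction method (PTP_TCR_TSCFUPDT is set
                 * above), which lets the PTP core tune the clock by
                 * adjusting the addend.
                 */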
                temp = (u64)(temp << 32);
                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

                /* initialize system time */
                ktime_get_real_ts64(&now);

                /* lower 32 bits of tv_sec are safe until y2106 */
                stmmac_init_systime(priv, priv->ptpaddr,
                                (u32)now.tv_sec, now.tv_nsec);
        }

        memcpy(&priv->tstamp_config, &config, sizeof(config));

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
}

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct hwtstamp_config *config = &priv->tstamp_config;

        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        return copy_to_user(ifr->ifr_data, config,
                            sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
        bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;

        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
        if (xmac && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;
        /* Dwmac 3.x core with extend_desc can support adv_ts */
        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
                priv->adv_ts = 1;

        if (priv->dma_cap.time_stamp)
                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

        if (priv->adv_ts)
                netdev_info(priv->dev,
                            "IEEE 1588-2008 Advanced Timestamp supported\n");

        priv->hwts_tx_en = 0;
        priv->hwts_rx_en = 0;

        stmmac_ptp_register(priv);

        return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
        if (priv->plat->clk_ptp_ref)
                clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex mode negotiated by the PHY
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;

        stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
                        priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because the link may come up on a
 * different (EEE-capable) network.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        bool new_state = false;

        if (!phydev)
                return;

        mutex_lock(&priv->lock);

        if (phydev->link) {
                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode.
                 */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = true;
                        if (!phydev->duplex)
                                ctrl &= ~priv->hw->link.duplex;
                        else
                                ctrl |= priv->hw->link.duplex;
                        priv->oldduplex = phydev->duplex;
                }
                /* Flow Control operation */
                if (phydev->pause)
                        stmmac_mac_flow_ctrl(priv, phydev->duplex);

                if (phydev->speed != priv->speed) {
                        new_state = true;
                        ctrl &= ~priv->hw->link.speed_mask;
                        switch (phydev->speed) {
                        case SPEED_1000:
                                ctrl |= priv->hw->link.speed1000;
                                break;
                        case SPEED_100:
                                ctrl |= priv->hw->link.speed100;
                                break;
                        case SPEED_10:
                                ctrl |= priv->hw->link.speed10;
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
                                           "broken speed: %d\n", phydev->speed);
                                phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
                        if (phydev->speed != SPEED_UNKNOWN)
                                stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }

                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

                if (!priv->oldlink) {
                        new_state = true;
                        priv->oldlink = true;
                }
        } else if (priv->oldlink) {
                new_state = true;
                priv->oldlink = false;
                priv->speed = SPEED_UNKNOWN;
                priv->oldduplex = DUPLEX_UNKNOWN;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        mutex_unlock(&priv->lock);

        if (phydev->is_pseudo_fixed_link)
                /* Stop the PHY layer from calling the adjust-link hook
                 * when a switch is attached to the stmmac driver.
                 */
                phydev->irq = PHY_IGNORE_INTERRUPT;
        else
                /* At this stage, init the EEE if supported.
                 * Never called in case of fixed_link.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
}


/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * The Physical Coding Sublayer (PCS) is an interface that can be used when
 * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
        int interface = priv->plat->interface;

        if (priv->dma_cap.pcs) {
                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_RGMII;
                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
                        priv->hw->pcs = STMMAC_PCS_SGMII;
                }
        }
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;

        priv->oldlink = false;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;

        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
                                        &stmmac_adjust_link, 0, interface);
        } else {
                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
                         priv->plat->bus_id);

                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
                         priv->plat->phy_addr);
                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
                           phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
                                     interface);
        }

        if (IS_ERR_OR_NULL(phydev)) {
                netdev_err(priv->dev, "Could not attach to PHY\n");
                if (!phydev)
                        return -ENODEV;

                return PTR_ERR(phydev);
        }

        /* Stop Advertising 1000BASE Capability if interface is not GMII */
        if ((interface == PHY_INTERFACE_MODE_MII) ||
            (interface == PHY_INTERFACE_MODE_RMII) ||
            (max_speed < 1000 && max_speed > 0))
                phy_set_max_speed(phydev, SPEED_100);

        /*
         * Half-duplex mode is not supported with multiqueue:
         * half-duplex can only work with a single queue.
         */
        if (tx_cnt > 1) {
                phy_remove_link_mode(phydev,
                                     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
                phy_remove_link_mode(phydev,
                                     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
                phy_remove_link_mode(phydev,
                                     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
        }

        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }

        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
         * subsequent PHY polling, make sure we force a link transition if
         * we have a UP/DOWN/UP transition
         */
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;

        phy_attached_info(phydev);
        return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        void *head_rx;
        u32 queue;

        /* Display RX rings */
        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                pr_info("\tRX Queue %u rings\n", queue);

                if (priv->extend_desc)
                        head_rx = (void *)rx_q->dma_erx;
                else
                        head_rx = (void *)rx_q->dma_rx;

                /* Display RX ring */
                stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
        }
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        void *head_tx;
        u32 queue;

        /* Display TX rings */
        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                pr_info("\tTX Queue %d rings\n", queue);

                if (priv->extend_desc)
                        head_tx = (void *)tx_q->dma_etx;
                else
                        head_tx = (void *)tx_q->dma_tx;

                stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
        }
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
        /* Display RX ring */
        stmmac_display_rx_rings(priv);

        /* Display TX ring */
        stmmac_display_tx_rings(priv);
}

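/* Select the smallest DMA buffer size bucket that can still hold a
 * whole frame for the given MTU.
 */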
static int stmmac_set_bfsize(int mtu, int bufsize)
{
        int ret = bufsize;

        if (mtu >= BUF_SIZE_4KiB)
                ret = BUF_SIZE_8KiB;
        else if (mtu >= BUF_SIZE_2KiB)
                ret = BUF_SIZE_4KiB;
        else if (mtu > DEFAULT_BUFSIZE)
                ret = BUF_SIZE_2KiB;
        else
                ret = DEFAULT_BUFSIZE;

        return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        int i;

        /* Clear the RX descriptors */
        for (i = 0; i < DMA_RX_SIZE; i++)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
                                        (i == DMA_RX_SIZE - 1),
                                        priv->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
                                        (i == DMA_RX_SIZE - 1),
                                        priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        int i;

        /* Clear the TX descriptors */
        for (i = 0; i < DMA_TX_SIZE; i++)
                if (priv->extend_desc)
                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
                                        priv->mode, (i == DMA_TX_SIZE - 1));
                else
                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
                                        priv->mode, (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        /* Clear the RX descriptors */
        for (queue = 0; queue < rx_queue_cnt; queue++)
                stmmac_clear_rx_descriptors(priv, queue);

        /* Clear the TX descriptors */
        for (queue = 0; queue < tx_queue_cnt; queue++)
                stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
                                  int i, gfp_t flags, u32 queue)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
        struct sk_buff *skb;

        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                netdev_err(priv->dev,
                           "%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
        rx_q->rx_skbuff[i] = skb;
        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
                                                DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);

        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
                stmmac_init_desc3(priv, p);

        return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

        if (rx_q->rx_skbuff[i]) {
                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
        }
        rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

        if (tx_q->tx_skbuff_dma[i].buf) {
                if (tx_q->tx_skbuff_dma[i].map_as_page)
                        dma_unmap_page(priv->device,
                                       tx_q->tx_skbuff_dma[i].buf,
                                       tx_q->tx_skbuff_dma[i].len,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(priv->device,
                                         tx_q->tx_skbuff_dma[i].buf,
                                         tx_q->tx_skbuff_dma[i].len,
                                         DMA_TO_DEVICE);
        }

        if (tx_q->tx_skbuff[i]) {
                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
                tx_q->tx_skbuff[i] = NULL;
                tx_q->tx_skbuff_dma[i].buf = 0;
                tx_q->tx_skbuff_dma[i].map_as_page = false;
        }
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
        int ret = -ENOMEM;
        int bfsize = 0;
        int queue;
        int i;

        bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
        if (bfsize < 0)
                bfsize = 0;

        if (bfsize < BUF_SIZE_16KiB)
                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

        priv->dma_buf_sz = bfsize;

        /* RX INITIALIZATION */
        netif_dbg(priv, probe, priv->dev,
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                netif_dbg(priv, probe, priv->dev,
                          "(%s) dma_rx_phy=0x%08x\n", __func__,
                          (u32)rx_q->dma_rx_phy);

                for (i = 0; i < DMA_RX_SIZE; i++) {
                        struct dma_desc *p;

                        if (priv->extend_desc)
                                p = &((rx_q->dma_erx + i)->basic);
                        else
                                p = rx_q->dma_rx + i;

                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
                                                     queue);
                        if (ret)
                                goto err_init_rx_buffers;

                        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
                                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
                                  (unsigned int)rx_q->rx_skbuff_dma[i]);
                }

                rx_q->cur_rx = 0;
                rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

                stmmac_clear_rx_descriptors(priv, queue);

                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
                                stmmac_mode_init(priv, rx_q->dma_erx,
                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
                        else
                                stmmac_mode_init(priv, rx_q->dma_rx,
                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
                }
        }

        buf_sz = bfsize;

        return 0;

err_init_rx_buffers:
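        /* Unwind in reverse: free the buffers of the queue that failed
         * part-way through, then every buffer of each earlier queue.
         */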
        while (queue >= 0) {
                while (--i >= 0)
                        stmmac_free_rx_buffer(priv, queue, i);

                if (queue == 0)
                        break;

                i = DMA_RX_SIZE;
                queue--;
        }

        return ret;
}


/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
        u32 queue;
        int i;

        for (queue = 0; queue < tx_queue_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                netif_dbg(priv, probe, priv->dev,
                          "(%s) dma_tx_phy=0x%08x\n", __func__,
                          (u32)tx_q->dma_tx_phy);

                /* Setup the chained descriptor addresses */
                if (priv->mode == STMMAC_CHAIN_MODE) {
                        if (priv->extend_desc)
                                stmmac_mode_init(priv, tx_q->dma_etx,
                                                tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
                        else
                                stmmac_mode_init(priv, tx_q->dma_tx,
                                                tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
                }

                for (i = 0; i < DMA_TX_SIZE; i++) {
                        struct dma_desc *p;

                        if (priv->extend_desc)
                                p = &((tx_q->dma_etx + i)->basic);
                        else
                                p = tx_q->dma_tx + i;

                        stmmac_clear_desc(priv, p);

                        tx_q->tx_skbuff_dma[i].buf = 0;
                        tx_q->tx_skbuff_dma[i].map_as_page = false;
                        tx_q->tx_skbuff_dma[i].len = 0;
                        tx_q->tx_skbuff_dma[i].last_segment = false;
                        tx_q->tx_skbuff[i] = NULL;
                }

                tx_q->dirty_tx = 0;
                tx_q->cur_tx = 0;
                tx_q->mss = 0;

                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
        }

        return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;

        ret = init_dma_rx_desc_rings(dev, flags);
        if (ret)
                return ret;

        ret = init_dma_tx_desc_rings(dev);

        stmmac_clear_descriptors(priv);

        if (netif_msg_hw(priv))
                stmmac_display_rings(priv);

        return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
                stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
        int i;

        for (i = 0; i < DMA_TX_SIZE; i++)
                stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
        u32 rx_count = priv->plat->rx_queues_to_use;
        u32 queue;

        /* Free RX queue resources */
        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                /* Release the DMA RX socket buffers */
                dma_free_rx_skbufs(priv, queue);

                /* Free DMA regions of consistent memory previously allocated */
                if (!priv->extend_desc)
                        dma_free_coherent(priv->device,
                                          DMA_RX_SIZE * sizeof(struct dma_desc),
                                          rx_q->dma_rx, rx_q->dma_rx_phy);
                else
                        dma_free_coherent(priv->device, DMA_RX_SIZE *
                                          sizeof(struct dma_extended_desc),
1494                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1495
1496                 kfree(rx_q->rx_skbuff_dma);
1497                 kfree(rx_q->rx_skbuff);
1498         }
1499 }
1500
1501 /**
1502  * free_dma_tx_desc_resources - free TX dma desc resources
1503  * @priv: private structure
1504  */
1505 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1506 {
1507         u32 tx_count = priv->plat->tx_queues_to_use;
1508         u32 queue;
1509
1510         /* Free TX queue resources */
1511         for (queue = 0; queue < tx_count; queue++) {
1512                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1513
1514                 /* Release the DMA TX socket buffers */
1515                 dma_free_tx_skbufs(priv, queue);
1516
1517                 /* Free DMA regions of consistent memory previously allocated */
1518                 if (!priv->extend_desc)
1519                         dma_free_coherent(priv->device,
1520                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1521                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1522                 else
1523                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1524                                           sizeof(struct dma_extended_desc),
1525                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1526
1527                 kfree(tx_q->tx_skbuff_dma);
1528                 kfree(tx_q->tx_skbuff);
1529         }
1530 }
1531
1532 /**
1533  * alloc_dma_rx_desc_resources - alloc RX resources.
1534  * @priv: private structure
1535  * Description: according to which descriptor type is in use (extended
1536  * or basic), this function allocates the descriptor rings and the
1537  * bookkeeping arrays for the RX path; the RX socket buffers themselves
1538  * are pre-allocated when the rings are initialized, to allow zero-copy.
1539  */
1540 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1541 {
1542         u32 rx_count = priv->plat->rx_queues_to_use;
1543         int ret = -ENOMEM;
1544         u32 queue;
1545
1546         /* RX queues buffers and DMA */
1547         for (queue = 0; queue < rx_count; queue++) {
1548                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1549
1550                 rx_q->queue_index = queue;
1551                 rx_q->priv_data = priv;
1552
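                     /*
                      * Two parallel bookkeeping arrays are kept per queue:
                      * rx_skbuff_dma holds the DMA addresses handed to the
                      * hardware, rx_skbuff the matching sk_buff pointers.
                      */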
1553                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1554                                                     sizeof(dma_addr_t),
1555                                                     GFP_KERNEL);
1556                 if (!rx_q->rx_skbuff_dma)
1557                         goto err_dma;
1558
1559                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1560                                                 sizeof(struct sk_buff *),
1561                                                 GFP_KERNEL);
1562                 if (!rx_q->rx_skbuff)
1563                         goto err_dma;
1564
1565                 if (priv->extend_desc) {
1566                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1567                                                            DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1568                                                            &rx_q->dma_rx_phy,
1569                                                            GFP_KERNEL);
1570                         if (!rx_q->dma_erx)
1571                                 goto err_dma;
1572
1573                 } else {
1574                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1575                                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1576                                                           &rx_q->dma_rx_phy,
1577                                                           GFP_KERNEL);
1578                         if (!rx_q->dma_rx)
1579                                 goto err_dma;
1580                 }
1581         }
1582
1583         return 0;
1584
1585 err_dma:
1586         free_dma_rx_desc_resources(priv);
1587
1588         return ret;
1589 }
1590
1591 /**
1592  * alloc_dma_tx_desc_resources - alloc TX resources.
1593  * @priv: private structure
1594  * Description: according to which descriptor type is in use (extended
1595  * or basic), this function allocates the descriptor rings and the
1596  * bookkeeping arrays for the TX path; the socket buffers themselves are
1597  * attached at transmit time.
1598  */
1599 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1600 {
1601         u32 tx_count = priv->plat->tx_queues_to_use;
1602         int ret = -ENOMEM;
1603         u32 queue;
1604
1605         /* TX queues buffers and DMA */
1606         for (queue = 0; queue < tx_count; queue++) {
1607                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1608
1609                 tx_q->queue_index = queue;
1610                 tx_q->priv_data = priv;
1611
1612                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1613                                                     sizeof(*tx_q->tx_skbuff_dma),
1614                                                     GFP_KERNEL);
1615                 if (!tx_q->tx_skbuff_dma)
1616                         goto err_dma;
1617
1618                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1619                                                 sizeof(struct sk_buff *),
1620                                                 GFP_KERNEL);
1621                 if (!tx_q->tx_skbuff)
1622                         goto err_dma;
1623
1624                 if (priv->extend_desc) {
1625                         tx_q->dma_etx = dma_alloc_coherent(priv->device,
1626                                                            DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1627                                                            &tx_q->dma_tx_phy,
1628                                                            GFP_KERNEL);
1629                         if (!tx_q->dma_etx)
1630                                 goto err_dma;
1631                 } else {
1632                         tx_q->dma_tx = dma_alloc_coherent(priv->device,
1633                                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1634                                                           &tx_q->dma_tx_phy,
1635                                                           GFP_KERNEL);
1636                         if (!tx_q->dma_tx)
1637                                 goto err_dma;
1638                 }
1639         }
1640
1641         return 0;
1642
1643 err_dma:
1644         free_dma_tx_desc_resources(priv);
1645
1646         return ret;
1647 }
1648
1649 /**
1650  * alloc_dma_desc_resources - alloc TX/RX resources.
1651  * @priv: private structure
1652  * Description: according to which descriptor type is in use (extended
1653  * or basic), this function allocates the resources for the TX and RX
1654  * paths. In case of reception, for example, it pre-allocates the RX
1655  * socket buffers in order to allow a zero-copy mechanism.
1656  */
1657 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1658 {
1659         /* RX Allocation */
1660         int ret = alloc_dma_rx_desc_resources(priv);
1661
1662         if (ret)
1663                 return ret;
1664
1665         ret = alloc_dma_tx_desc_resources(priv);
1666
1667         return ret;
1668 }
1669
1670 /**
1671  * free_dma_desc_resources - free dma desc resources
1672  * @priv: private structure
1673  */
1674 static void free_dma_desc_resources(struct stmmac_priv *priv)
1675 {
1676         /* Release the DMA RX socket buffers */
1677         free_dma_rx_desc_resources(priv);
1678
1679         /* Release the DMA TX socket buffers */
1680         free_dma_tx_desc_resources(priv);
1681 }
1682
1683 /**
1684  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1685  *  @priv: driver private structure
1686  *  Description: It is used for enabling the rx queues in the MAC
1687  */
1688 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1689 {
1690         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1691         int queue;
1692         u8 mode;
1693
1694         for (queue = 0; queue < rx_queues_count; queue++) {
1695                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1696                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1697         }
1698 }
1699
1700 /**
1701  * stmmac_start_rx_dma - start RX DMA channel
1702  * @priv: driver private structure
1703  * @chan: RX channel index
1704  * Description:
1705  * This starts a RX DMA channel
1706  */
1707 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1708 {
1709         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1710         stmmac_start_rx(priv, priv->ioaddr, chan);
1711 }
1712
1713 /**
1714  * stmmac_start_tx_dma - start TX DMA channel
1715  * @priv: driver private structure
1716  * @chan: TX channel index
1717  * Description:
1718  * This starts a TX DMA channel
1719  */
1720 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1721 {
1722         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1723         stmmac_start_tx(priv, priv->ioaddr, chan);
1724 }
1725
1726 /**
1727  * stmmac_stop_rx_dma - stop RX DMA channel
1728  * @priv: driver private structure
1729  * @chan: RX channel index
1730  * Description:
1731  * This stops a RX DMA channel
1732  */
1733 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1734 {
1735         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1736         stmmac_stop_rx(priv, priv->ioaddr, chan);
1737 }
1738
1739 /**
1740  * stmmac_stop_tx_dma - stop TX DMA channel
1741  * @priv: driver private structure
1742  * @chan: TX channel index
1743  * Description:
1744  * This stops a TX DMA channel
1745  */
1746 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1747 {
1748         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1749         stmmac_stop_tx(priv, priv->ioaddr, chan);
1750 }
1751
1752 /**
1753  * stmmac_start_all_dma - start all RX and TX DMA channels
1754  * @priv: driver private structure
1755  * Description:
1756  * This starts all the RX and TX DMA channels
1757  */
1758 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1759 {
1760         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1761         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1762         u32 chan = 0;
1763
1764         for (chan = 0; chan < rx_channels_count; chan++)
1765                 stmmac_start_rx_dma(priv, chan);
1766
1767         for (chan = 0; chan < tx_channels_count; chan++)
1768                 stmmac_start_tx_dma(priv, chan);
1769 }
1770
1771 /**
1772  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1773  * @priv: driver private structure
1774  * Description:
1775  * This stops the RX and TX DMA channels
1776  */
1777 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1778 {
1779         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1780         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1781         u32 chan = 0;
1782
1783         for (chan = 0; chan < rx_channels_count; chan++)
1784                 stmmac_stop_rx_dma(priv, chan);
1785
1786         for (chan = 0; chan < tx_channels_count; chan++)
1787                 stmmac_stop_tx_dma(priv, chan);
1788 }
1789
1790 /**
1791  *  stmmac_dma_operation_mode - HW DMA operation mode
1792  *  @priv: driver private structure
1793  *  Description: it is used for configuring the DMA operation mode register in
1794  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1795  */
1796 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1797 {
1798         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1799         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1800         int rxfifosz = priv->plat->rx_fifo_size;
1801         int txfifosz = priv->plat->tx_fifo_size;
1802         u32 txmode = 0;
1803         u32 rxmode = 0;
1804         u32 chan = 0;
1805         u8 qmode = 0;
1806
1807         if (rxfifosz == 0)
1808                 rxfifosz = priv->dma_cap.rx_fifo_size;
1809         if (txfifosz == 0)
1810                 txfifosz = priv->dma_cap.tx_fifo_size;
1811
1812         /* Adjust for real per queue fifo size */
1813         rxfifosz /= rx_channels_count;
1814         txfifosz /= tx_channels_count;
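             /*
              * Worked example (hypothetical sizes): an 8 KiB TX FIFO shared
              * by two channels leaves txfifosz = 4096, so each channel below
              * is programmed against a 4 KiB slice of the FIFO.
              */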
1815
1816         if (priv->plat->force_thresh_dma_mode) {
1817                 txmode = tc;
1818                 rxmode = tc;
1819         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1820                 /*
1821                  * In case of GMAC, SF mode can be enabled
1822                  * to perform the TX COE in HW. This depends on:
1823                  * 1) TX COE being actually supported;
1824                  * 2) the Jumbo frame support not being buggy in a way
1825                  *    that forbids inserting the csum in the TDES.
1826                  */
1827                 txmode = SF_DMA_MODE;
1828                 rxmode = SF_DMA_MODE;
1829                 priv->xstats.threshold = SF_DMA_MODE;
1830         } else {
1831                 txmode = tc;
1832                 rxmode = SF_DMA_MODE;
1833         }
1834
1835         /* configure all channels */
1836         for (chan = 0; chan < rx_channels_count; chan++) {
1837                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1838
1839                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1840                                 rxfifosz, qmode);
1841                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1842                                 chan);
1843         }
1844
1845         for (chan = 0; chan < tx_channels_count; chan++) {
1846                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1847
1848                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1849                                 txfifosz, qmode);
1850         }
1851 }
1852
1853 /**
1854  * stmmac_tx_clean - to manage the transmission completion
1855  * @priv: driver private structure
      * @budget: maximum number of descriptors to reclaim in one call
1856  * @queue: TX queue index
1857  * Description: it reclaims the transmit resources after transmission completes.
1858  */
1859 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1860 {
1861         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1862         unsigned int bytes_compl = 0, pkts_compl = 0;
1863         unsigned int entry, count = 0;
1864
1865         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1866
1867         priv->xstats.tx_clean++;
1868
1869         entry = tx_q->dirty_tx;
1870         while ((entry != tx_q->cur_tx) && (count < budget)) {
1871                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1872                 struct dma_desc *p;
1873                 int status;
1874
1875                 if (priv->extend_desc)
1876                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1877                 else
1878                         p = tx_q->dma_tx + entry;
1879
1880                 status = stmmac_tx_status(priv, &priv->dev->stats,
1881                                 &priv->xstats, p, priv->ioaddr);
1882                 /* Check if the descriptor is owned by the DMA */
1883                 if (unlikely(status & tx_dma_own))
1884                         break;
1885
1886                 count++;
1887
1888                 /* Make sure descriptor fields are read after reading
1889                  * the own bit.
1890                  */
1891                 dma_rmb();
1892
1893                 /* Just consider the last segment and ...*/
1894                 if (likely(!(status & tx_not_ls))) {
1895                         /* ... verify the status error condition */
1896                         if (unlikely(status & tx_err)) {
1897                                 priv->dev->stats.tx_errors++;
1898                         } else {
1899                                 priv->dev->stats.tx_packets++;
1900                                 priv->xstats.tx_pkt_n++;
1901                         }
1902                         stmmac_get_tx_hwtstamp(priv, p, skb);
1903                 }
1904
1905                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1906                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1907                                 dma_unmap_page(priv->device,
1908                                                tx_q->tx_skbuff_dma[entry].buf,
1909                                                tx_q->tx_skbuff_dma[entry].len,
1910                                                DMA_TO_DEVICE);
1911                         else
1912                                 dma_unmap_single(priv->device,
1913                                                  tx_q->tx_skbuff_dma[entry].buf,
1914                                                  tx_q->tx_skbuff_dma[entry].len,
1915                                                  DMA_TO_DEVICE);
1916                         tx_q->tx_skbuff_dma[entry].buf = 0;
1917                         tx_q->tx_skbuff_dma[entry].len = 0;
1918                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1919                 }
1920
1921                 stmmac_clean_desc3(priv, tx_q, p);
1922
1923                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1924                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1925
1926                 if (likely(skb != NULL)) {
1927                         pkts_compl++;
1928                         bytes_compl += skb->len;
1929                         dev_consume_skb_any(skb);
1930                         tx_q->tx_skbuff[entry] = NULL;
1931                 }
1932
1933                 stmmac_release_tx_desc(priv, p, priv->mode);
1934
1935                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1936         }
1937         tx_q->dirty_tx = entry;
1938
1939         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1940                                   pkts_compl, bytes_compl);
1941
1942         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1943                                                                 queue))) &&
1944             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1945
1946                 netif_dbg(priv, tx_done, priv->dev,
1947                           "%s: restart transmit\n", __func__);
1948                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1949         }
1950
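             /* If EEE is enabled and the TX path is not already in low-power
              * idle, request LPI entry and re-arm the expiration timer.
              */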
1951         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1952                 stmmac_enable_eee_mode(priv);
1953                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1954         }
1955
1956         /* We still have pending packets, let's call for a new scheduling */
1957         if (tx_q->dirty_tx != tx_q->cur_tx)
1958                 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1959
1960         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1961
1962         return count;
1963 }
1964
1965 /**
1966  * stmmac_tx_err - to manage the tx error
1967  * @priv: driver private structure
1968  * @chan: channel index
1969  * Description: it cleans the descriptors and restarts the transmission
1970  * in case of transmission errors.
1971  */
1972 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1973 {
1974         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1975         int i;
1976
1977         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1978
1979         stmmac_stop_tx_dma(priv, chan);
1980         dma_free_tx_skbufs(priv, chan);
1981         for (i = 0; i < DMA_TX_SIZE; i++)
1982                 if (priv->extend_desc)
1983                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1984                                         priv->mode, (i == DMA_TX_SIZE - 1));
1985                 else
1986                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1987                                         priv->mode, (i == DMA_TX_SIZE - 1));
1988         tx_q->dirty_tx = 0;
1989         tx_q->cur_tx = 0;
1990         tx_q->mss = 0;
1991         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1992         stmmac_start_tx_dma(priv, chan);
1993
1994         priv->dev->stats.tx_errors++;
1995         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1996 }
1997
1998 /**
1999  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2000  *  @priv: driver private structure
2001  *  @txmode: TX operating mode
2002  *  @rxmode: RX operating mode
2003  *  @chan: channel index
2004  *  Description: it is used for configuring the DMA operation mode at
2005  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2006  *  mode.
2007  */
2008 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2009                                           u32 rxmode, u32 chan)
2010 {
2011         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2012         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2013         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2014         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2015         int rxfifosz = priv->plat->rx_fifo_size;
2016         int txfifosz = priv->plat->tx_fifo_size;
2017
2018         if (rxfifosz == 0)
2019                 rxfifosz = priv->dma_cap.rx_fifo_size;
2020         if (txfifosz == 0)
2021                 txfifosz = priv->dma_cap.tx_fifo_size;
2022
2023         /* Adjust for real per queue fifo size */
2024         rxfifosz /= rx_channels_count;
2025         txfifosz /= tx_channels_count;
2026
2027         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2028         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2029 }
2030
2031 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2032 {
2033         int ret;
2034
2035         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2036                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2037         if (ret && (ret != -EINVAL)) {
2038                 stmmac_global_err(priv);
2039                 return true;
2040         }
2041
2042         return false;
2043 }
2044
2045 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2046 {
2047         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2048                                                  &priv->xstats, chan);
2049         struct stmmac_channel *ch = &priv->channel[chan];
2050
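             /*
              * Mask the per-channel DMA interrupt before scheduling NAPI so
              * the handler is not re-entered for this channel until the poll
              * routine re-enables it.
              */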
2051         if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2052                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2053                 napi_schedule_irqoff(&ch->rx_napi);
2054         }
2055
2056         if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2057                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2058                 napi_schedule_irqoff(&ch->tx_napi);
2059         }
2060
2061         return status;
2062 }
2063
2064 /**
2065  * stmmac_dma_interrupt - DMA ISR
2066  * @priv: driver private structure
2067  * Description: this is the DMA ISR. It is called by the main ISR.
2068  * It calls the dwmac dma routine and schedules the poll method when
2069  * there is work that can be done.
2070  */
2071 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2072 {
2073         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2074         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2075         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2076                                 tx_channel_count : rx_channel_count;
2077         u32 chan;
2078         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2079
2080         /* Make sure we never check beyond our status buffer. */
2081         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2082                 channels_to_check = ARRAY_SIZE(status);
2083
2084         for (chan = 0; chan < channels_to_check; chan++)
2085                 status[chan] = stmmac_napi_check(priv, chan);
2086
2087         for (chan = 0; chan < tx_channel_count; chan++) {
2088                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2089                         /* Try to bump up the dma threshold on this failure */
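                             /* The threshold grows in steps of 64 on each
                              * hard error and is left alone once it exceeds
                              * 256 or the channel already runs in SF mode.
                              */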
2090                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2091                             (tc <= 256)) {
2092                                 tc += 64;
2093                                 if (priv->plat->force_thresh_dma_mode)
2094                                         stmmac_set_dma_operation_mode(priv,
2095                                                                       tc,
2096                                                                       tc,
2097                                                                       chan);
2098                                 else
2099                                         stmmac_set_dma_operation_mode(priv,
2100                                                                     tc,
2101                                                                     SF_DMA_MODE,
2102                                                                     chan);
2103                                 priv->xstats.threshold = tc;
2104                         }
2105                 } else if (unlikely(status[chan] == tx_hard_error)) {
2106                         stmmac_tx_err(priv, chan);
2107                 }
2108         }
2109 }
2110
2111 /**
2112  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2113  * @priv: driver private structure
2114  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2115  */
2116 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2117 {
2118         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2119                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2120
2121         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2122
2123         if (priv->dma_cap.rmon) {
2124                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2125                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2126         } else
2127                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2128 }
2129
2130 /**
2131  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2132  * @priv: driver private structure
2133  * Description:
2134  *  newer GMAC chip generations have a register that indicates the
2135  *  presence of the optional features/functions.
2136  *  This can also be used to override the values passed through the
2137  *  platform, which are still necessary for old MAC10/100 and GMAC chips.
2138  */
2139 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2140 {
2141         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2142 }
2143
2144 /**
2145  * stmmac_check_ether_addr - check if the MAC addr is valid
2146  * @priv: driver private structure
2147  * Description:
2148  * it verifies that the MAC address is valid; in case it is not, a
2149  * random MAC address is generated.
2150  */
2151 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2152 {
2153         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2154                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2155                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2156                         eth_hw_addr_random(priv->dev);
2157                 netdev_info(priv->dev, "device MAC address %pM\n",
2158                             priv->dev->dev_addr);
2159         }
2160 }
2161
2162 /**
2163  * stmmac_init_dma_engine - DMA init.
2164  * @priv: driver private structure
2165  * Description:
2166  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2167  * Some DMA parameters can be passed from the platform;
2168  * if they are not passed, a default is kept for the MAC or GMAC.
2169  */
2170 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2171 {
2172         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2173         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2174         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2175         struct stmmac_rx_queue *rx_q;
2176         struct stmmac_tx_queue *tx_q;
2177         u32 chan = 0;
2178         int atds = 0;
2179         int ret = 0;
2180
2181         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2182                 dev_err(priv->device, "Invalid DMA configuration\n");
2183                 return -EINVAL;
2184         }
2185
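             /*
              * The alternate descriptor size (ATDS) bit tells the DMA that
              * each descriptor uses the larger, extended layout; per the
              * condition below it is only needed when extended descriptors
              * are laid out as a ring.
              */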
2186         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2187                 atds = 1;
2188
2189         ret = stmmac_reset(priv, priv->ioaddr);
2190         if (ret) {
2191                 dev_err(priv->device, "Failed to reset the dma\n");
2192                 return ret;
2193         }
2194
2195         /* DMA Configuration */
2196         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2197
2198         if (priv->plat->axi)
2199                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2200
2201         /* DMA CSR Channel configuration */
2202         for (chan = 0; chan < dma_csr_ch; chan++)
2203                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2204
2205         /* DMA RX Channel Configuration */
2206         for (chan = 0; chan < rx_channels_count; chan++) {
2207                 rx_q = &priv->rx_queue[chan];
2208
2209                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2210                                     rx_q->dma_rx_phy, chan);
2211
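                     /* On cores with tail pointers, programming the RX tail
                      * past the last descriptor lets the DMA initially use
                      * the whole ring.
                      */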
2212                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2213                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2214                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2215                                        rx_q->rx_tail_addr, chan);
2216         }
2217
2218         /* DMA TX Channel Configuration */
2219         for (chan = 0; chan < tx_channels_count; chan++) {
2220                 tx_q = &priv->tx_queue[chan];
2221
2222                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2223                                     tx_q->dma_tx_phy, chan);
2224
2225                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2226                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2227                                        tx_q->tx_tail_addr, chan);
2228         }
2229
2230         return ret;
2231 }
2232
2233 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2234 {
2235         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2236
2237         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2238 }
2239
2240 /**
2241  * stmmac_tx_timer - mitigation sw timer for tx.
2242  * @t: pointer to the timer_list that fired
2243  * Description:
2244  * This is the timer handler used to schedule the TX NAPI poll, which in
      * turn invokes stmmac_tx_clean.
2245  */
2246 static void stmmac_tx_timer(struct timer_list *t)
2247 {
2248         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2249         struct stmmac_priv *priv = tx_q->priv_data;
2250         struct stmmac_channel *ch;
2251
2252         ch = &priv->channel[tx_q->queue_index];
2253
2254         /*
2255          * If NAPI is already running we can miss some events. Let's rearm
2256          * the timer and try again.
2257          */
2258         if (likely(napi_schedule_prep(&ch->tx_napi)))
2259                 __napi_schedule(&ch->tx_napi);
2260         else
2261                 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
2262 }
2263
2264 /**
2265  * stmmac_init_tx_coalesce - init tx mitigation options.
2266  * @priv: driver private structure
2267  * Description:
2268  * This inits the transmit coalesce parameters: i.e. timer rate,
2269  * timer handler and default threshold used for enabling the
2270  * interrupt on completion bit.
2271  */
2272 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2273 {
2274         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2275         u32 chan;
2276
2277         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2278         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2279
2280         for (chan = 0; chan < tx_channel_count; chan++) {
2281                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2282
2283                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2284         }
2285 }
2286
2287 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2288 {
2289         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2290         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2291         u32 chan;
2292
2293         /* set TX ring length */
2294         for (chan = 0; chan < tx_channels_count; chan++)
2295                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2296                                 (DMA_TX_SIZE - 1), chan);
2297
2298         /* set RX ring length */
2299         for (chan = 0; chan < rx_channels_count; chan++)
2300                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2301                                 (DMA_RX_SIZE - 1), chan);
2302 }
2303
2304 /**
2305  *  stmmac_set_tx_queue_weight - Set TX queue weight
2306  *  @priv: driver private structure
2307  *  Description: It is used for setting the TX queue weights
2308  */
2309 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2310 {
2311         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2312         u32 weight;
2313         u32 queue;
2314
2315         for (queue = 0; queue < tx_queues_count; queue++) {
2316                 weight = priv->plat->tx_queues_cfg[queue].weight;
2317                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2318         }
2319 }
2320
2321 /**
2322  *  stmmac_configure_cbs - Configure CBS in TX queue
2323  *  @priv: driver private structure
2324  *  Description: It is used for configuring CBS in AVB TX queues
2325  */
2326 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2327 {
2328         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2329         u32 mode_to_use;
2330         u32 queue;
2331
2332         /* queue 0 is reserved for legacy traffic */
2333         for (queue = 1; queue < tx_queues_count; queue++) {
2334                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
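                     /* CBS is only programmed on AVB queues; DCB queues keep
                      * the default scheduling.
                      */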
2335                 if (mode_to_use == MTL_QUEUE_DCB)
2336                         continue;
2337
2338                 stmmac_config_cbs(priv, priv->hw,
2339                                 priv->plat->tx_queues_cfg[queue].send_slope,
2340                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2341                                 priv->plat->tx_queues_cfg[queue].high_credit,
2342                                 priv->plat->tx_queues_cfg[queue].low_credit,
2343                                 queue);
2344         }
2345 }
2346
2347 /**
2348  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2349  *  @priv: driver private structure
2350  *  Description: It is used for mapping RX queues to RX dma channels
2351  */
2352 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2353 {
2354         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2355         u32 queue;
2356         u32 chan;
2357
2358         for (queue = 0; queue < rx_queues_count; queue++) {
2359                 chan = priv->plat->rx_queues_cfg[queue].chan;
2360                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2361         }
2362 }
2363
2364 /**
2365  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2366  *  @priv: driver private structure
2367  *  Description: It is used for configuring the RX Queue Priority
2368  */
2369 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2370 {
2371         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2372         u32 queue;
2373         u32 prio;
2374
2375         for (queue = 0; queue < rx_queues_count; queue++) {
2376                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2377                         continue;
2378
2379                 prio = priv->plat->rx_queues_cfg[queue].prio;
2380                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2381         }
2382 }
2383
2384 /**
2385  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2386  *  @priv: driver private structure
2387  *  Description: It is used for configuring the TX Queue Priority
2388  */
2389 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2390 {
2391         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2392         u32 queue;
2393         u32 prio;
2394
2395         for (queue = 0; queue < tx_queues_count; queue++) {
2396                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2397                         continue;
2398
2399                 prio = priv->plat->tx_queues_cfg[queue].prio;
2400                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2401         }
2402 }
2403
2404 /**
2405  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2406  *  @priv: driver private structure
2407  *  Description: It is used for configuring the RX queue routing
2408  */
2409 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2410 {
2411         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2412         u32 queue;
2413         u8 packet;
2414
2415         for (queue = 0; queue < rx_queues_count; queue++) {
2416                 /* no specific packet type routing specified for the queue */
2417                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2418                         continue;
2419
2420                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2421                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2422         }
2423 }
2424
2425 /**
2426  *  stmmac_mtl_configuration - Configure MTL
2427  *  @priv: driver private structure
2428  *  Description: It is used for configuring the MTL
2429  */
2430 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2431 {
2432         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2433         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2434
2435         if (tx_queues_count > 1)
2436                 stmmac_set_tx_queue_weight(priv);
2437
2438         /* Configure MTL RX algorithms */
2439         if (rx_queues_count > 1)
2440                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2441                                 priv->plat->rx_sched_algorithm);
2442
2443         /* Configure MTL TX algorithms */
2444         if (tx_queues_count > 1)
2445                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2446                                 priv->plat->tx_sched_algorithm);
2447
2448         /* Configure CBS in AVB TX queues */
2449         if (tx_queues_count > 1)
2450                 stmmac_configure_cbs(priv);
2451
2452         /* Map RX MTL to DMA channels */
2453         stmmac_rx_queue_dma_chan_map(priv);
2454
2455         /* Enable MAC RX Queues */
2456         stmmac_mac_enable_rx_queues(priv);
2457
2458         /* Set RX priorities */
2459         if (rx_queues_count > 1)
2460                 stmmac_mac_config_rx_queues_prio(priv);
2461
2462         /* Set TX priorities */
2463         if (tx_queues_count > 1)
2464                 stmmac_mac_config_tx_queues_prio(priv);
2465
2466         /* Set RX routing */
2467         if (rx_queues_count > 1)
2468                 stmmac_mac_config_rx_queues_routing(priv);
2469 }
2470
2471 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2472 {
2473         if (priv->dma_cap.asp) {
2474                 netdev_info(priv->dev, "Enabling Safety Features\n");
2475                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2476         } else {
2477                 netdev_info(priv->dev, "No Safety Features support found\n");
2478         }
2479 }
2480
2481 /**
2482  * stmmac_hw_setup - setup mac in a usable state.
2483  *  @dev : pointer to the device structure.
      *  @init_ptp : initialize PTP if set.
2484  *  Description:
2485  *  this is the main function to set up the HW in a usable state: the
2486  *  dma engine is reset, the core registers are configured (e.g. AXI,
2487  *  Checksum features, timers) and the DMA is made ready to start
2488  *  receiving and transmitting.
2489  *  Return value:
2490  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2491  *  file on failure.
2492  */
2493 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2494 {
2495         struct stmmac_priv *priv = netdev_priv(dev);
2496         u32 rx_cnt = priv->plat->rx_queues_to_use;
2497         u32 tx_cnt = priv->plat->tx_queues_to_use;
2498         u32 chan;
2499         int ret;
2500
2501         /* DMA initialization and SW reset */
2502         ret = stmmac_init_dma_engine(priv);
2503         if (ret < 0) {
2504                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2505                            __func__);
2506                 return ret;
2507         }
2508
2509         /* Copy the MAC addr into the HW  */
2510         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2511
2512         /* PS and related bits will be programmed according to the speed */
2513         if (priv->hw->pcs) {
2514                 int speed = priv->plat->mac_port_sel_speed;
2515
2516                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2517                     (speed == SPEED_1000)) {
2518                         priv->hw->ps = speed;
2519                 } else {
2520                         dev_warn(priv->device, "invalid port speed\n");
2521                         priv->hw->ps = 0;
2522                 }
2523         }
2524
2525         /* Initialize the MAC Core */
2526         stmmac_core_init(priv, priv->hw, dev);
2527
2528         /* Initialize MTL*/
2529         stmmac_mtl_configuration(priv);
2530
2531         /* Initialize Safety Features */
2532         stmmac_safety_feat_configuration(priv);
2533
2534         ret = stmmac_rx_ipc(priv, priv->hw);
2535         if (!ret) {
2536                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2537                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2538                 priv->hw->rx_csum = 0;
2539         }
2540
2541         /* Enable the MAC Rx/Tx */
2542         stmmac_mac_set(priv, priv->ioaddr, true);
2543
2544         /* Set the HW DMA mode and the COE */
2545         stmmac_dma_operation_mode(priv);
2546
2547         stmmac_mmc_setup(priv);
2548
2549         if (init_ptp) {
2550                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2551                 if (ret < 0)
2552                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2553
2554                 ret = stmmac_init_ptp(priv);
2555                 if (ret == -EOPNOTSUPP)
2556                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2557                 else if (ret)
2558                         netdev_warn(priv->dev, "PTP init failed\n");
2559         }
2560
2561         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2562
2563         if (priv->use_riwt) {
2564                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2565                 if (!ret)
2566                         priv->rx_riwt = MAX_DMA_RIWT;
2567         }
2568
2569         if (priv->hw->pcs)
2570                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2571
2572         /* set TX and RX rings length */
2573         stmmac_set_rings_length(priv);
2574
2575         /* Enable TSO */
2576         if (priv->tso) {
2577                 for (chan = 0; chan < tx_cnt; chan++)
2578                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2579         }
2580
2581         /* Start the ball rolling... */
2582         stmmac_start_all_dma(priv);
2583
2584         return 0;
2585 }
2586
2587 static void stmmac_hw_teardown(struct net_device *dev)
2588 {
2589         struct stmmac_priv *priv = netdev_priv(dev);
2590
2591         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2592 }
2593
2594 /**
2595  *  stmmac_open - open entry point of the driver
2596  *  @dev : pointer to the device structure.
2597  *  Description:
2598  *  This function is the open entry point of the driver.
2599  *  Return value:
2600  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2601  *  file on failure.
2602  */
2603 static int stmmac_open(struct net_device *dev)
2604 {
2605         struct stmmac_priv *priv = netdev_priv(dev);
2606         u32 chan;
2607         int ret;
2608
2609         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2610             priv->hw->pcs != STMMAC_PCS_TBI &&
2611             priv->hw->pcs != STMMAC_PCS_RTBI) {
2612                 ret = stmmac_init_phy(dev);
2613                 if (ret) {
2614                         netdev_err(priv->dev,
2615                                    "%s: Cannot attach to PHY (error: %d)\n",
2616                                    __func__, ret);
2617                         return ret;
2618                 }
2619         }
2620
2621         /* Extra statistics */
2622         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2623         priv->xstats.threshold = tc;
2624
2625         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2626         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2627
2628         ret = alloc_dma_desc_resources(priv);
2629         if (ret < 0) {
2630                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2631                            __func__);
2632                 goto dma_desc_error;
2633         }
2634
2635         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2636         if (ret < 0) {
2637                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2638                            __func__);
2639                 goto init_error;
2640         }
2641
2642         ret = stmmac_hw_setup(dev, true);
2643         if (ret < 0) {
2644                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2645                 goto init_error;
2646         }
2647
2648         stmmac_init_tx_coalesce(priv);
2649
2650         if (dev->phydev)
2651                 phy_start(dev->phydev);
2652
2653         /* Request the IRQ lines */
2654         ret = request_irq(dev->irq, stmmac_interrupt,
2655                           IRQF_SHARED, dev->name, dev);
2656         if (unlikely(ret < 0)) {
2657                 netdev_err(priv->dev,
2658                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2659                            __func__, dev->irq, ret);
2660                 goto irq_error;
2661         }
2662
2663         /* Request the Wake IRQ in case another line is used for WoL */
2664         if (priv->wol_irq != dev->irq) {
2665                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2666                                   IRQF_SHARED, dev->name, dev);
2667                 if (unlikely(ret < 0)) {
2668                         netdev_err(priv->dev,
2669                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2670                                    __func__, priv->wol_irq, ret);
2671                         goto wolirq_error;
2672                 }
2673         }
2674
2675         /* Request the LPI IRQ in case a separate line is used */
2676         if (priv->lpi_irq > 0) {
2677                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2678                                   dev->name, dev);
2679                 if (unlikely(ret < 0)) {
2680                         netdev_err(priv->dev,
2681                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2682                                    __func__, priv->lpi_irq, ret);
2683                         goto lpiirq_error;
2684                 }
2685         }
2686
2687         stmmac_enable_all_queues(priv);
2688         stmmac_start_all_queues(priv);
2689
2690         return 0;
2691
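             /*
              * Error unwinding: each label below releases what was acquired
              * after the corresponding failure point, in reverse order of
              * acquisition.
              */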
2692 lpiirq_error:
2693         if (priv->wol_irq != dev->irq)
2694                 free_irq(priv->wol_irq, dev);
2695 wolirq_error:
2696         free_irq(dev->irq, dev);
2697 irq_error:
2698         if (dev->phydev)
2699                 phy_stop(dev->phydev);
2700
2701         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2702                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2703
2704         stmmac_hw_teardown(dev);
2705 init_error:
2706         free_dma_desc_resources(priv);
2707 dma_desc_error:
2708         if (dev->phydev)
2709                 phy_disconnect(dev->phydev);
2710
2711         return ret;
2712 }
2713
2714 /**
2715  *  stmmac_release - close entry point of the driver
2716  *  @dev : device pointer.
2717  *  Description:
2718  *  This is the stop entry point of the driver.
2719  */
2720 static int stmmac_release(struct net_device *dev)
2721 {
2722         struct stmmac_priv *priv = netdev_priv(dev);
2723         u32 chan;
2724
2725         if (priv->eee_enabled)
2726                 del_timer_sync(&priv->eee_ctrl_timer);
2727
2728         /* Stop and disconnect the PHY */
2729         if (dev->phydev) {
2730                 phy_stop(dev->phydev);
2731                 phy_disconnect(dev->phydev);
2732         }
2733
2734         stmmac_stop_all_queues(priv);
2735
2736         stmmac_disable_all_queues(priv);
2737
2738         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2739                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2740
2741         /* Free the IRQ lines */
2742         free_irq(dev->irq, dev);
2743         if (priv->wol_irq != dev->irq)
2744                 free_irq(priv->wol_irq, dev);
2745         if (priv->lpi_irq > 0)
2746                 free_irq(priv->lpi_irq, dev);
2747
2748         /* Stop TX/RX DMA and clear the descriptors */
2749         stmmac_stop_all_dma(priv);
2750
2751         /* Release and free the Rx/Tx resources */
2752         free_dma_desc_resources(priv);
2753
2754         /* Disable the MAC Rx/Tx */
2755         stmmac_mac_set(priv, priv->ioaddr, false);
2756
2757         netif_carrier_off(dev);
2758
2759         stmmac_release_ptp(priv);
2760
2761         return 0;
2762 }
2763
2764 /**
2765  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
2766  *  @priv: driver private structure
2767  *  @des: buffer start address
2768  *  @total_len: total length to fill in descriptors
2769  *  @last_segment: condition for the last descriptor
2770  *  @queue: TX queue index
2771  *  Description:
2772  *  This function fills the descriptors, requesting new ones as needed,
2773  *  according to the buffer length to fill.
2774  */
2775 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2776                                  int total_len, bool last_segment, u32 queue)
2777 {
2778         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2779         struct dma_desc *desc;
2780         u32 buff_size;
2781         int tmp_len;
2782
2783         tmp_len = total_len;
2784
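             /*
              * Each descriptor carries at most TSO_MAX_BUFF_SIZE bytes of
              * payload, so large sends span several descriptors; only the
              * final one is flagged as the last segment.
              */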
2785         while (tmp_len > 0) {
2786                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2787                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2788                 desc = tx_q->dma_tx + tx_q->cur_tx;
2789
2790                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2791                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2792                             TSO_MAX_BUFF_SIZE : tmp_len;
2793
2794                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2795                                 0, 1,
2796                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2797                                 0, 0);
2798
2799                 tmp_len -= TSO_MAX_BUFF_SIZE;
2800         }
2801 }
2802
2803 /**
2804  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2805  *  @skb : the socket buffer
2806  *  @dev : device pointer
2807  *  Description: this is the transmit function that is called on TSO frames
2808  *  (support available on GMAC4 and newer chips).
2809  *  The diagram below shows the ring programming in case of TSO frames:
2810  *
2811  *  First Descriptor
2812  *   --------
2813  *   | DES0 |---> buffer1 = L2/L3/L4 header
2814  *   | DES1 |---> TCP Payload (can continue on next descr...)
2815  *   | DES2 |---> buffer 1 and 2 len
2816  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2817  *   --------
2818  *      |
2819  *     ...
2820  *      |
2821  *   --------
2822  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2823  *   | DES1 | --|
2824  *   | DES2 | --> buffer 1 and 2 len
2825  *   | DES3 |
2826  *   --------
2827  *
2828  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when it changes.
2829  */
2830 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2831 {
2832         struct dma_desc *desc, *first, *mss_desc = NULL;
2833         struct stmmac_priv *priv = netdev_priv(dev);
2834         int nfrags = skb_shinfo(skb)->nr_frags;
2835         u32 queue = skb_get_queue_mapping(skb);
2836         unsigned int first_entry, des;
2837         struct stmmac_tx_queue *tx_q;
2838         int tmp_pay_len = 0;
2839         u32 pay_len, mss;
2840         u8 proto_hdr_len;
2841         int i;
2842
2843         tx_q = &priv->tx_queue[queue];
2844
2845         /* Compute header lengths */
2846         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2847
2848         /* Descriptor availability based on the threshold should be safe enough */
2849         if (unlikely(stmmac_tx_avail(priv, queue) <
2850                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2851                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2852                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2853                                                                 queue));
2854                         /* This is a hard error, log it. */
2855                         netdev_err(priv->dev,
2856                                    "%s: Tx Ring full when queue awake\n",
2857                                    __func__);
2858                 }
2859                 return NETDEV_TX_BUSY;
2860         }
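        /* Informational note: the payload needs at most
         * (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1 descriptors;
         * e.g. a 64 KiB payload needs 65536 / 16383 + 1 = 5 entries, plus
         * whatever the fragments and an optional MSS context descriptor
         * consume. The queue is stopped further down once fewer than
         * MAX_SKB_FRAGS + 1 entries remain, which preserves that headroom.
         */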
2861
2862         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2863
2864         mss = skb_shinfo(skb)->gso_size;
2865
2866         /* set new MSS value if needed */
2867         if (mss != tx_q->mss) {
2868                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2869                 stmmac_set_mss(priv, mss_desc, mss);
2870                 tx_q->mss = mss;
2871                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2872                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2873         }
2874
2875         if (netif_msg_tx_queued(priv)) {
2876                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2877                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2878                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2879                         skb->data_len);
2880         }
2881
2882         first_entry = tx_q->cur_tx;
2883         WARN_ON(tx_q->tx_skbuff[first_entry]);
2884
2885         desc = tx_q->dma_tx + first_entry;
2886         first = desc;
2887
2888         /* first descriptor: fill Headers on Buf1 */
2889         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2890                              DMA_TO_DEVICE);
2891         if (dma_mapping_error(priv->device, des))
2892                 goto dma_map_err;
2893
2894         tx_q->tx_skbuff_dma[first_entry].buf = des;
2895         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2896
2897         first->des0 = cpu_to_le32(des);
2898
2899         /* Fill start of payload in buff2 of first descriptor */
2900         if (pay_len)
2901                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2902
2903         /* If needed take extra descriptors to fill the remaining payload */
2904         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2905
2906         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2907
2908         /* Prepare fragments */
2909         for (i = 0; i < nfrags; i++) {
2910                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2911
2912                 des = skb_frag_dma_map(priv->device, frag, 0,
2913                                        skb_frag_size(frag),
2914                                        DMA_TO_DEVICE);
2915                 if (dma_mapping_error(priv->device, des))
2916                         goto dma_map_err;
2917
2918                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2919                                      (i == nfrags - 1), queue);
2920
2921                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2922                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2923                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2924         }
2925
2926         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2927
2928         /* Only the last descriptor gets to point to the skb. */
2929         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2930
2931         /* We've used all descriptors we need for this skb, however,
2932          * advance cur_tx so that it references a fresh descriptor.
2933          * ndo_start_xmit will fill this descriptor the next time it's
2934          * called and stmmac_tx_clean may clean up to this descriptor.
2935          */
2936         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2937
2938         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2939                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2940                           __func__);
2941                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2942         }
2943
2944         dev->stats.tx_bytes += skb->len;
2945         priv->xstats.tx_tso_frames++;
2946         priv->xstats.tx_tso_nfrags += nfrags;
2947
2948         /* Manage tx mitigation */
2949         tx_q->tx_count_frames += nfrags + 1;
2950         if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
2951             !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
2952             (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2953             priv->hwts_tx_en)) {
2954                 stmmac_tx_timer_arm(priv, queue);
2955         } else {
2956                 tx_q->tx_count_frames = 0;
2957                 stmmac_set_tx_ic(priv, desc);
2958                 priv->xstats.tx_set_ic_bit++;
2959         }
2960
2961         skb_tx_timestamp(skb);
2962
2963         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2964                      priv->hwts_tx_en)) {
2965                 /* declare that device is doing timestamping */
2966                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2967                 stmmac_enable_tx_timestamp(priv, first);
2968         }
2969
2970         /* Complete the first descriptor before granting the DMA */
2971         stmmac_prepare_tso_tx_desc(priv, first, 1,
2972                         proto_hdr_len,
2973                         pay_len,
2974                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2975                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2976
2977         /* If context desc is used to change MSS */
2978         if (mss_desc) {
2979                 /* Make sure that the first descriptor has been completely
2980                  * written, including its own bit. Because the MSS context
2981                  * descriptor actually precedes the first descriptor, its
2982                  * own bit must be the very last thing written.
2983                  */
2984                 dma_wmb();
2985                 stmmac_set_tx_owner(priv, mss_desc);
2986         }
2987
2988         /* The own bit must be the latest setting done when preparing the
2989          * descriptor, and a barrier is then needed to make sure that
2990          * all is coherent before granting the DMA engine.
2991          */
2992         wmb();
2993
2994         if (netif_msg_pktdata(priv)) {
2995                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2996                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2997                         tx_q->cur_tx, first, nfrags);
2998
2999                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
3000
3001                 pr_info(">>> frame to be transmitted: ");
3002                 print_pkt(skb->data, skb_headlen(skb));
3003         }
3004
3005         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3006
3007         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3008         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3009
3010         return NETDEV_TX_OK;
3011
3012 dma_map_err:
3013         dev_err(priv->device, "Tx dma map failed\n");
3014         dev_kfree_skb(skb);
3015         priv->dev->stats.tx_dropped++;
3016         return NETDEV_TX_OK;
3017 }
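/* Illustrative example of the first-descriptor split in stmmac_tso_xmit()
 * (not part of the driver): for a TCP/IPv4 frame with a 14-byte Ethernet
 * header, a 20-byte IP header and a 32-byte TCP header, proto_hdr_len is
 * 66, so:
 *
 *   first->des0 -> skb->data        (the 66 header bytes in buffer 1)
 *   first->des1 -> skb->data + 66   (start of the linear payload, buffer 2)
 *
 * Any linear payload beyond TSO_MAX_BUFF_SIZE and every page fragment are
 * then handed to stmmac_tso_allocator().
 */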
3018
3019 /**
3020  *  stmmac_xmit - Tx entry point of the driver
3021  *  @skb : the socket buffer
3022  *  @dev : device pointer
3023  *  Description : this is the tx entry point of the driver.
3024  *  It programs the chain or the ring and supports oversized frames
3025  *  and SG feature.
3026  */
3027 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3028 {
3029         struct stmmac_priv *priv = netdev_priv(dev);
3030         unsigned int nopaged_len = skb_headlen(skb);
3031         int i, csum_insertion = 0, is_jumbo = 0;
3032         u32 queue = skb_get_queue_mapping(skb);
3033         int nfrags = skb_shinfo(skb)->nr_frags;
3034         int entry;
3035         unsigned int first_entry;
3036         struct dma_desc *desc, *first;
3037         struct stmmac_tx_queue *tx_q;
3038         unsigned int enh_desc;
3039         unsigned int des;
3040
3041         tx_q = &priv->tx_queue[queue];
3042
3043         if (priv->tx_path_in_lpi_mode)
3044                 stmmac_disable_eee_mode(priv);
3045
3046         /* Manage oversized TCP frames for GMAC4 device */
3047         if (skb_is_gso(skb) && priv->tso) {
3048                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3049                         /*
3050                          * There is no way to determine the number of TSO
3051                          * capable Queues. Let's always use Queue 0,
3052                          * because if TSO is supported then at least this
3053                          * one will be capable.
3054                          */
3055                         skb_set_queue_mapping(skb, 0);
3056
3057                         return stmmac_tso_xmit(skb, dev);
3058                 }
3059         }
3060
3061         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3062                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3063                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3064                                                                 queue));
3065                         /* This is a hard error, log it. */
3066                         netdev_err(priv->dev,
3067                                    "%s: Tx Ring full when queue awake\n",
3068                                    __func__);
3069                 }
3070                 return NETDEV_TX_BUSY;
3071         }
3072
3073         entry = tx_q->cur_tx;
3074         first_entry = entry;
3075         WARN_ON(tx_q->tx_skbuff[first_entry]);
3076
3077         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3078
3079         if (likely(priv->extend_desc))
3080                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3081         else
3082                 desc = tx_q->dma_tx + entry;
3083
3084         first = desc;
3085
3086         enh_desc = priv->plat->enh_desc;
3087         /* To program the descriptors according to the size of the frame */
3088         if (enh_desc)
3089                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3090
3091         if (unlikely(is_jumbo)) {
3092                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3093                 if (unlikely(entry < 0) && (entry != -EINVAL))
3094                         goto dma_map_err;
3095         }
3096
3097         for (i = 0; i < nfrags; i++) {
3098                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3099                 int len = skb_frag_size(frag);
3100                 bool last_segment = (i == (nfrags - 1));
3101
3102                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3103                 WARN_ON(tx_q->tx_skbuff[entry]);
3104
3105                 if (likely(priv->extend_desc))
3106                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3107                 else
3108                         desc = tx_q->dma_tx + entry;
3109
3110                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3111                                        DMA_TO_DEVICE);
3112                 if (dma_mapping_error(priv->device, des))
3113                         goto dma_map_err; /* should reuse desc w/o issues */
3114
3115                 tx_q->tx_skbuff_dma[entry].buf = des;
3116
3117                 stmmac_set_desc_addr(priv, desc, des);
3118
3119                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3120                 tx_q->tx_skbuff_dma[entry].len = len;
3121                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3122
3123                 /* Prepare the descriptor and set the own bit too */
3124                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3125                                 priv->mode, 1, last_segment, skb->len);
3126         }
3127
3128         /* Only the last descriptor gets to point to the skb. */
3129         tx_q->tx_skbuff[entry] = skb;
3130
3131         /* We've used all descriptors we need for this skb, however,
3132          * advance cur_tx so that it references a fresh descriptor.
3133          * ndo_start_xmit will fill this descriptor the next time it's
3134          * called and stmmac_tx_clean may clean up to this descriptor.
3135          */
3136         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3137         tx_q->cur_tx = entry;
3138
3139         if (netif_msg_pktdata(priv)) {
3140                 void *tx_head;
3141
3142                 netdev_dbg(priv->dev,
3143                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3144                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3145                            entry, first, nfrags);
3146
3147                 if (priv->extend_desc)
3148                         tx_head = (void *)tx_q->dma_etx;
3149                 else
3150                         tx_head = (void *)tx_q->dma_tx;
3151
3152                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3153
3154                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3155                 print_pkt(skb->data, skb->len);
3156         }
3157
3158         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3159                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3160                           __func__);
3161                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3162         }
3163
3164         dev->stats.tx_bytes += skb->len;
3165
3166         /* According to the coalesce parameter, the IC bit for the latest
3167          * segment is reset and the timer is restarted to clean the tx status.
3168          * This approach takes care of the fragments: desc is the first
3169          * element in the no-SG case.
3170          */
3171         tx_q->tx_count_frames += nfrags + 1;
3172         if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3173             !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3174             (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3175             priv->hwts_tx_en)) {
3176                 stmmac_tx_timer_arm(priv, queue);
3177         } else {
3178                 tx_q->tx_count_frames = 0;
3179                 stmmac_set_tx_ic(priv, desc);
3180                 priv->xstats.tx_set_ic_bit++;
3181         }
3182
3183         skb_tx_timestamp(skb);
3184
3185         /* Ready to fill the first descriptor and set the OWN bit w/o any
3186          * problems because all the descriptors are actually ready to be
3187          * passed to the DMA engine.
3188          */
3189         if (likely(!is_jumbo)) {
3190                 bool last_segment = (nfrags == 0);
3191
3192                 des = dma_map_single(priv->device, skb->data,
3193                                      nopaged_len, DMA_TO_DEVICE);
3194                 if (dma_mapping_error(priv->device, des))
3195                         goto dma_map_err;
3196
3197                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3198
3199                 stmmac_set_desc_addr(priv, first, des);
3200
3201                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3202                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3203
3204                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3205                              priv->hwts_tx_en)) {
3206                         /* declare that device is doing timestamping */
3207                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3208                         stmmac_enable_tx_timestamp(priv, first);
3209                 }
3210
3211                 /* Prepare the first descriptor setting the OWN bit too */
3212                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3213                                 csum_insertion, priv->mode, 1, last_segment,
3214                                 skb->len);
3215         } else {
3216                 stmmac_set_tx_owner(priv, first);
3217         }
3218
3219         /* The own bit must be the latest setting done when preparing the
3220          * descriptor, and a barrier is then needed to make sure that
3221          * all is coherent before granting the DMA engine.
3222          */
3223         wmb();
3224
3225         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3226
3227         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3228
3229         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3230         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3231
3232         return NETDEV_TX_OK;
3233
3234 dma_map_err:
3235         netdev_err(priv->dev, "Tx DMA map failed\n");
3236         dev_kfree_skb(skb);
3237         priv->dev->stats.tx_dropped++;
3238         return NETDEV_TX_OK;
3239 }
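/* Ring index arithmetic used throughout both xmit paths, shown for
 * illustration (STMMAC_GET_ENTRY is defined elsewhere in the driver and
 * assumes the usual power-of-two ring size):
 *
 *   next = (cur + 1) & (DMA_TX_SIZE - 1);
 *
 * The index silently wraps to 0 after DMA_TX_SIZE - 1, so cur_tx and
 * dirty_tx can chase each other around the ring without explicit
 * overflow checks.
 */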
3240
3241 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3242 {
3243         struct vlan_ethhdr *veth;
3244         __be16 vlan_proto;
3245         u16 vlanid;
3246
3247         veth = (struct vlan_ethhdr *)skb->data;
3248         vlan_proto = veth->h_vlan_proto;
3249
3250         if ((vlan_proto == htons(ETH_P_8021Q) &&
3251              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3252             (vlan_proto == htons(ETH_P_8021AD) &&
3253              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3254                 /* pop the vlan tag */
3255                 vlanid = ntohs(veth->h_vlan_TCI);
3256                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3257                 skb_pull(skb, VLAN_HLEN);
3258                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3259         }
3260 }
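/* Byte-level view of the tag pop above (illustration only). The frame
 * arrives as:
 *
 *   [dst MAC (6)][src MAC (6)][TPID 0x8100/0x88a8 (2)][TCI (2)][type]...
 *
 * memmove() shifts the twelve address bytes (ETH_ALEN * 2) forward by
 * VLAN_HLEN (4) and skb_pull() drops the now-unused head, leaving a
 * plain untagged Ethernet header; the extracted TCI is reported to the
 * stack through __vlan_hwaccel_put_tag().
 */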
3261
3262
3263 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3264 {
3265         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3266                 return 0;
3267
3268         return 1;
3269 }
3270
3271 /**
3272  * stmmac_rx_refill - refill used skb preallocated buffers
3273  * @priv: driver private structure
3274  * @queue: RX queue index
3275  * Description: this reallocates the skbs for the zero-copy based
3276  * reception process.
3277  */
3278 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3279 {
3280         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3281         int dirty = stmmac_rx_dirty(priv, queue);
3282         unsigned int entry = rx_q->dirty_rx;
3283
3284         int bfsize = priv->dma_buf_sz;
3285
3286         while (dirty-- > 0) {
3287                 struct dma_desc *p;
3288
3289                 if (priv->extend_desc)
3290                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3291                 else
3292                         p = rx_q->dma_rx + entry;
3293
3294                 if (likely(!rx_q->rx_skbuff[entry])) {
3295                         struct sk_buff *skb;
3296
3297                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3298                         if (unlikely(!skb)) {
3299                                 /* so for a while no zero-copy! */
3300                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3301                                 if (unlikely(net_ratelimit()))
3302                                         dev_err(priv->device,
3303                                                 "fail to alloc skb entry %d\n",
3304                                                 entry);
3305                                 break;
3306                         }
3307
3308                         rx_q->rx_skbuff[entry] = skb;
3309                         rx_q->rx_skbuff_dma[entry] =
3310                             dma_map_single(priv->device, skb->data, bfsize,
3311                                            DMA_FROM_DEVICE);
3312                         if (dma_mapping_error(priv->device,
3313                                               rx_q->rx_skbuff_dma[entry])) {
3314                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3315                                 dev_kfree_skb(skb);
3316                                 break;
3317                         }
3318
3319                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3320                         stmmac_refill_desc3(priv, rx_q, p);
3321
3322                         if (rx_q->rx_zeroc_thresh > 0)
3323                                 rx_q->rx_zeroc_thresh--;
3324
3325                         netif_dbg(priv, rx_status, priv->dev,
3326                                   "refill entry #%d\n", entry);
3327                 }
3328                 dma_wmb();
3329
3330                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3331
3332                 dma_wmb();
3333
3334                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3335         }
3336         rx_q->dirty_rx = entry;
3337         stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
3338 }
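/* Informational note on the refill bookkeeping above: stmmac_rx_dirty()
 * (defined elsewhere) reports how many entries the DMA has consumed and
 * handed back to the CPU, conceptually:
 *
 *   dirty = (cur_rx - dirty_rx) mod DMA_RX_SIZE
 *
 * The loop therefore re-arms exactly that many descriptors, starting at
 * dirty_rx, before returning ownership to the hardware via
 * stmmac_set_rx_owner().
 */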
3339
3340 /**
3341  * stmmac_rx - manage the receive process
3342  * @priv: driver private structure
3343  * @limit: napi budget
3344  * @queue: RX queue index.
3345  * Description: this is the function called by the napi poll method.
3346  * It gets all the frames inside the ring.
3347  */
3348 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3349 {
3350         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3351         struct stmmac_channel *ch = &priv->channel[queue];
3352         unsigned int next_entry = rx_q->cur_rx;
3353         int coe = priv->hw->rx_csum;
3354         unsigned int count = 0;
3355         bool xmac;
3356
3357         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3358
3359         if (netif_msg_rx_status(priv)) {
3360                 void *rx_head;
3361
3362                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3363                 if (priv->extend_desc)
3364                         rx_head = (void *)rx_q->dma_erx;
3365                 else
3366                         rx_head = (void *)rx_q->dma_rx;
3367
3368                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3369         }
3370         while (count < limit) {
3371                 int entry, status;
3372                 struct dma_desc *p;
3373                 struct dma_desc *np;
3374
3375                 entry = next_entry;
3376
3377                 if (priv->extend_desc)
3378                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3379                 else
3380                         p = rx_q->dma_rx + entry;
3381
3382                 /* read the status of the incoming frame */
3383                 status = stmmac_rx_status(priv, &priv->dev->stats,
3384                                 &priv->xstats, p);
3385                 /* check if managed by the DMA otherwise go ahead */
3386                 if (unlikely(status & dma_own))
3387                         break;
3388
3389                 count++;
3390
3391                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3392                 next_entry = rx_q->cur_rx;
3393
3394                 if (priv->extend_desc)
3395                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3396                 else
3397                         np = rx_q->dma_rx + next_entry;
3398
3399                 prefetch(np);
3400
3401                 if (priv->extend_desc)
3402                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3403                                         &priv->xstats, rx_q->dma_erx + entry);
3404                 if (unlikely(status == discard_frame)) {
3405                         priv->dev->stats.rx_errors++;
3406                         if (priv->hwts_rx_en && !priv->extend_desc) {
3407                                 /* DESC2 & DESC3 will be overwritten by device
3408                                  * with timestamp value, hence reinitialize
3409                                  * them in stmmac_rx_refill() function so that
3410                                  * device can reuse it.
3411                                  */
3412                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3413                                 rx_q->rx_skbuff[entry] = NULL;
3414                                 dma_unmap_single(priv->device,
3415                                                  rx_q->rx_skbuff_dma[entry],
3416                                                  priv->dma_buf_sz,
3417                                                  DMA_FROM_DEVICE);
3418                         }
3419                 } else {
3420                         struct sk_buff *skb;
3421                         int frame_len;
3422                         unsigned int des;
3423
3424                         stmmac_get_desc_addr(priv, p, &des);
3425                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3426
3427                         /*  If frame length is greater than skb buffer size
3428                          *  (preallocated during init) then the packet is
3429                          *  ignored
3430                          */
3431                         if (frame_len > priv->dma_buf_sz) {
3432                                 if (net_ratelimit())
3433                                         netdev_err(priv->dev,
3434                                                    "len %d larger than size (%d)\n",
3435                                                    frame_len, priv->dma_buf_sz);
3436                                 priv->dev->stats.rx_length_errors++;
3437                                 continue;
3438                         }
3439
3440                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3441                          * Type frames (LLC/LLC-SNAP)
3442                          *
3443                          * llc_snap is never checked in GMAC >= 4, so this ACS
3444                          * feature is always disabled and packets need to be
3445                          * stripped manually.
3446                          */
3447                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3448                             unlikely(status != llc_snap))
3449                                 frame_len -= ETH_FCS_LEN;
3450
3451                         if (netif_msg_rx_status(priv)) {
3452                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3453                                            p, entry, des);
3454                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3455                                            frame_len, status);
3456                         }
3457
3458                         /* Zero-copy is always used, for all sizes, in
3459                          * the GMAC4 case because the used descriptors
3460                          * always need to be refilled.
3461                          */
3462                         if (unlikely(!xmac &&
3463                                      ((frame_len < priv->rx_copybreak) ||
3464                                      stmmac_rx_threshold_count(rx_q)))) {
3465                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3466                                                                 frame_len);
3467                                 if (unlikely(!skb)) {
3468                                         if (net_ratelimit())
3469                                                 dev_warn(priv->device,
3470                                                          "packet dropped\n");
3471                                         priv->dev->stats.rx_dropped++;
3472                                         continue;
3473                                 }
3474
3475                                 dma_sync_single_for_cpu(priv->device,
3476                                                         rx_q->rx_skbuff_dma
3477                                                         [entry], frame_len,
3478                                                         DMA_FROM_DEVICE);
3479                                 skb_copy_to_linear_data(skb,
3480                                                         rx_q->
3481                                                         rx_skbuff[entry]->data,
3482                                                         frame_len);
3483
3484                                 skb_put(skb, frame_len);
3485                                 dma_sync_single_for_device(priv->device,
3486                                                            rx_q->rx_skbuff_dma
3487                                                            [entry], frame_len,
3488                                                            DMA_FROM_DEVICE);
3489                         } else {
3490                                 skb = rx_q->rx_skbuff[entry];
3491                                 if (unlikely(!skb)) {
3492                                         if (net_ratelimit())
3493                                                 netdev_err(priv->dev,
3494                                                            "%s: Inconsistent Rx chain\n",
3495                                                            priv->dev->name);
3496                                         priv->dev->stats.rx_dropped++;
3497                                         continue;
3498                                 }
3499                                 prefetch(skb->data - NET_IP_ALIGN);
3500                                 rx_q->rx_skbuff[entry] = NULL;
3501                                 rx_q->rx_zeroc_thresh++;
3502
3503                                 skb_put(skb, frame_len);
3504                                 dma_unmap_single(priv->device,
3505                                                  rx_q->rx_skbuff_dma[entry],
3506                                                  priv->dma_buf_sz,
3507                                                  DMA_FROM_DEVICE);
3508                         }
3509
3510                         if (netif_msg_pktdata(priv)) {
3511                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3512                                            frame_len);
3513                                 print_pkt(skb->data, frame_len);
3514                         }
3515
3516                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3517
3518                         stmmac_rx_vlan(priv->dev, skb);
3519
3520                         skb->protocol = eth_type_trans(skb, priv->dev);
3521
3522                         if (unlikely(!coe))
3523                                 skb_checksum_none_assert(skb);
3524                         else
3525                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3526
3527                         napi_gro_receive(&ch->rx_napi, skb);
3528
3529                         priv->dev->stats.rx_packets++;
3530                         priv->dev->stats.rx_bytes += frame_len;
3531                 }
3532         }
3533
3534         stmmac_rx_refill(priv, queue);
3535
3536         priv->xstats.rx_pkt_n += count;
3537
3538         return count;
3539 }
3540
3541 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
3542 {
3543         struct stmmac_channel *ch =
3544                 container_of(napi, struct stmmac_channel, rx_napi);
3545         struct stmmac_priv *priv = ch->priv_data;
3546         u32 chan = ch->index;
3547         int work_done;
3548
3549         priv->xstats.napi_poll++;
3550
3551         work_done = stmmac_rx(priv, budget, chan);
3552         if (work_done < budget && napi_complete_done(napi, work_done))
3553                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3554         return work_done;
3555 }
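/* NAPI contract note (informational): returning work_done == budget tells
 * the core to poll again, so the per-channel DMA interrupt stays masked;
 * only when fewer packets than the budget were processed does
 * napi_complete_done() succeed and the interrupt get re-enabled through
 * stmmac_enable_dma_irq(). The tx poll routine below follows the same
 * pattern and additionally kicks the DMA if descriptors are still queued.
 */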
3556
3557 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3558 {
3559         struct stmmac_channel *ch =
3560                 container_of(napi, struct stmmac_channel, tx_napi);
3561         struct stmmac_priv *priv = ch->priv_data;
3562         struct stmmac_tx_queue *tx_q;
3563         u32 chan = ch->index;
3564         int work_done;
3565
3566         priv->xstats.napi_poll++;
3567
3568         work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3569         work_done = min(work_done, budget);
3570
3571         if (work_done < budget && napi_complete_done(napi, work_done))
3572                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3573
3574         /* Force transmission restart */
3575         tx_q = &priv->tx_queue[chan];
3576         if (tx_q->cur_tx != tx_q->dirty_tx) {
3577                 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3578                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3579                                        chan);
3580         }
3581
3582         return work_done;
3583 }
3584
3585 /**
3586  *  stmmac_tx_timeout
3587  *  @dev : Pointer to net device structure
3588  *  Description: this function is called when a packet transmission fails to
3589  *   complete within a reasonable time. The driver will mark the error in the
3590  *   netdev structure and arrange for the device to be reset to a sane state
3591  *   in order to transmit a new packet.
3592  */
3593 static void stmmac_tx_timeout(struct net_device *dev)
3594 {
3595         struct stmmac_priv *priv = netdev_priv(dev);
3596
3597         stmmac_global_err(priv);
3598 }
3599
3600 /**
3601  *  stmmac_set_rx_mode - entry point for multicast addressing
3602  *  @dev : pointer to the device structure
3603  *  Description:
3604  *  This function is a driver entry point which gets called by the kernel
3605  *  whenever multicast addresses must be enabled/disabled.
3606  *  Return value:
3607  *  void.
3608  */
3609 static void stmmac_set_rx_mode(struct net_device *dev)
3610 {
3611         struct stmmac_priv *priv = netdev_priv(dev);
3612
3613         stmmac_set_filter(priv, priv->hw, dev);
3614 }
3615
3616 /**
3617  *  stmmac_change_mtu - entry point to change MTU size for the device.
3618  *  @dev : device pointer.
3619  *  @new_mtu : the new MTU size for the device.
3620  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3621  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3622  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3623  *  Return value:
3624  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3625  *  file on failure.
3626  */
3627 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3628 {
3629         struct stmmac_priv *priv = netdev_priv(dev);
3630
3631         if (netif_running(dev)) {
3632                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3633                 return -EBUSY;
3634         }
3635
3636         dev->mtu = new_mtu;
3637
3638         netdev_update_features(dev);
3639
3640         return 0;
3641 }
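/* Usage sketch (assumes an interface named eth0): because the handler
 * above rejects changes while the device is running, the MTU must be
 * changed with the link down, e.g.:
 *
 *   ip link set dev eth0 down
 *   ip link set dev eth0 mtu 9000
 *   ip link set dev eth0 up
 */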
3642
3643 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3644                                              netdev_features_t features)
3645 {
3646         struct stmmac_priv *priv = netdev_priv(dev);
3647
3648         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3649                 features &= ~NETIF_F_RXCSUM;
3650
3651         if (!priv->plat->tx_coe)
3652                 features &= ~NETIF_F_CSUM_MASK;
3653
3654         /* Some GMAC devices have buggy Jumbo frame support that
3655          * requires the Tx COE to be disabled for oversized frames
3656          * (due to limited buffer sizes). In this case we disable
3657          * the TX csum insertion in the TDES and do not use SF.
3658          */
3659         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3660                 features &= ~NETIF_F_CSUM_MASK;
3661
3662         /* Disable TSO if asked by ethtool */
3663         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3664                 if (features & NETIF_F_TSO)
3665                         priv->tso = true;
3666                 else
3667                         priv->tso = false;
3668         }
3669
3670         return features;
3671 }
3672
3673 static int stmmac_set_features(struct net_device *netdev,
3674                                netdev_features_t features)
3675 {
3676         struct stmmac_priv *priv = netdev_priv(netdev);
3677
3678         /* Keep the COE Type if checksum offload is supported */
3679         if (features & NETIF_F_RXCSUM)
3680                 priv->hw->rx_csum = priv->plat->rx_coe;
3681         else
3682                 priv->hw->rx_csum = 0;
3683         /* No check needed because rx_coe has already been set and will be
3684          * fixed up if there is an issue.
3685          */
3686         stmmac_rx_ipc(priv, priv->hw);
3687
3688         return 0;
3689 }
3690
3691 /**
3692  *  stmmac_interrupt - main ISR
3693  *  @irq: interrupt number.
3694  *  @dev_id: to pass the net device pointer.
3695  *  Description: this is the main driver interrupt service routine.
3696  *  It can call:
3697  *  o DMA service routine (to manage incoming frame reception and transmission
3698  *    status)
3699  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3700  *    interrupts.
3701  */
3702 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3703 {
3704         struct net_device *dev = (struct net_device *)dev_id;
3705         struct stmmac_priv *priv = netdev_priv(dev);
3706         u32 rx_cnt = priv->plat->rx_queues_to_use;
3707         u32 tx_cnt = priv->plat->tx_queues_to_use;
3708         u32 queues_count;
3709         u32 queue;
3710         bool xmac;
3711
3712         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3713         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3714
3715         if (priv->irq_wake)
3716                 pm_wakeup_event(priv->device, 0);
3717
3718         if (unlikely(!dev)) {
3719                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3720                 return IRQ_NONE;
3721         }
3722
3723         /* Check if adapter is up */
3724         if (test_bit(STMMAC_DOWN, &priv->state))
3725                 return IRQ_HANDLED;
3726         /* Check if a fatal error happened */
3727         if (stmmac_safety_feat_interrupt(priv))
3728                 return IRQ_HANDLED;
3729
3730         /* To handle GMAC own interrupts */
3731         if ((priv->plat->has_gmac) || xmac) {
3732                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3733                 int mtl_status;
3734
3735                 if (unlikely(status)) {
3736                         /* For LPI we need to save the tx status */
3737                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3738                                 priv->tx_path_in_lpi_mode = true;
3739                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3740                                 priv->tx_path_in_lpi_mode = false;
3741                 }
3742
3743                 for (queue = 0; queue < queues_count; queue++) {
3744                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3745
3746                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3747                                                                 queue);
3748                         if (mtl_status != -EINVAL)
3749                                 status |= mtl_status;
3750
3751                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3752                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3753                                                        rx_q->rx_tail_addr,
3754                                                        queue);
3755                 }
3756
3757                 /* PCS link status */
3758                 if (priv->hw->pcs) {
3759                         if (priv->xstats.pcs_link)
3760                                 netif_carrier_on(dev);
3761                         else
3762                                 netif_carrier_off(dev);
3763                 }
3764         }
3765
3766         /* To handle DMA interrupts */
3767         stmmac_dma_interrupt(priv);
3768
3769         return IRQ_HANDLED;
3770 }
3771
3772 #ifdef CONFIG_NET_POLL_CONTROLLER
3773 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3774  * to allow network I/O with interrupts disabled.
3775  */
3776 static void stmmac_poll_controller(struct net_device *dev)
3777 {
3778         disable_irq(dev->irq);
3779         stmmac_interrupt(dev->irq, dev);
3780         enable_irq(dev->irq);
3781 }
3782 #endif
3783
3784 /**
3785  *  stmmac_ioctl - Entry point for the Ioctl
3786  *  @dev: Device pointer.
3787  *  @rq: An IOCTL-specific structure that can contain a pointer to
3788  *  a proprietary structure used to pass information to the driver.
3789  *  @cmd: IOCTL command
3790  *  Description:
3791  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3792  */
3793 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3794 {
3795         int ret = -EOPNOTSUPP;
3796
3797         if (!netif_running(dev))
3798                 return -EINVAL;
3799
3800         switch (cmd) {
3801         case SIOCGMIIPHY:
3802         case SIOCGMIIREG:
3803         case SIOCSMIIREG:
3804                 if (!dev->phydev)
3805                         return -EINVAL;
3806                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3807                 break;
3808         case SIOCSHWTSTAMP:
3809                 ret = stmmac_hwtstamp_set(dev, rq);
3810                 break;
3811         case SIOCGHWTSTAMP:
3812                 ret = stmmac_hwtstamp_get(dev, rq);
3813                 break;
3814         default:
3815                 break;
3816         }
3817
3818         return ret;
3819 }
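/* Minimal user-space sketch of the SIOCSHWTSTAMP path above (assumes an
 * interface named eth0; error handling omitted):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_ALL,
 *   };
 *   struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ioctl(socket(AF_INET, SOCK_DGRAM, 0), SIOCSHWTSTAMP, &ifr);
 *
 * The request lands in stmmac_hwtstamp_set() via the switch above.
 */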
3820
3821 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3822                                     void *cb_priv)
3823 {
3824         struct stmmac_priv *priv = cb_priv;
3825         int ret = -EOPNOTSUPP;
3826
3827         stmmac_disable_all_queues(priv);
3828
3829         switch (type) {
3830         case TC_SETUP_CLSU32:
3831                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3832                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3833                 break;
3834         default:
3835                 break;
3836         }
3837
3838         stmmac_enable_all_queues(priv);
3839         return ret;
3840 }
3841
3842 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3843                                  struct tc_block_offload *f)
3844 {
3845         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3846                 return -EOPNOTSUPP;
3847
3848         switch (f->command) {
3849         case TC_BLOCK_BIND:
3850                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3851                                 priv, priv, f->extack);
3852         case TC_BLOCK_UNBIND:
3853                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3854                 return 0;
3855         default:
3856                 return -EOPNOTSUPP;
3857         }
3858 }
3859
3860 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3861                            void *type_data)
3862 {
3863         struct stmmac_priv *priv = netdev_priv(ndev);
3864
3865         switch (type) {
3866         case TC_SETUP_BLOCK:
3867                 return stmmac_setup_tc_block(priv, type_data);
3868         case TC_SETUP_QDISC_CBS:
3869                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3870         default:
3871                 return -EOPNOTSUPP;
3872         }
3873 }
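/* Configuration sketch for the CBS offload handled above (assumes an
 * interface named eth0 with queues already mapped, e.g. via mqprio; the
 * handle and slope values are placeholders):
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs \
 *           idleslope 98688 sendslope -901312 \
 *           hicredit 153 locredit -1389 offload 1
 *
 * With offload 1 the kernel invokes ndo_setup_tc(TC_SETUP_QDISC_CBS) and
 * the parameters reach stmmac_tc_setup_cbs().
 */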
3874
3875 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3876 {
3877         struct stmmac_priv *priv = netdev_priv(ndev);
3878         int ret = 0;
3879
3880         ret = eth_mac_addr(ndev, addr);
3881         if (ret)
3882                 return ret;
3883
3884         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3885
3886         return ret;
3887 }
3888
3889 #ifdef CONFIG_DEBUG_FS
3890 static struct dentry *stmmac_fs_dir;
3891
3892 static void sysfs_display_ring(void *head, int size, int extend_desc,
3893                                struct seq_file *seq)
3894 {
3895         int i;
3896         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3897         struct dma_desc *p = (struct dma_desc *)head;
3898
3899         for (i = 0; i < size; i++) {
3900                 if (extend_desc) {
3901                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3902                                    i, (unsigned int)virt_to_phys(ep),
3903                                    le32_to_cpu(ep->basic.des0),
3904                                    le32_to_cpu(ep->basic.des1),
3905                                    le32_to_cpu(ep->basic.des2),
3906                                    le32_to_cpu(ep->basic.des3));
3907                         ep++;
3908                 } else {
3909                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3910                                    i, (unsigned int)virt_to_phys(p),
3911                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3912                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3913                         p++;
3914                 }
3915                 seq_printf(seq, "\n");
3916         }
3917 }
3918
3919 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3920 {
3921         struct net_device *dev = seq->private;
3922         struct stmmac_priv *priv = netdev_priv(dev);
3923         u32 rx_count = priv->plat->rx_queues_to_use;
3924         u32 tx_count = priv->plat->tx_queues_to_use;
3925         u32 queue;
3926
3927         if ((dev->flags & IFF_UP) == 0)
3928                 return 0;
3929
3930         for (queue = 0; queue < rx_count; queue++) {
3931                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3932
3933                 seq_printf(seq, "RX Queue %d:\n", queue);
3934
3935                 if (priv->extend_desc) {
3936                         seq_printf(seq, "Extended descriptor ring:\n");
3937                         sysfs_display_ring((void *)rx_q->dma_erx,
3938                                            DMA_RX_SIZE, 1, seq);
3939                 } else {
3940                         seq_printf(seq, "Descriptor ring:\n");
3941                         sysfs_display_ring((void *)rx_q->dma_rx,
3942                                            DMA_RX_SIZE, 0, seq);
3943                 }
3944         }
3945
3946         for (queue = 0; queue < tx_count; queue++) {
3947                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3948
3949                 seq_printf(seq, "TX Queue %d:\n", queue);
3950
3951                 if (priv->extend_desc) {
3952                         seq_printf(seq, "Extended descriptor ring:\n");
3953                         sysfs_display_ring((void *)tx_q->dma_etx,
3954                                            DMA_TX_SIZE, 1, seq);
3955                 } else {
3956                         seq_printf(seq, "Descriptor ring:\n");
3957                         sysfs_display_ring((void *)tx_q->dma_tx,
3958                                            DMA_TX_SIZE, 0, seq);
3959                 }
3960         }
3961
3962         return 0;
3963 }
3964 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3965
3966 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3967 {
3968         struct net_device *dev = seq->private;
3969         struct stmmac_priv *priv = netdev_priv(dev);
3970
3971         if (!priv->hw_cap_support) {
3972                 seq_printf(seq, "DMA HW features not supported\n");
3973                 return 0;
3974         }
3975
3976         seq_printf(seq, "==============================\n");
3977         seq_printf(seq, "\tDMA HW features\n");
3978         seq_printf(seq, "==============================\n");
3979
3980         seq_printf(seq, "\t10/100 Mbps: %s\n",
3981                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3982         seq_printf(seq, "\t1000 Mbps: %s\n",
3983                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3984         seq_printf(seq, "\tHalf duplex: %s\n",
3985                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3986         seq_printf(seq, "\tHash Filter: %s\n",
3987                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3988         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3989                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3990         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3991                    (priv->dma_cap.pcs) ? "Y" : "N");
3992         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3993                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3994         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3995                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3996         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3997                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3998         seq_printf(seq, "\tRMON module: %s\n",
3999                    (priv->dma_cap.rmon) ? "Y" : "N");
4000         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4001                    (priv->dma_cap.time_stamp) ? "Y" : "N");
4002         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
4003                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
4004         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
4005                    (priv->dma_cap.eee) ? "Y" : "N");
4006         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4007         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4008                    (priv->dma_cap.tx_coe) ? "Y" : "N");
4009         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4010                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4011                            (priv->dma_cap.rx_coe) ? "Y" : "N");
4012         } else {
4013                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4014                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4015                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4016                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4017         }
4018         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4019                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4020         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4021                    priv->dma_cap.number_rx_channel);
4022         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4023                    priv->dma_cap.number_tx_channel);
4024         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4025                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4026
4027         return 0;
4028 }
4029 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4030
4031 static int stmmac_init_fs(struct net_device *dev)
4032 {
4033         struct stmmac_priv *priv = netdev_priv(dev);
4034
4035         /* Create per netdev entries */
4036         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4037
4038         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4039                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4040
4041                 return -ENOMEM;
4042         }
4043
4044         /* Entry to report DMA RX/TX rings */
4045         priv->dbgfs_rings_status =
4046                 debugfs_create_file("descriptors_status", 0444,
4047                                     priv->dbgfs_dir, dev,
4048                                     &stmmac_rings_status_fops);
4049
4050         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4051                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4052                 debugfs_remove_recursive(priv->dbgfs_dir);
4053
4054                 return -ENOMEM;
4055         }
4056
4057         /* Entry to report the DMA HW features */
4058         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4059                                                   priv->dbgfs_dir,
4060                                                   dev, &stmmac_dma_cap_fops);
4061
4062         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4063                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4064                 debugfs_remove_recursive(priv->dbgfs_dir);
4065
4066                 return -ENOMEM;
4067         }
4068
4069         return 0;
4070 }
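/* Usage sketch (assumes debugfs is mounted at /sys/kernel/debug and the
 * interface is named eth0):
 *
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 *
 * The parent directory comes from stmmac_fs_dir, created at module init
 * elsewhere in this file, so the "stmmaceth" component is an assumption
 * about that name.
 */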
4071
4072 static void stmmac_exit_fs(struct net_device *dev)
4073 {
4074         struct stmmac_priv *priv = netdev_priv(dev);
4075
4076         debugfs_remove_recursive(priv->dbgfs_dir);
4077 }
4078 #endif /* CONFIG_DEBUG_FS */
4079
4080 static const struct net_device_ops stmmac_netdev_ops = {
4081         .ndo_open = stmmac_open,
4082         .ndo_start_xmit = stmmac_xmit,
4083         .ndo_stop = stmmac_release,
4084         .ndo_change_mtu = stmmac_change_mtu,
4085         .ndo_fix_features = stmmac_fix_features,
4086         .ndo_set_features = stmmac_set_features,
4087         .ndo_set_rx_mode = stmmac_set_rx_mode,
4088         .ndo_tx_timeout = stmmac_tx_timeout,
4089         .ndo_do_ioctl = stmmac_ioctl,
4090         .ndo_setup_tc = stmmac_setup_tc,
4091 #ifdef CONFIG_NET_POLL_CONTROLLER
4092         .ndo_poll_controller = stmmac_poll_controller,
4093 #endif
4094         .ndo_set_mac_address = stmmac_set_mac_address,
4095 };
4096
4097 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4098 {
4099         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4100                 return;
4101         if (test_bit(STMMAC_DOWN, &priv->state))
4102                 return;
4103
4104         netdev_err(priv->dev, "Reset adapter.\n");
4105
4106         rtnl_lock();
4107         netif_trans_update(priv->dev);
4108         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4109                 usleep_range(1000, 2000);
4110
4111         set_bit(STMMAC_DOWN, &priv->state);
4112         dev_close(priv->dev);
4113         dev_open(priv->dev, NULL);
4114         clear_bit(STMMAC_DOWN, &priv->state);
4115         clear_bit(STMMAC_RESETING, &priv->state);
4116         rtnl_unlock();
4117 }
4118
4119 static void stmmac_service_task(struct work_struct *work)
4120 {
4121         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4122                         service_task);
4123
4124         stmmac_reset_subtask(priv);
4125         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4126 }
4127
4128 /**
4129  *  stmmac_hw_init - Init the MAC device
4130  *  @priv: driver private structure
4131  *  Description: this function is to configure the MAC device according to
4132  *  some platform parameters or the HW capability register. It prepares the
4133  *  driver to use either ring or chain modes and to setup either enhanced or
4134  *  normal descriptors.
4135  */
4136 static int stmmac_hw_init(struct stmmac_priv *priv)
4137 {
4138         int ret;
4139
4140         /* dwmac-sun8i only works in chain mode */
4141         if (priv->plat->has_sun8i)
4142                 chain_mode = 1;
        priv->chain_mode = chain_mode;

        /* Initialize HW Interface */
        ret = stmmac_hwif_init(priv);
        if (ret)
                return ret;

        /* Get the HW capability (on GMAC cores newer than 3.50a) */
        priv->hw_cap_support = stmmac_get_hw_features(priv);
        if (priv->hw_cap_support) {
                dev_info(priv->device, "DMA HW capability register supported\n");

                /* We can override some gmac/dma configuration fields
                 * (e.g. enh_desc, tx_coe) that are passed through the
                 * platform with the values from the HW capability
                 * register (if supported).
                 */
                priv->plat->enh_desc = priv->dma_cap.enh_desc;
                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
                priv->hw->pmt = priv->plat->pmt;

                /* TXCOE doesn't work in thresh DMA mode */
                if (priv->plat->force_thresh_dma_mode)
                        priv->plat->tx_coe = 0;
                else
                        priv->plat->tx_coe = priv->dma_cap.tx_coe;

                /* In case of GMAC4 rx_coe is from HW cap register. */
                priv->plat->rx_coe = priv->dma_cap.rx_coe;

                if (priv->dma_cap.rx_coe_type2)
                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
                else if (priv->dma_cap.rx_coe_type1)
                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

        } else {
                dev_info(priv->device, "No HW DMA feature register supported\n");
        }

        if (priv->plat->rx_coe) {
                priv->hw->rx_csum = priv->plat->rx_coe;
                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
                if (priv->synopsys_id < DWMAC_CORE_4_00)
                        dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
        }
        if (priv->plat->tx_coe)
                dev_info(priv->device, "TX Checksum insertion supported\n");

        if (priv->plat->pmt) {
                dev_info(priv->device, "Wake-Up On LAN supported\n");
                device_set_wakeup_capable(priv->device, 1);
        }

        if (priv->dma_cap.tsoen)
                dev_info(priv->device, "TSO supported\n");

        /* Run HW quirks, if any */
        if (priv->hwif_quirks) {
                ret = priv->hwif_quirks(priv);
                if (ret)
                        return ret;
        }

        /* Rx Watchdog is available in the COREs newer than 3.40.
         * In some cases, for example on buggy HW, this feature
         * has to be disabled; this can be done by passing the
         * riwt_off field from the platform.
         */
        if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
            (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
                priv->use_riwt = 1;
                dev_info(priv->device,
                         "Enable RX Mitigation via HW Watchdog Timer\n");
        }

        return 0;
}
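
/* For illustration (hypothetical GMAC4-class core with full HW capabilities
 * and riwt enabled), a successful stmmac_hw_init() would emit dev_info lines
 * such as:
 *
 *   DMA HW capability register supported
 *   RX Checksum Offload Engine supported
 *   TX Checksum insertion supported
 *   Enable RX Mitigation via HW Watchdog Timer
 */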

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function, used to call
 * alloc_etherdev and allocate the private structure.
 * Return:
 * 0 on success, otherwise a negative errno.
 */
int stmmac_dvr_probe(struct device *device,
                     struct plat_stmmacenet_data *plat_dat,
                     struct stmmac_resources *res)
{
        struct net_device *ndev = NULL;
        struct stmmac_priv *priv;
        u32 queue, maxq;
        int ret = 0;

        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
                                  MTL_MAX_TX_QUEUES,
                                  MTL_MAX_RX_QUEUES);
        if (!ndev)
                return -ENOMEM;

        SET_NETDEV_DEV(ndev, device);

        priv = netdev_priv(ndev);
        priv->device = device;
        priv->dev = ndev;

        stmmac_set_ethtool_ops(ndev);
        priv->pause = pause;
        priv->plat = plat_dat;
        priv->ioaddr = res->addr;
        priv->dev->base_addr = (unsigned long)res->addr;

        priv->dev->irq = res->irq;
        priv->wol_irq = res->wol_irq;
        priv->lpi_irq = res->lpi_irq;

        if (!IS_ERR_OR_NULL(res->mac))
                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

        dev_set_drvdata(device, priv->dev);

        /* Verify driver arguments */
        stmmac_verify_args();

        /* Allocate workqueue */
        priv->wq = create_singlethread_workqueue("stmmac_wq");
        if (!priv->wq) {
                dev_err(priv->device, "failed to create workqueue\n");
                ret = -ENOMEM;
                goto error_wq;
        }

        INIT_WORK(&priv->service_task, stmmac_service_task);

        /* Override with kernel parameters if supplied. XXX CRS XXX:
         * this needs to have multiple instances.
         */
        if ((phyaddr >= 0) && (phyaddr <= 31))
                priv->plat->phy_addr = phyaddr;

        if (priv->plat->stmmac_rst) {
                ret = reset_control_assert(priv->plat->stmmac_rst);
                reset_control_deassert(priv->plat->stmmac_rst);
                /* Some reset controllers have only a reset callback instead
                 * of the assert + deassert callbacks pair.
                 */
                if (ret == -ENOTSUPP)
                        reset_control_reset(priv->plat->stmmac_rst);
        }

        /* Init MAC and get the capabilities */
        ret = stmmac_hw_init(priv);
        if (ret)
                goto error_hw_init;

        stmmac_check_ether_addr(priv);

        /* Configure real RX and TX queues */
        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

        ndev->netdev_ops = &stmmac_netdev_ops;

        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_RXCSUM;

        ret = stmmac_tc_init(priv, priv);
        if (!ret)
                ndev->hw_features |= NETIF_F_HW_TC;

        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
                priv->tso = true;
                dev_info(priv->device, "TSO feature enabled\n");
        }
        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
#endif
        priv->msg_enable = netif_msg_init(debug, default_msg_level);

        /* MTU range: 46 - hw-specific max */
        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
        if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
                ndev->max_mtu = JUMBO_LEN;
        else if (priv->plat->has_xgmac)
                ndev->max_mtu = XGMAC_JUMBO_LEN;
        else
                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
        /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
         * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
         */
        if ((priv->plat->maxmtu < ndev->max_mtu) &&
            (priv->plat->maxmtu >= ndev->min_mtu))
                ndev->max_mtu = priv->plat->maxmtu;
        else if (priv->plat->maxmtu < ndev->min_mtu)
                dev_warn(priv->device,
                         "%s: warning: maxmtu has an invalid value (%d)\n",
                         __func__, priv->plat->maxmtu);
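
        /* A worked example (hypothetical values): on a core with enh_desc,
         * max_mtu starts at JUMBO_LEN; a platform passing maxmtu = 2000
         * (below JUMBO_LEN and above min_mtu = 46) lowers max_mtu to 2000,
         * while maxmtu = 20 is below min_mtu and only triggers the warning
         * above.
         */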

        if (flow_ctrl)
                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */

        /* Setup channels NAPI */
        maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                ch->priv_data = priv;
                ch->index = queue;

                if (queue < priv->plat->rx_queues_to_use) {
                        netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
                                       NAPI_POLL_WEIGHT);
                }
                if (queue < priv->plat->tx_queues_to_use) {
                        netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
                                       NAPI_POLL_WEIGHT);
                }
        }
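
        /* For example (hypothetical queue counts): with rx_queues_to_use = 4
         * and tx_queues_to_use = 2, maxq = 4; channels 0-1 get both an RX
         * and a TX NAPI instance, while channels 2-3 only get the RX one.
         */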

        mutex_init(&priv->lock);

        /* If a specific clk_csr value is passed from the platform, the
         * CSR Clock Range selection is fixed and cannot be changed at
         * run-time. Otherwise, the driver will try to set the MDC clock
         * dynamically according to the actual csr clock input.
         */
        if (priv->plat->clk_csr >= 0)
                priv->clk_csr = priv->plat->clk_csr;
        else
                stmmac_clk_csr_set(priv);


        stmmac_check_pcs_mode(priv);

        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI) {
                /* MDIO bus Registration */
                ret = stmmac_mdio_register(ndev);
                if (ret < 0) {
                        dev_err(priv->device,
                                "%s: MDIO bus (id: %d) registration failed\n",
                                __func__, priv->plat->bus_id);
                        goto error_mdio_register;
                }
        }

        ret = register_netdev(ndev);
        if (ret) {
                dev_err(priv->device, "%s: ERROR %i registering the device\n",
                        __func__, ret);
                goto error_netdev_register;
        }

#ifdef CONFIG_DEBUG_FS
        ret = stmmac_init_fs(ndev);
        if (ret < 0)
                netdev_warn(priv->dev, "%s: failed debugfs registration\n",
                            __func__);
#endif

        return ret;

error_netdev_register:
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
error_mdio_register:
        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                if (queue < priv->plat->rx_queues_to_use)
                        netif_napi_del(&ch->rx_napi);
                if (queue < priv->plat->tx_queues_to_use)
                        netif_napi_del(&ch->tx_napi);
        }
error_hw_init:
        destroy_workqueue(priv->wq);
error_wq:
        free_netdev(ndev);

        return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
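
/* A minimal sketch (hypothetical bus glue, not a real caller) of how a
 * glue driver is expected to invoke this probe:
 *
 *   struct stmmac_resources res = {
 *           .addr = base,           // ioremapped register base
 *           .irq = irq,             // main device interrupt
 *           .wol_irq = irq,         // may fall back to the main IRQ
 *           .lpi_irq = -1,          // optional
 *   };
 *   ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 */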

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);

        netdev_info(priv->dev, "%s: removing driver\n", __func__);

#ifdef CONFIG_DEBUG_FS
        stmmac_exit_fs(ndev);
#endif
        stmmac_stop_all_dma(priv);

        stmmac_mac_set(priv, priv->ioaddr, false);
        netif_carrier_off(ndev);
        unregister_netdev(ndev);
        if (priv->plat->stmmac_rst)
                reset_control_assert(priv->plat->stmmac_rst);
        clk_disable_unprepare(priv->plat->pclk);
        clk_disable_unprepare(priv->plat->stmmac_clk);
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
        destroy_workqueue(priv->wq);
        mutex_destroy(&priv->lock);
        free_netdev(ndev);

        return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queues, program the PMT register
 * (for WoL) and release the driver resources.
 */
int stmmac_suspend(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);

        if (!ndev || !netif_running(ndev))
                return 0;

        if (ndev->phydev)
                phy_stop(ndev->phydev);

        mutex_lock(&priv->lock);

        netif_device_detach(ndev);
        stmmac_stop_all_queues(priv);

        stmmac_disable_all_queues(priv);

        /* Stop TX/RX DMA */
        stmmac_stop_all_dma(priv);

        /* Enable Power down mode by programming the PMT regs */
        if (device_may_wakeup(priv->device)) {
                stmmac_pmt(priv, priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clocks since wake-up via PMT is not used */
                clk_disable(priv->plat->pclk);
                clk_disable(priv->plat->stmmac_clk);
        }
        mutex_unlock(&priv->lock);

        priv->oldlink = false;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;
        return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                rx_q->cur_rx = 0;
                rx_q->dirty_rx = 0;
        }

        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                tx_q->cur_tx = 0;
                tx_q->dirty_tx = 0;
                tx_q->mss = 0;
        }
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume this function is invoked to set up the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);

        if (!netif_running(ndev))
                return 0;

        /* The Power Down bit in the PM register is cleared automatically
         * as soon as a magic packet or a Wake-up frame is received.
         * Anyway, it's better to manually clear this bit because it can
         * generate problems while resuming from other devices (e.g. a
         * serial console).
         */
        if (device_may_wakeup(priv->device)) {
                mutex_lock(&priv->lock);
                stmmac_pmt(priv, priv->hw, 0);
                mutex_unlock(&priv->lock);
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clocks previously disabled */
                clk_enable(priv->plat->stmmac_clk);
                clk_enable(priv->plat->pclk);
                /* reset the PHY so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
        }

        netif_device_attach(ndev);

        mutex_lock(&priv->lock);

        stmmac_reset_queues_param(priv);

        stmmac_clear_descriptors(priv);

        stmmac_hw_setup(ndev, false);
        stmmac_init_tx_coalesce(priv);
        stmmac_set_rx_mode(ndev);

        stmmac_enable_all_queues(priv);

        stmmac_start_all_queues(priv);

        mutex_unlock(&priv->lock);

        if (ndev->phydev)
                phy_start(ndev->phydev);

        return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
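
/* A minimal sketch (assuming a hypothetical platform glue file, e.g.
 * dwmac-foo.c) of how these suspend/resume callbacks are typically wired
 * into dev_pm_ops:
 *
 *   static SIMPLE_DEV_PM_OPS(stmmac_foo_pm_ops, stmmac_suspend, stmmac_resume);
 *
 *   static struct platform_driver stmmac_foo_driver = {
 *           .driver = {
 *                   .name = "dwmac-foo",
 *                   .pm = &stmmac_foo_pm_ops,
 *           },
 *   };
 */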

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
        char *opt;

        if (!str || !*str)
                return -EINVAL;
        while ((opt = strsep(&str, ",")) != NULL) {
                if (!strncmp(opt, "debug:", 6)) {
                        if (kstrtoint(opt + 6, 0, &debug))
                                goto err;
                } else if (!strncmp(opt, "phyaddr:", 8)) {
                        if (kstrtoint(opt + 8, 0, &phyaddr))
                                goto err;
                } else if (!strncmp(opt, "buf_sz:", 7)) {
                        if (kstrtoint(opt + 7, 0, &buf_sz))
                                goto err;
                } else if (!strncmp(opt, "tc:", 3)) {
                        if (kstrtoint(opt + 3, 0, &tc))
                                goto err;
                } else if (!strncmp(opt, "watchdog:", 9)) {
                        if (kstrtoint(opt + 9, 0, &watchdog))
                                goto err;
                } else if (!strncmp(opt, "flow_ctrl:", 10)) {
                        if (kstrtoint(opt + 10, 0, &flow_ctrl))
                                goto err;
                } else if (!strncmp(opt, "pause:", 6)) {
                        if (kstrtoint(opt + 6, 0, &pause))
                                goto err;
                } else if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                } else if (!strncmp(opt, "chain_mode:", 11)) {
                        if (kstrtoint(opt + 11, 0, &chain_mode))
                                goto err;
                }
        }
        return 0;

err:
        pr_err("%s: ERROR broken module parameter conversion\n", __func__);
        return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
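
/* For example (hypothetical values), booting with:
 *
 *   stmmaceth=debug:16,phyaddr:1,eee_timer:500
 *
 * sets the message level, the PHY address and the EEE/LPI timer via the
 * parser above.
 */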
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
        /* Create debugfs main directory if it doesn't exist yet */
        if (!stmmac_fs_dir) {
                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

                if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
                        pr_err("ERROR %s, debugfs create directory failed\n",
                               STMMAC_RESOURCE_NAME);

                        return -ENOMEM;
                }
        }
#endif

        return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");