drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
57 #define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106
107 /* By default the driver uses ring mode to manage the tx and rx descriptors,
108  * but the user can force chain mode instead of ring mode.
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default value
126  * for any that are invalid.
127  */
128 static void stmmac_verify_args(void)
129 {
130         if (unlikely(watchdog < 0))
131                 watchdog = TX_TIMEO;
132         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133                 buf_sz = DEFAULT_BUFSIZE;
134         if (unlikely(flow_ctrl > 1))
135                 flow_ctrl = FLOW_AUTO;
136         else if (likely(flow_ctrl < 0))
137                 flow_ctrl = FLOW_OFF;
138         if (unlikely((pause < 0) || (pause > 0xffff)))
139                 pause = PAUSE_TIME;
140         if (eee_timer < 0)
141                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153         u32 queue;
154
155         for (queue = 0; queue < maxq; queue++) {
156                 struct stmmac_channel *ch = &priv->channel[queue];
157
158                 napi_disable(&ch->napi);
159         }
160 }
161
162 /**
163  * stmmac_enable_all_queues - Enable all queues
164  * @priv: driver private structure
165  */
166 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
167 {
168         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
169         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
170         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
171         u32 queue;
172
173         for (queue = 0; queue < maxq; queue++) {
174                 struct stmmac_channel *ch = &priv->channel[queue];
175
176                 napi_enable(&ch->napi);
177         }
178 }
179
180 /**
181  * stmmac_stop_all_queues - Stop all queues
182  * @priv: driver private structure
183  */
184 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
185 {
186         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
187         u32 queue;
188
189         for (queue = 0; queue < tx_queues_cnt; queue++)
190                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
191 }
192
193 /**
194  * stmmac_start_all_queues - Start all queues
195  * @priv: driver private structure
196  */
197 static void stmmac_start_all_queues(struct stmmac_priv *priv)
198 {
199         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
200         u32 queue;
201
202         for (queue = 0; queue < tx_queues_cnt; queue++)
203                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
204 }
205
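/* Kick the driver's service workqueue unless the interface is going down or a
 * service run is already scheduled.
 */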
206 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
207 {
208         if (!test_bit(STMMAC_DOWN, &priv->state) &&
209             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
210                 queue_work(priv->wq, &priv->service_task);
211 }
212
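/* Fatal error path: take the carrier down and ask the service task to perform
 * a HW reset.
 */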
213 static void stmmac_global_err(struct stmmac_priv *priv)
214 {
215         netif_carrier_off(priv->dev);
216         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
217         stmmac_service_event_schedule(priv);
218 }
219
220 /**
221  * stmmac_clk_csr_set - dynamically set the MDC clock
222  * @priv: driver private structure
223  * Description: this is to dynamically set the MDC clock according to the csr
224  * clock input.
225  * Note:
226  *      If a specific clk_csr value is passed from the platform
227  *      this means that the CSR Clock Range selection cannot be
228  *      changed at run-time and it is fixed (as reported in the driver
229  *      documentation). Otherwise the driver will try to set the MDC
230  *      clock dynamically according to the actual clock input.
231  */
232 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
233 {
234         u32 clk_rate;
235
236         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
237
238         /* The platform-provided default clk_csr is assumed valid for
239          * all other cases except for the ones handled below.
240          * For clock rates above the IEEE 802.3 specified frequency range
241          * we cannot estimate the proper divider because the frequency of
242          * clk_csr_i is not known, so the default divider is left
243          * unchanged.
244          */
245         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
246                 if (clk_rate < CSR_F_35M)
247                         priv->clk_csr = STMMAC_CSR_20_35M;
248                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
249                         priv->clk_csr = STMMAC_CSR_35_60M;
250                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
251                         priv->clk_csr = STMMAC_CSR_60_100M;
252                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
253                         priv->clk_csr = STMMAC_CSR_100_150M;
254                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
255                         priv->clk_csr = STMMAC_CSR_150_250M;
256                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
257                         priv->clk_csr = STMMAC_CSR_250_300M;
258         }
259
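        /* The dwmac-sun8i glue layer uses its own CSR (MDC) clock divider
         * encoding, selected here from the bus clock rate.
         */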
260         if (priv->plat->has_sun8i) {
261                 if (clk_rate > 160000000)
262                         priv->clk_csr = 0x03;
263                 else if (clk_rate > 80000000)
264                         priv->clk_csr = 0x02;
265                 else if (clk_rate > 40000000)
266                         priv->clk_csr = 0x01;
267                 else
268                         priv->clk_csr = 0;
269         }
270
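        /* XGMAC cores use a wider CSR clock range encoding (0x0 - 0x5). */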
271         if (priv->plat->has_xgmac) {
272                 if (clk_rate > 400000000)
273                         priv->clk_csr = 0x5;
274                 else if (clk_rate > 350000000)
275                         priv->clk_csr = 0x4;
276                 else if (clk_rate > 300000000)
277                         priv->clk_csr = 0x3;
278                 else if (clk_rate > 250000000)
279                         priv->clk_csr = 0x2;
280                 else if (clk_rate > 150000000)
281                         priv->clk_csr = 0x1;
282                 else
283                         priv->clk_csr = 0x0;
284         }
285 }
286
287 static void print_pkt(unsigned char *buf, int len)
288 {
289         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
290         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
291 }
292
293 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
294 {
295         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
296         u32 avail;
297
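        /* One descriptor slot is always kept unused so that
         * cur_tx == dirty_tx unambiguously means an empty ring.
         */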
298         if (tx_q->dirty_tx > tx_q->cur_tx)
299                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
300         else
301                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
302
303         return avail;
304 }
305
306 /**
307  * stmmac_rx_dirty - Get RX queue dirty
308  * @priv: driver private structure
309  * @queue: RX queue index
310  */
311 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
312 {
313         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
314         u32 dirty;
315
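        /* Count the RX descriptors consumed but not yet refilled, i.e. the
         * distance from dirty_rx to cur_rx (with wrap-around).
         */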
316         if (rx_q->dirty_rx <= rx_q->cur_rx)
317                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
318         else
319                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
320
321         return dirty;
322 }
323
324 /**
325  * stmmac_hw_fix_mac_speed - callback for speed selection
326  * @priv: driver private structure
327  * Description: on some platforms (e.g. ST), some HW system configuration
328  * registers have to be set according to the link speed negotiated.
329  */
330 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
331 {
332         struct net_device *ndev = priv->dev;
333         struct phy_device *phydev = ndev->phydev;
334
335         if (likely(priv->plat->fix_mac_speed))
336                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
337 }
338
339 /**
340  * stmmac_enable_eee_mode - check and enter LPI mode
341  * @priv: driver private structure
342  * Description: verify that all TX queues have finished their work and, if
343  * so, enter LPI mode when EEE is enabled.
344  */
345 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
346 {
347         u32 tx_cnt = priv->plat->tx_queues_to_use;
348         u32 queue;
349
350         /* check if all TX queues have the work finished */
351         for (queue = 0; queue < tx_cnt; queue++) {
352                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
353
354                 if (tx_q->dirty_tx != tx_q->cur_tx)
355                         return; /* still unfinished work */
356         }
357
358         /* Check and enter in LPI mode */
359         if (!priv->tx_path_in_lpi_mode)
360                 stmmac_set_eee_mode(priv, priv->hw,
361                                 priv->plat->en_tx_lpi_clockgating);
362 }
363
364 /**
365  * stmmac_disable_eee_mode - disable and exit from LPI mode
366  * @priv: driver private structure
367  * Description: exit LPI mode and disable EEE when the TX path is in LPI
368  * state. This is called from the xmit path.
369  */
370 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
371 {
372         stmmac_reset_eee_mode(priv, priv->hw);
373         del_timer_sync(&priv->eee_ctrl_timer);
374         priv->tx_path_in_lpi_mode = false;
375 }
376
377 /**
378  * stmmac_eee_ctrl_timer - EEE TX SW timer.
379  * @t: timer_list hook
380  * Description:
381  *  if there is no data transfer and we are not already in LPI state,
382  *  then the MAC transmitter can be moved to LPI state.
383  */
384 static void stmmac_eee_ctrl_timer(struct timer_list *t)
385 {
386         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
387
388         stmmac_enable_eee_mode(priv);
389         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
390 }
391
392 /**
393  * stmmac_eee_init - init EEE
394  * @priv: driver private structure
395  * Description:
396  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
397  *  can also manage EEE, this function enables the LPI state and starts
398  *  the related timer.
399  */
400 bool stmmac_eee_init(struct stmmac_priv *priv)
401 {
402         struct net_device *ndev = priv->dev;
403         int interface = priv->plat->interface;
404         bool ret = false;
405
406         if ((interface != PHY_INTERFACE_MODE_MII) &&
407             (interface != PHY_INTERFACE_MODE_GMII) &&
408             !phy_interface_mode_is_rgmii(interface))
409                 goto out;
410
411         /* When using PCS we cannot deal with the phy registers at this stage,
412          * so we do not support extra features like EEE.
413          */
414         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
415             (priv->hw->pcs == STMMAC_PCS_TBI) ||
416             (priv->hw->pcs == STMMAC_PCS_RTBI))
417                 goto out;
418
419         /* MAC core supports the EEE feature. */
420         if (priv->dma_cap.eee) {
421                 int tx_lpi_timer = priv->tx_lpi_timer;
422
423                 /* Check if the PHY supports EEE */
424                 if (phy_init_eee(ndev->phydev, 1)) {
425                         /* Handle the case where EEE can no longer be
426                          * supported at run-time (for example because the
427                          * link partner caps have changed).
428                          * In that case the driver disables its own timers.
429                          */
430                         mutex_lock(&priv->lock);
431                         if (priv->eee_active) {
432                                 netdev_dbg(priv->dev, "disable EEE\n");
433                                 del_timer_sync(&priv->eee_ctrl_timer);
434                                 stmmac_set_eee_timer(priv, priv->hw, 0,
435                                                 tx_lpi_timer);
436                         }
437                         priv->eee_active = 0;
438                         mutex_unlock(&priv->lock);
439                         goto out;
440                 }
441                 /* Activate the EEE and start timers */
442                 mutex_lock(&priv->lock);
443                 if (!priv->eee_active) {
444                         priv->eee_active = 1;
445                         timer_setup(&priv->eee_ctrl_timer,
446                                     stmmac_eee_ctrl_timer, 0);
447                         mod_timer(&priv->eee_ctrl_timer,
448                                   STMMAC_LPI_T(eee_timer));
449
450                         stmmac_set_eee_timer(priv, priv->hw,
451                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
452                 }
453                 /* Set HW EEE according to the speed */
454                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
455
456                 ret = true;
457                 mutex_unlock(&priv->lock);
458
459                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
460         }
461 out:
462         return ret;
463 }
464
465 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
466  * @priv: driver private structure
467  * @p : descriptor pointer
468  * @skb : the socket buffer
469  * Description :
470  * This function reads the timestamp from the descriptor, performs some
471  * sanity checks and then passes it to the stack.
472  */
473 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
474                                    struct dma_desc *p, struct sk_buff *skb)
475 {
476         struct skb_shared_hwtstamps shhwtstamp;
477         u64 ns;
478
479         if (!priv->hwts_tx_en)
480                 return;
481
482         /* exit if skb doesn't support hw tstamp */
483         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
484                 return;
485
486         /* check tx tstamp status */
487         if (stmmac_get_tx_timestamp_status(priv, p)) {
488                 /* get the valid tstamp */
489                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
490
491                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
493
494                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
495                 /* pass tstamp to stack */
496                 skb_tstamp_tx(skb, &shhwtstamp);
497         }
498
499         return;
500 }
501
502 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
503  * @priv: driver private structure
504  * @p : descriptor pointer
505  * @np : next descriptor pointer
506  * @skb : the socket buffer
507  * Description :
508  * This function will read received packet's timestamp from the descriptor
509  * and pass it to the stack. It also performs some sanity checks.
510  */
511 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
512                                    struct dma_desc *np, struct sk_buff *skb)
513 {
514         struct skb_shared_hwtstamps *shhwtstamp = NULL;
515         struct dma_desc *desc = p;
516         u64 ns;
517
518         if (!priv->hwts_rx_en)
519                 return;
520         /* For GMAC4, the valid timestamp is from CTX next desc. */
521         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
522                 desc = np;
523
524         /* Check if timestamp is available */
525         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
526                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
527                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
528                 shhwtstamp = skb_hwtstamps(skb);
529                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
530                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
531         } else  {
532                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
533         }
534 }
535
536 /**
537  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
538  *  @dev: device pointer.
539  *  @ifr: An IOCTL specific structure, that can contain a pointer to
540  *  a proprietary structure used to pass information to the driver.
541  *  Description:
542  *  This function configures the MAC to enable/disable both outgoing(TX)
543  *  and incoming(RX) packets time stamping based on user input.
544  *  Return Value:
545  *  0 on success and an appropriate negative integer on failure.
546  */
547 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
548 {
549         struct stmmac_priv *priv = netdev_priv(dev);
550         struct hwtstamp_config config;
551         struct timespec64 now;
552         u64 temp = 0;
553         u32 ptp_v2 = 0;
554         u32 tstamp_all = 0;
555         u32 ptp_over_ipv4_udp = 0;
556         u32 ptp_over_ipv6_udp = 0;
557         u32 ptp_over_ethernet = 0;
558         u32 snap_type_sel = 0;
559         u32 ts_master_en = 0;
560         u32 ts_event_en = 0;
561         u32 value = 0;
562         u32 sec_inc;
563         bool xmac;
564
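        /* GMAC4 and XGMAC share the newer timestamping logic, so they are
         * treated alike below.
         */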
565         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
566
567         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
568                 netdev_alert(priv->dev, "No support for HW time stamping\n");
569                 priv->hwts_tx_en = 0;
570                 priv->hwts_rx_en = 0;
571
572                 return -EOPNOTSUPP;
573         }
574
575         if (copy_from_user(&config, ifr->ifr_data,
576                            sizeof(struct hwtstamp_config)))
577                 return -EFAULT;
578
579         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
580                    __func__, config.flags, config.tx_type, config.rx_filter);
581
582         /* reserved for future extensions */
583         if (config.flags)
584                 return -EINVAL;
585
586         if (config.tx_type != HWTSTAMP_TX_OFF &&
587             config.tx_type != HWTSTAMP_TX_ON)
588                 return -ERANGE;
589
590         if (priv->adv_ts) {
591                 switch (config.rx_filter) {
592                 case HWTSTAMP_FILTER_NONE:
593                         /* time stamp no incoming packet at all */
594                         config.rx_filter = HWTSTAMP_FILTER_NONE;
595                         break;
596
597                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
598                         /* PTP v1, UDP, any kind of event packet */
599                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
600                         /* take time stamp for all event messages */
601                         if (xmac)
602                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
603                         else
604                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
605
606                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
607                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
608                         break;
609
610                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
611                         /* PTP v1, UDP, Sync packet */
612                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
613                         /* take time stamp for SYNC messages only */
614                         ts_event_en = PTP_TCR_TSEVNTENA;
615
616                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
617                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
618                         break;
619
620                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
621                         /* PTP v1, UDP, Delay_req packet */
622                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
623                         /* take time stamp for Delay_Req messages only */
624                         ts_master_en = PTP_TCR_TSMSTRENA;
625                         ts_event_en = PTP_TCR_TSEVNTENA;
626
627                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
628                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
629                         break;
630
631                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
632                         /* PTP v2, UDP, any kind of event packet */
633                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
634                         ptp_v2 = PTP_TCR_TSVER2ENA;
635                         /* take time stamp for all event messages */
636                         if (xmac)
637                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
638                         else
639                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
640
641                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643                         break;
644
645                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
646                         /* PTP v2, UDP, Sync packet */
647                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
648                         ptp_v2 = PTP_TCR_TSVER2ENA;
649                         /* take time stamp for SYNC messages only */
650                         ts_event_en = PTP_TCR_TSEVNTENA;
651
652                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654                         break;
655
656                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
657                         /* PTP v2, UDP, Delay_req packet */
658                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
659                         ptp_v2 = PTP_TCR_TSVER2ENA;
660                         /* take time stamp for Delay_Req messages only */
661                         ts_master_en = PTP_TCR_TSMSTRENA;
662                         ts_event_en = PTP_TCR_TSEVNTENA;
663
664                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666                         break;
667
668                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
669                         /* PTP v2/802.1AS, any layer, any kind of event packet */
670                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
671                         ptp_v2 = PTP_TCR_TSVER2ENA;
672                         /* take time stamp for all event messages */
673                         if (xmac)
674                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
675                         else
676                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
677
678                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
679                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
680                         ptp_over_ethernet = PTP_TCR_TSIPENA;
681                         break;
682
683                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
684                         /* PTP v2/802.1AS, any layer, Sync packet */
685                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
686                         ptp_v2 = PTP_TCR_TSVER2ENA;
687                         /* take time stamp for SYNC messages only */
688                         ts_event_en = PTP_TCR_TSEVNTENA;
689
690                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
691                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
692                         ptp_over_ethernet = PTP_TCR_TSIPENA;
693                         break;
694
695                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
696                         /* PTP v2/802.1AS, any layer, Delay_req packet */
697                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
698                         ptp_v2 = PTP_TCR_TSVER2ENA;
699                         /* take time stamp for Delay_Req messages only */
700                         ts_master_en = PTP_TCR_TSMSTRENA;
701                         ts_event_en = PTP_TCR_TSEVNTENA;
702
703                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705                         ptp_over_ethernet = PTP_TCR_TSIPENA;
706                         break;
707
708                 case HWTSTAMP_FILTER_NTP_ALL:
709                 case HWTSTAMP_FILTER_ALL:
710                         /* time stamp any incoming packet */
711                         config.rx_filter = HWTSTAMP_FILTER_ALL;
712                         tstamp_all = PTP_TCR_TSENALL;
713                         break;
714
715                 default:
716                         return -ERANGE;
717                 }
718         } else {
719                 switch (config.rx_filter) {
720                 case HWTSTAMP_FILTER_NONE:
721                         config.rx_filter = HWTSTAMP_FILTER_NONE;
722                         break;
723                 default:
724                         /* PTP v1, UDP, any kind of event packet */
725                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
726                         break;
727                 }
728         }
729         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
730         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
731
732         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
733                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
734         else {
735                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
736                          tstamp_all | ptp_v2 | ptp_over_ethernet |
737                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
738                          ts_master_en | snap_type_sel);
739                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
740
741                 /* program Sub Second Increment reg */
742                 stmmac_config_sub_second_increment(priv,
743                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
744                                 xmac, &sec_inc);
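                /* freq_div_ratio = 1e9 ns / sec_inc; used below to derive the
                 * default addend.
                 */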
745                 temp = div_u64(1000000000ULL, sec_inc);
746
747                 /* Store sub second increment and flags for later use */
748                 priv->sub_second_inc = sec_inc;
749                 priv->systime_flags = value;
750
751                 /* calculate default addend value:
752                  * formula is :
753                  * addend = (2^32)/freq_div_ratio;
754                  * where, freq_div_ratio = 1e9ns/sec_inc
755                  */
756                 temp = (u64)(temp << 32);
757                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
758                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
759
760                 /* initialize system time */
761                 ktime_get_real_ts64(&now);
762
763                 /* lower 32 bits of tv_sec are safe until y2106 */
764                 stmmac_init_systime(priv, priv->ptpaddr,
765                                 (u32)now.tv_sec, now.tv_nsec);
766         }
767
768         return copy_to_user(ifr->ifr_data, &config,
769                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
770 }
771
772 /**
773  * stmmac_init_ptp - init PTP
774  * @priv: driver private structure
775  * Description: this verifies whether the HW supports PTPv1 or PTPv2.
776  * This is done by looking at the HW cap. register.
777  * This function also registers the ptp driver.
778  */
779 static int stmmac_init_ptp(struct stmmac_priv *priv)
780 {
781         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
782
783         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
784                 return -EOPNOTSUPP;
785
786         priv->adv_ts = 0;
787         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
788         if (xmac && priv->dma_cap.atime_stamp)
789                 priv->adv_ts = 1;
790         /* Dwmac 3.x core with extend_desc can support adv_ts */
791         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
792                 priv->adv_ts = 1;
793
794         if (priv->dma_cap.time_stamp)
795                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
796
797         if (priv->adv_ts)
798                 netdev_info(priv->dev,
799                             "IEEE 1588-2008 Advanced Timestamp supported\n");
800
801         priv->hwts_tx_en = 0;
802         priv->hwts_rx_en = 0;
803
804         stmmac_ptp_register(priv);
805
806         return 0;
807 }
808
809 static void stmmac_release_ptp(struct stmmac_priv *priv)
810 {
811         if (priv->plat->clk_ptp_ref)
812                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
813         stmmac_ptp_unregister(priv);
814 }
815
816 /**
817  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
818  *  @priv: driver private structure
819  *  Description: It is used for configuring the flow control in all queues
820  */
821 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
822 {
823         u32 tx_cnt = priv->plat->tx_queues_to_use;
824
825         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
826                         priv->pause, tx_cnt);
827 }
828
829 /**
830  * stmmac_adjust_link - adjusts the link parameters
831  * @dev: net device structure
832  * Description: this is the helper called by the physical abstraction layer
833  * drivers to communicate the phy link status. According to the speed and
834  * duplex, this driver can also invoke the registered glue-logic.
835  * It also invokes the EEE initialization because the interface can move
836  * between different networks (that are EEE capable).
837  */
838 static void stmmac_adjust_link(struct net_device *dev)
839 {
840         struct stmmac_priv *priv = netdev_priv(dev);
841         struct phy_device *phydev = dev->phydev;
842         bool new_state = false;
843
844         if (!phydev)
845                 return;
846
847         mutex_lock(&priv->lock);
848
849         if (phydev->link) {
850                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
851
852                 /* Now we make sure that we can be in full duplex mode.
853                  * If not, we operate in half-duplex mode. */
854                 if (phydev->duplex != priv->oldduplex) {
855                         new_state = true;
856                         if (!phydev->duplex)
857                                 ctrl &= ~priv->hw->link.duplex;
858                         else
859                                 ctrl |= priv->hw->link.duplex;
860                         priv->oldduplex = phydev->duplex;
861                 }
862                 /* Flow Control operation */
863                 if (phydev->pause)
864                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
865
866                 if (phydev->speed != priv->speed) {
867                         new_state = true;
868                         ctrl &= ~priv->hw->link.speed_mask;
869                         switch (phydev->speed) {
870                         case SPEED_1000:
871                                 ctrl |= priv->hw->link.speed1000;
872                                 break;
873                         case SPEED_100:
874                                 ctrl |= priv->hw->link.speed100;
875                                 break;
876                         case SPEED_10:
877                                 ctrl |= priv->hw->link.speed10;
878                                 break;
879                         default:
880                                 netif_warn(priv, link, priv->dev,
881                                            "broken speed: %d\n", phydev->speed);
882                                 phydev->speed = SPEED_UNKNOWN;
883                                 break;
884                         }
885                         if (phydev->speed != SPEED_UNKNOWN)
886                                 stmmac_hw_fix_mac_speed(priv);
887                         priv->speed = phydev->speed;
888                 }
889
890                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
891
892                 if (!priv->oldlink) {
893                         new_state = true;
894                         priv->oldlink = true;
895                 }
896         } else if (priv->oldlink) {
897                 new_state = true;
898                 priv->oldlink = false;
899                 priv->speed = SPEED_UNKNOWN;
900                 priv->oldduplex = DUPLEX_UNKNOWN;
901         }
902
903         if (new_state && netif_msg_link(priv))
904                 phy_print_status(phydev);
905
906         mutex_unlock(&priv->lock);
907
908         if (phydev->is_pseudo_fixed_link)
909                 /* Stop the PHY layer from calling the adjust_link hook in
910                  * case a switch is attached to the stmmac driver.
911                  */
912                 phydev->irq = PHY_IGNORE_INTERRUPT;
913         else
914                 /* At this stage, init the EEE if supported.
915                  * Never called in case of fixed_link.
916                  */
917                 priv->eee_enabled = stmmac_eee_init(priv);
918 }
919
920 /**
921  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
922  * @priv: driver private structure
923  * Description: this verifies whether the HW supports the Physical Coding
924  * Sublayer (PCS), an interface that can be used when the MAC is
925  * configured for the TBI, RTBI, or SGMII PHY interface.
926  */
927 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
928 {
929         int interface = priv->plat->interface;
930
931         if (priv->dma_cap.pcs) {
932                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
933                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
934                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
935                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
936                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
937                         priv->hw->pcs = STMMAC_PCS_RGMII;
938                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
939                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
940                         priv->hw->pcs = STMMAC_PCS_SGMII;
941                 }
942         }
943 }
944
945 /**
946  * stmmac_init_phy - PHY initialization
947  * @dev: net device structure
948  * Description: it initializes the driver's PHY state, and attaches the PHY
949  * to the mac driver.
950  *  Return value:
951  *  0 on success
952  */
953 static int stmmac_init_phy(struct net_device *dev)
954 {
955         struct stmmac_priv *priv = netdev_priv(dev);
956         u32 tx_cnt = priv->plat->tx_queues_to_use;
957         struct phy_device *phydev;
958         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
959         char bus_id[MII_BUS_ID_SIZE];
960         int interface = priv->plat->interface;
961         int max_speed = priv->plat->max_speed;
962         priv->oldlink = false;
963         priv->speed = SPEED_UNKNOWN;
964         priv->oldduplex = DUPLEX_UNKNOWN;
965
966         if (priv->plat->phy_node) {
967                 phydev = of_phy_connect(dev, priv->plat->phy_node,
968                                         &stmmac_adjust_link, 0, interface);
969         } else {
970                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
971                          priv->plat->bus_id);
972
973                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
974                          priv->plat->phy_addr);
975                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
976                            phy_id_fmt);
977
978                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
979                                      interface);
980         }
981
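        /* of_phy_connect() returns NULL on failure while phy_connect()
         * returns an ERR_PTR, so handle both cases here.
         */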
982         if (IS_ERR_OR_NULL(phydev)) {
983                 netdev_err(priv->dev, "Could not attach to PHY\n");
984                 if (!phydev)
985                         return -ENODEV;
986
987                 return PTR_ERR(phydev);
988         }
989
990         /* Stop Advertising 1000BASE Capability if interface is not GMII */
991         if ((interface == PHY_INTERFACE_MODE_MII) ||
992             (interface == PHY_INTERFACE_MODE_RMII) ||
993                 (max_speed < 1000 && max_speed > 0))
994                 phy_set_max_speed(phydev, SPEED_100);
995
996         /*
997          * Half-duplex mode is not supported with multiple queues:
998          * half-duplex can only work with a single queue.
999          */
1000         if (tx_cnt > 1) {
1001                 phy_remove_link_mode(phydev,
1002                                      ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1003                 phy_remove_link_mode(phydev,
1004                                      ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1005                 phy_remove_link_mode(phydev,
1006                                      ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1007         }
1008
1009         /*
1010          * Broken HW is sometimes missing the pull-up resistor on the
1011          * MDIO line, which results in reads to non-existent devices returning
1012          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1013          * device as well.
1014          * Note: phydev->phy_id is the result of reading the UID PHY registers.
1015          */
1016         if (!priv->plat->phy_node && phydev->phy_id == 0) {
1017                 phy_disconnect(phydev);
1018                 return -ENODEV;
1019         }
1020
1021         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1022  * subsequent PHY polling; make sure we force a link transition if
1023  * we have an UP/DOWN/UP transition
1024          */
1025         if (phydev->is_pseudo_fixed_link)
1026                 phydev->irq = PHY_POLL;
1027
1028         phy_attached_info(phydev);
1029         return 0;
1030 }
1031
1032 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1033 {
1034         u32 rx_cnt = priv->plat->rx_queues_to_use;
1035         void *head_rx;
1036         u32 queue;
1037
1038         /* Display RX rings */
1039         for (queue = 0; queue < rx_cnt; queue++) {
1040                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1041
1042                 pr_info("\tRX Queue %u rings\n", queue);
1043
1044                 if (priv->extend_desc)
1045                         head_rx = (void *)rx_q->dma_erx;
1046                 else
1047                         head_rx = (void *)rx_q->dma_rx;
1048
1049                 /* Display RX ring */
1050                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1051         }
1052 }
1053
1054 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1055 {
1056         u32 tx_cnt = priv->plat->tx_queues_to_use;
1057         void *head_tx;
1058         u32 queue;
1059
1060         /* Display TX rings */
1061         for (queue = 0; queue < tx_cnt; queue++) {
1062                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1063
1064                 pr_info("\tTX Queue %u rings\n", queue);
1065
1066                 if (priv->extend_desc)
1067                         head_tx = (void *)tx_q->dma_etx;
1068                 else
1069                         head_tx = (void *)tx_q->dma_tx;
1070
1071                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1072         }
1073 }
1074
1075 static void stmmac_display_rings(struct stmmac_priv *priv)
1076 {
1077         /* Display RX ring */
1078         stmmac_display_rx_rings(priv);
1079
1080         /* Display TX ring */
1081         stmmac_display_tx_rings(priv);
1082 }
1083
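/* Map the MTU onto one of the supported DMA buffer size buckets (2KiB, 4KiB,
 * 8KiB), falling back to DEFAULT_BUFSIZE for small MTUs.
 */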
1084 static int stmmac_set_bfsize(int mtu, int bufsize)
1085 {
1086         int ret = bufsize;
1087
1088         if (mtu >= BUF_SIZE_4KiB)
1089                 ret = BUF_SIZE_8KiB;
1090         else if (mtu >= BUF_SIZE_2KiB)
1091                 ret = BUF_SIZE_4KiB;
1092         else if (mtu > DEFAULT_BUFSIZE)
1093                 ret = BUF_SIZE_2KiB;
1094         else
1095                 ret = DEFAULT_BUFSIZE;
1096
1097         return ret;
1098 }
1099
1100 /**
1101  * stmmac_clear_rx_descriptors - clear RX descriptors
1102  * @priv: driver private structure
1103  * @queue: RX queue index
1104  * Description: this function is called to clear the RX descriptors
1105  * whether basic or extended descriptors are in use.
1106  */
1107 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1108 {
1109         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1110         int i;
1111
1112         /* Clear the RX descriptors */
1113         for (i = 0; i < DMA_RX_SIZE; i++)
1114                 if (priv->extend_desc)
1115                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1116                                         priv->use_riwt, priv->mode,
1117                                         (i == DMA_RX_SIZE - 1));
1118                 else
1119                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1120                                         priv->use_riwt, priv->mode,
1121                                         (i == DMA_RX_SIZE - 1));
1122 }
1123
1124 /**
1125  * stmmac_clear_tx_descriptors - clear tx descriptors
1126  * @priv: driver private structure
1127  * @queue: TX queue index.
1128  * Description: this function is called to clear the TX descriptors
1129  * whether basic or extended descriptors are in use.
1130  */
1131 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1132 {
1133         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1134         int i;
1135
1136         /* Clear the TX descriptors */
1137         for (i = 0; i < DMA_TX_SIZE; i++)
1138                 if (priv->extend_desc)
1139                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1140                                         priv->mode, (i == DMA_TX_SIZE - 1));
1141                 else
1142                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1143                                         priv->mode, (i == DMA_TX_SIZE - 1));
1144 }
1145
1146 /**
1147  * stmmac_clear_descriptors - clear descriptors
1148  * @priv: driver private structure
1149  * Description: this function is called to clear the TX and RX descriptors
1150  * whether basic or extended descriptors are in use.
1151  */
1152 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1153 {
1154         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1155         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1156         u32 queue;
1157
1158         /* Clear the RX descriptors */
1159         for (queue = 0; queue < rx_queue_cnt; queue++)
1160                 stmmac_clear_rx_descriptors(priv, queue);
1161
1162         /* Clear the TX descriptors */
1163         for (queue = 0; queue < tx_queue_cnt; queue++)
1164                 stmmac_clear_tx_descriptors(priv, queue);
1165 }
1166
1167 /**
1168  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1169  * @priv: driver private structure
1170  * @p: descriptor pointer
1171  * @i: descriptor index
1172  * @flags: gfp flag
1173  * @queue: RX queue index
1174  * Description: this function is called to allocate a receive buffer, perform
1175  * the DMA mapping and init the descriptor.
1176  */
1177 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1178                                   int i, gfp_t flags, u32 queue)
1179 {
1180         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1181         struct sk_buff *skb;
1182
1183         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1184         if (!skb) {
1185                 netdev_err(priv->dev,
1186                            "%s: Rx init fails; skb is NULL\n", __func__);
1187                 return -ENOMEM;
1188         }
1189         rx_q->rx_skbuff[i] = skb;
1190         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1191                                                 priv->dma_buf_sz,
1192                                                 DMA_FROM_DEVICE);
1193         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1194                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1195                 dev_kfree_skb_any(skb);
1196                 return -EINVAL;
1197         }
1198
1199         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1200
1201         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1202                 stmmac_init_desc3(priv, p);
1203
1204         return 0;
1205 }
1206
1207 /**
1208  * stmmac_free_rx_buffer - free RX dma buffers
1209  * @priv: private structure
1210  * @queue: RX queue index
1211  * @i: buffer index.
1212  */
1213 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1214 {
1215         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1216
1217         if (rx_q->rx_skbuff[i]) {
1218                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1219                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1220                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1221         }
1222         rx_q->rx_skbuff[i] = NULL;
1223 }
1224
1225 /**
1226  * stmmac_free_tx_buffer - free TX dma buffers
1227  * @priv: private structure
1228  * @queue: TX queue index
1229  * @i: buffer index.
1230  */
1231 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1232 {
1233         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1234
1235         if (tx_q->tx_skbuff_dma[i].buf) {
1236                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1237                         dma_unmap_page(priv->device,
1238                                        tx_q->tx_skbuff_dma[i].buf,
1239                                        tx_q->tx_skbuff_dma[i].len,
1240                                        DMA_TO_DEVICE);
1241                 else
1242                         dma_unmap_single(priv->device,
1243                                          tx_q->tx_skbuff_dma[i].buf,
1244                                          tx_q->tx_skbuff_dma[i].len,
1245                                          DMA_TO_DEVICE);
1246         }
1247
1248         if (tx_q->tx_skbuff[i]) {
1249                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1250                 tx_q->tx_skbuff[i] = NULL;
1251                 tx_q->tx_skbuff_dma[i].buf = 0;
1252                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1253         }
1254 }
1255
1256 /**
1257  * init_dma_rx_desc_rings - init the RX descriptor rings
1258  * @dev: net device structure
1259  * @flags: gfp flag.
1260  * Description: this function initializes the DMA RX descriptors
1261  * and allocates the socket buffers. It supports the chained and ring
1262  * modes.
1263  */
1264 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1265 {
1266         struct stmmac_priv *priv = netdev_priv(dev);
1267         u32 rx_count = priv->plat->rx_queues_to_use;
1268         int ret = -ENOMEM;
1269         int bfsize = 0;
1270         int queue;
1271         int i;
1272
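        /* The descriptor-mode hook may request a 16KiB buffer for jumbo
         * frames; otherwise the buffer size is derived from the MTU.
         */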
1273         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1274         if (bfsize < 0)
1275                 bfsize = 0;
1276
1277         if (bfsize < BUF_SIZE_16KiB)
1278                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1279
1280         priv->dma_buf_sz = bfsize;
1281
1282         /* RX INITIALIZATION */
1283         netif_dbg(priv, probe, priv->dev,
1284                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1285
1286         for (queue = 0; queue < rx_count; queue++) {
1287                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1288
1289                 netif_dbg(priv, probe, priv->dev,
1290                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1291                           (u32)rx_q->dma_rx_phy);
1292
1293                 for (i = 0; i < DMA_RX_SIZE; i++) {
1294                         struct dma_desc *p;
1295
1296                         if (priv->extend_desc)
1297                                 p = &((rx_q->dma_erx + i)->basic);
1298                         else
1299                                 p = rx_q->dma_rx + i;
1300
1301                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1302                                                      queue);
1303                         if (ret)
1304                                 goto err_init_rx_buffers;
1305
1306                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1307                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1308                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1309                 }
1310
1311                 rx_q->cur_rx = 0;
1312                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1313
1314                 stmmac_clear_rx_descriptors(priv, queue);
1315
1316                 /* Setup the chained descriptor addresses */
1317                 if (priv->mode == STMMAC_CHAIN_MODE) {
1318                         if (priv->extend_desc)
1319                                 stmmac_mode_init(priv, rx_q->dma_erx,
1320                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1321                         else
1322                                 stmmac_mode_init(priv, rx_q->dma_rx,
1323                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1324                 }
1325         }
1326
1327         buf_sz = bfsize;
1328
1329         return 0;
1330
1331 err_init_rx_buffers:
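        /* Unwind: free the buffers allocated so far, walking back from the
         * failing index in the current queue down to queue 0.
         */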
1332         while (queue >= 0) {
1333                 while (--i >= 0)
1334                         stmmac_free_rx_buffer(priv, queue, i);
1335
1336                 if (queue == 0)
1337                         break;
1338
1339                 i = DMA_RX_SIZE;
1340                 queue--;
1341         }
1342
1343         return ret;
1344 }
1345
1346 /**
1347  * init_dma_tx_desc_rings - init the TX descriptor rings
1348  * @dev: net device structure.
1349  * Description: this function initializes the DMA TX descriptors and the
1350  * TX buffer bookkeeping. It supports the chained and ring
1351  * modes.
1352  */
1353 static int init_dma_tx_desc_rings(struct net_device *dev)
1354 {
1355         struct stmmac_priv *priv = netdev_priv(dev);
1356         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1357         u32 queue;
1358         int i;
1359
1360         for (queue = 0; queue < tx_queue_cnt; queue++) {
1361                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1362
1363                 netif_dbg(priv, probe, priv->dev,
1364                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1365                          (u32)tx_q->dma_tx_phy);
1366
1367                 /* Setup the chained descriptor addresses */
1368                 if (priv->mode == STMMAC_CHAIN_MODE) {
1369                         if (priv->extend_desc)
1370                                 stmmac_mode_init(priv, tx_q->dma_etx,
1371                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1372                         else
1373                                 stmmac_mode_init(priv, tx_q->dma_tx,
1374                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1375                 }
1376
1377                 for (i = 0; i < DMA_TX_SIZE; i++) {
1378                         struct dma_desc *p;
1379                         if (priv->extend_desc)
1380                                 p = &((tx_q->dma_etx + i)->basic);
1381                         else
1382                                 p = tx_q->dma_tx + i;
1383
1384                         stmmac_clear_desc(priv, p);
1385
1386                         tx_q->tx_skbuff_dma[i].buf = 0;
1387                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1388                         tx_q->tx_skbuff_dma[i].len = 0;
1389                         tx_q->tx_skbuff_dma[i].last_segment = false;
1390                         tx_q->tx_skbuff[i] = NULL;
1391                 }
1392
1393                 tx_q->dirty_tx = 0;
1394                 tx_q->cur_tx = 0;
1395                 tx_q->mss = 0;
1396
1397                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1398         }
1399
1400         return 0;
1401 }
1402
1403 /**
1404  * init_dma_desc_rings - init the RX/TX descriptor rings
1405  * @dev: net device structure
1406  * @flags: gfp flag.
1407  * Description: this function initializes the DMA RX/TX descriptors
1408  * and allocates the socket buffers. It supports the chained and ring
1409  * modes.
1410  */
1411 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1412 {
1413         struct stmmac_priv *priv = netdev_priv(dev);
1414         int ret;
1415
1416         ret = init_dma_rx_desc_rings(dev, flags);
1417         if (ret)
1418                 return ret;
1419
1420         ret = init_dma_tx_desc_rings(dev);
1421
1422         stmmac_clear_descriptors(priv);
1423
1424         if (netif_msg_hw(priv))
1425                 stmmac_display_rings(priv);
1426
1427         return ret;
1428 }
1429
1430 /**
1431  * dma_free_rx_skbufs - free RX dma buffers
1432  * @priv: private structure
1433  * @queue: RX queue index
1434  */
1435 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1436 {
1437         int i;
1438
1439         for (i = 0; i < DMA_RX_SIZE; i++)
1440                 stmmac_free_rx_buffer(priv, queue, i);
1441 }
1442
1443 /**
1444  * dma_free_tx_skbufs - free TX dma buffers
1445  * @priv: private structure
1446  * @queue: TX queue index
1447  */
1448 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1449 {
1450         int i;
1451
1452         for (i = 0; i < DMA_TX_SIZE; i++)
1453                 stmmac_free_tx_buffer(priv, queue, i);
1454 }
1455
1456 /**
1457  * free_dma_rx_desc_resources - free RX dma desc resources
1458  * @priv: private structure
1459  */
1460 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1461 {
1462         u32 rx_count = priv->plat->rx_queues_to_use;
1463         u32 queue;
1464
1465         /* Free RX queue resources */
1466         for (queue = 0; queue < rx_count; queue++) {
1467                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1468
1469                 /* Release the DMA RX socket buffers */
1470                 dma_free_rx_skbufs(priv, queue);
1471
1472                 /* Free DMA regions of consistent memory previously allocated */
1473                 if (!priv->extend_desc)
1474                         dma_free_coherent(priv->device,
1475                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1476                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1477                 else
1478                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1479                                           sizeof(struct dma_extended_desc),
1480                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1481
1482                 kfree(rx_q->rx_skbuff_dma);
1483                 kfree(rx_q->rx_skbuff);
1484         }
1485 }
1486
1487 /**
1488  * free_dma_tx_desc_resources - free TX dma desc resources
1489  * @priv: private structure
1490  */
1491 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1492 {
1493         u32 tx_count = priv->plat->tx_queues_to_use;
1494         u32 queue;
1495
1496         /* Free TX queue resources */
1497         for (queue = 0; queue < tx_count; queue++) {
1498                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1499
1500                 /* Release the DMA TX socket buffers */
1501                 dma_free_tx_skbufs(priv, queue);
1502
1503                 /* Free DMA regions of consistent memory previously allocated */
1504                 if (!priv->extend_desc)
1505                         dma_free_coherent(priv->device,
1506                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1507                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1508                 else
1509                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1510                                           sizeof(struct dma_extended_desc),
1511                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1512
1513                 kfree(tx_q->tx_skbuff_dma);
1514                 kfree(tx_q->tx_skbuff);
1515         }
1516 }
1517
1518 /**
1519  * alloc_dma_rx_desc_resources - alloc RX resources.
1520  * @priv: private structure
1521  * Description: according to which descriptor can be used (extend or basic)
1522  * this function allocates the resources for the RX path. It pre-allocates
1523  * the RX socket buffers in order to allow a zero-copy
1524  * mechanism.
1525  */
1526 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1527 {
1528         u32 rx_count = priv->plat->rx_queues_to_use;
1529         int ret = -ENOMEM;
1530         u32 queue;
1531
1532         /* RX queues buffers and DMA */
1533         for (queue = 0; queue < rx_count; queue++) {
1534                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1535
1536                 rx_q->queue_index = queue;
1537                 rx_q->priv_data = priv;
1538
1539                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1540                                                     sizeof(dma_addr_t),
1541                                                     GFP_KERNEL);
1542                 if (!rx_q->rx_skbuff_dma)
1543                         goto err_dma;
1544
1545                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1546                                                 sizeof(struct sk_buff *),
1547                                                 GFP_KERNEL);
1548                 if (!rx_q->rx_skbuff)
1549                         goto err_dma;
1550
1551                 if (priv->extend_desc) {
1552                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1553                                                             DMA_RX_SIZE *
1554                                                             sizeof(struct
1555                                                             dma_extended_desc),
1556                                                             &rx_q->dma_rx_phy,
1557                                                             GFP_KERNEL);
1558                         if (!rx_q->dma_erx)
1559                                 goto err_dma;
1560
1561                 } else {
1562                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1563                                                            DMA_RX_SIZE *
1564                                                            sizeof(struct
1565                                                            dma_desc),
1566                                                            &rx_q->dma_rx_phy,
1567                                                            GFP_KERNEL);
1568                         if (!rx_q->dma_rx)
1569                                 goto err_dma;
1570                 }
1571         }
1572
1573         return 0;
1574
1575 err_dma:
1576         free_dma_rx_desc_resources(priv);
1577
1578         return ret;
1579 }
1580
1581 /**
1582  * alloc_dma_tx_desc_resources - alloc TX resources.
1583  * @priv: private structure
1584  * Description: according to which descriptor can be used (extend or basic)
1585  * this function allocates the resources for the TX path: the descriptor
1586  * rings in coherent memory and the per-entry arrays used to track the
1587  * socket buffers and their DMA mappings.
1588  */
1589 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1590 {
1591         u32 tx_count = priv->plat->tx_queues_to_use;
1592         int ret = -ENOMEM;
1593         u32 queue;
1594
1595         /* TX queues buffers and DMA */
1596         for (queue = 0; queue < tx_count; queue++) {
1597                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1598
1599                 tx_q->queue_index = queue;
1600                 tx_q->priv_data = priv;
1601
1602                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1603                                                     sizeof(*tx_q->tx_skbuff_dma),
1604                                                     GFP_KERNEL);
1605                 if (!tx_q->tx_skbuff_dma)
1606                         goto err_dma;
1607
1608                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1609                                                 sizeof(struct sk_buff *),
1610                                                 GFP_KERNEL);
1611                 if (!tx_q->tx_skbuff)
1612                         goto err_dma;
1613
1614                 if (priv->extend_desc) {
1615                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1616                                                             DMA_TX_SIZE *
1617                                                             sizeof(struct
1618                                                             dma_extended_desc),
1619                                                             &tx_q->dma_tx_phy,
1620                                                             GFP_KERNEL);
1621                         if (!tx_q->dma_etx)
1622                                 goto err_dma;
1623                 } else {
1624                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1625                                                            DMA_TX_SIZE *
1626                                                            sizeof(struct
1627                                                                   dma_desc),
1628                                                            &tx_q->dma_tx_phy,
1629                                                            GFP_KERNEL);
1630                         if (!tx_q->dma_tx)
1631                                 goto err_dma;
1632                 }
1633         }
1634
1635         return 0;
1636
1637 err_dma:
1638         free_dma_tx_desc_resources(priv);
1639
1640         return ret;
1641 }
1642
1643 /**
1644  * alloc_dma_desc_resources - alloc TX/RX resources.
1645  * @priv: private structure
1646  * Description: according to which descriptor can be used (extend or basic)
1647  * this function allocates the resources for TX and RX paths. In case of
1648  * reception, for example, it pre-allocates the RX socket buffers in order to
1649  * allow a zero-copy mechanism.
1650  */
1651 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1652 {
1653         /* RX Allocation */
1654         int ret = alloc_dma_rx_desc_resources(priv);
1655
1656         if (ret)
1657                 return ret;
1658
1659         ret = alloc_dma_tx_desc_resources(priv);
1660
1661         return ret;
1662 }
1663
1664 /**
1665  * free_dma_desc_resources - free dma desc resources
1666  * @priv: private structure
1667  */
1668 static void free_dma_desc_resources(struct stmmac_priv *priv)
1669 {
1670         /* Release the DMA RX socket buffers */
1671         free_dma_rx_desc_resources(priv);
1672
1673         /* Release the DMA TX socket buffers */
1674         free_dma_tx_desc_resources(priv);
1675 }
1676
1677 /**
1678  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1679  *  @priv: driver private structure
1680  *  Description: It is used for enabling the rx queues in the MAC
1681  */
1682 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1683 {
1684         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1685         int queue;
1686         u8 mode;
1687
1688         for (queue = 0; queue < rx_queues_count; queue++) {
1689                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1690                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1691         }
1692 }
1693
1694 /**
1695  * stmmac_start_rx_dma - start RX DMA channel
1696  * @priv: driver private structure
1697  * @chan: RX channel index
1698  * Description:
1699  * This starts an RX DMA channel
1700  */
1701 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1702 {
1703         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1704         stmmac_start_rx(priv, priv->ioaddr, chan);
1705 }
1706
1707 /**
1708  * stmmac_start_tx_dma - start TX DMA channel
1709  * @priv: driver private structure
1710  * @chan: TX channel index
1711  * Description:
1712  * This starts a TX DMA channel
1713  */
1714 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1715 {
1716         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1717         stmmac_start_tx(priv, priv->ioaddr, chan);
1718 }
1719
1720 /**
1721  * stmmac_stop_rx_dma - stop RX DMA channel
1722  * @priv: driver private structure
1723  * @chan: RX channel index
1724  * Description:
1725  * This stops an RX DMA channel
1726  */
1727 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1728 {
1729         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1730         stmmac_stop_rx(priv, priv->ioaddr, chan);
1731 }
1732
1733 /**
1734  * stmmac_stop_tx_dma - stop TX DMA channel
1735  * @priv: driver private structure
1736  * @chan: TX channel index
1737  * Description:
1738  * This stops a TX DMA channel
1739  */
1740 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1741 {
1742         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1743         stmmac_stop_tx(priv, priv->ioaddr, chan);
1744 }
1745
1746 /**
1747  * stmmac_start_all_dma - start all RX and TX DMA channels
1748  * @priv: driver private structure
1749  * Description:
1750  * This starts all the RX and TX DMA channels
1751  */
1752 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1753 {
1754         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1755         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1756         u32 chan = 0;
1757
1758         for (chan = 0; chan < rx_channels_count; chan++)
1759                 stmmac_start_rx_dma(priv, chan);
1760
1761         for (chan = 0; chan < tx_channels_count; chan++)
1762                 stmmac_start_tx_dma(priv, chan);
1763 }
1764
1765 /**
1766  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1767  * @priv: driver private structure
1768  * Description:
1769  * This stops the RX and TX DMA channels
1770  */
1771 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1772 {
1773         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1774         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1775         u32 chan = 0;
1776
1777         for (chan = 0; chan < rx_channels_count; chan++)
1778                 stmmac_stop_rx_dma(priv, chan);
1779
1780         for (chan = 0; chan < tx_channels_count; chan++)
1781                 stmmac_stop_tx_dma(priv, chan);
1782 }
1783
1784 /**
1785  *  stmmac_dma_operation_mode - HW DMA operation mode
1786  *  @priv: driver private structure
1787  *  Description: it is used for configuring the DMA operation mode register in
1788  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1789  */
1790 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1791 {
1792         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1793         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1794         int rxfifosz = priv->plat->rx_fifo_size;
1795         int txfifosz = priv->plat->tx_fifo_size;
1796         u32 txmode = 0;
1797         u32 rxmode = 0;
1798         u32 chan = 0;
1799         u8 qmode = 0;
1800
1801         if (rxfifosz == 0)
1802                 rxfifosz = priv->dma_cap.rx_fifo_size;
1803         if (txfifosz == 0)
1804                 txfifosz = priv->dma_cap.tx_fifo_size;
1805
1806         /* Adjust for real per queue fifo size */
1807         rxfifosz /= rx_channels_count;
1808         txfifosz /= tx_channels_count;
1809
1810         if (priv->plat->force_thresh_dma_mode) {
1811                 txmode = tc;
1812                 rxmode = tc;
1813         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1814                 /*
1815                  * In case of GMAC, SF mode can be enabled
1816                  * to perform the TX COE in HW. This depends on:
1817                  * 1) TX COE if actually supported
1818                  * 2) There is no buggy Jumbo frame support
1819                  *    that requires the csum not to be inserted in the TDES.
1820                  */
1821                 txmode = SF_DMA_MODE;
1822                 rxmode = SF_DMA_MODE;
1823                 priv->xstats.threshold = SF_DMA_MODE;
1824         } else {
1825                 txmode = tc;
1826                 rxmode = SF_DMA_MODE;
1827         }
1828
1829         /* configure all channels */
1830         for (chan = 0; chan < rx_channels_count; chan++) {
1831                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1832
1833                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1834                                 rxfifosz, qmode);
1835                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1836                                 chan);
1837         }
1838
1839         for (chan = 0; chan < tx_channels_count; chan++) {
1840                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1841
1842                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1843                                 txfifosz, qmode);
1844         }
1845 }
1846
1847 /**
1848  * stmmac_tx_clean - to manage the transmission completion
1849  * @priv: driver private structure
1850  * @queue: TX queue index
1851  * Description: it reclaims the transmit resources after transmission completes.
1852  */
1853 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1854 {
1855         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1856         unsigned int bytes_compl = 0, pkts_compl = 0;
1857         unsigned int entry, count = 0;
1858
1859         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1860
1861         priv->xstats.tx_clean++;
1862
1863         entry = tx_q->dirty_tx;
1864         while ((entry != tx_q->cur_tx) && (count < budget)) {
1865                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1866                 struct dma_desc *p;
1867                 int status;
1868
1869                 if (priv->extend_desc)
1870                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1871                 else
1872                         p = tx_q->dma_tx + entry;
1873
1874                 status = stmmac_tx_status(priv, &priv->dev->stats,
1875                                 &priv->xstats, p, priv->ioaddr);
1876                 /* Check if the descriptor is owned by the DMA */
1877                 if (unlikely(status & tx_dma_own))
1878                         break;
1879
1880                 count++;
1881
1882                 /* Make sure descriptor fields are read after reading
1883                  * the own bit.
1884                  */
1885                 dma_rmb();
1886
1887                 /* Just consider the last segment and ...*/
1888                 if (likely(!(status & tx_not_ls))) {
1889                         /* ... verify the status error condition */
1890                         if (unlikely(status & tx_err)) {
1891                                 priv->dev->stats.tx_errors++;
1892                         } else {
1893                                 priv->dev->stats.tx_packets++;
1894                                 priv->xstats.tx_pkt_n++;
1895                         }
1896                         stmmac_get_tx_hwtstamp(priv, p, skb);
1897                 }
1898
1899                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1900                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1901                                 dma_unmap_page(priv->device,
1902                                                tx_q->tx_skbuff_dma[entry].buf,
1903                                                tx_q->tx_skbuff_dma[entry].len,
1904                                                DMA_TO_DEVICE);
1905                         else
1906                                 dma_unmap_single(priv->device,
1907                                                  tx_q->tx_skbuff_dma[entry].buf,
1908                                                  tx_q->tx_skbuff_dma[entry].len,
1909                                                  DMA_TO_DEVICE);
1910                         tx_q->tx_skbuff_dma[entry].buf = 0;
1911                         tx_q->tx_skbuff_dma[entry].len = 0;
1912                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1913                 }
1914
1915                 stmmac_clean_desc3(priv, tx_q, p);
1916
1917                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1918                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1919
1920                 if (likely(skb != NULL)) {
1921                         pkts_compl++;
1922                         bytes_compl += skb->len;
1923                         dev_consume_skb_any(skb);
1924                         tx_q->tx_skbuff[entry] = NULL;
1925                 }
1926
1927                 stmmac_release_tx_desc(priv, p, priv->mode);
1928
1929                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1930         }
1931         tx_q->dirty_tx = entry;
1932
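             /* Report the reclaimed packets and bytes to the stack (BQL accounting). */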
1933         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1934                                   pkts_compl, bytes_compl);
1935
1936         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1937                                                                 queue))) &&
1938             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1939
1940                 netif_dbg(priv, tx_done, priv->dev,
1941                           "%s: restart transmit\n", __func__);
1942                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1943         }
1944
1945         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1946                 stmmac_enable_eee_mode(priv);
1947                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1948         }
1949
1950         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1951
1952         return count;
1953 }
1954
1955 /**
1956  * stmmac_tx_err - to manage the tx error
1957  * @priv: driver private structure
1958  * @chan: channel index
1959  * Description: it cleans the descriptors and restarts the transmission
1960  * in case of transmission errors.
1961  */
1962 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1963 {
1964         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1965         int i;
1966
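             /* Recover from the error: stop the channel, drop all pending buffers,
              * re-initialize the descriptor ring and restart the DMA before waking
              * the queue again.
              */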
1967         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1968
1969         stmmac_stop_tx_dma(priv, chan);
1970         dma_free_tx_skbufs(priv, chan);
1971         for (i = 0; i < DMA_TX_SIZE; i++)
1972                 if (priv->extend_desc)
1973                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1974                                         priv->mode, (i == DMA_TX_SIZE - 1));
1975                 else
1976                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1977                                         priv->mode, (i == DMA_TX_SIZE - 1));
1978         tx_q->dirty_tx = 0;
1979         tx_q->cur_tx = 0;
1980         tx_q->mss = 0;
1981         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1982         stmmac_start_tx_dma(priv, chan);
1983
1984         priv->dev->stats.tx_errors++;
1985         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1986 }
1987
1988 /**
1989  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1990  *  @priv: driver private structure
1991  *  @txmode: TX operating mode
1992  *  @rxmode: RX operating mode
1993  *  @chan: channel index
1994  *  Description: it is used for configuring the DMA operation mode at
1995  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1996  *  mode.
1997  */
1998 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1999                                           u32 rxmode, u32 chan)
2000 {
2001         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2002         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2003         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2004         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2005         int rxfifosz = priv->plat->rx_fifo_size;
2006         int txfifosz = priv->plat->tx_fifo_size;
2007
2008         if (rxfifosz == 0)
2009                 rxfifosz = priv->dma_cap.rx_fifo_size;
2010         if (txfifosz == 0)
2011                 txfifosz = priv->dma_cap.tx_fifo_size;
2012
2013         /* Adjust for real per queue fifo size */
2014         rxfifosz /= rx_channels_count;
2015         txfifosz /= tx_channels_count;
2016
2017         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2018         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2019 }
2020
2021 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2022 {
2023         int ret;
2024
2025         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2026                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2027         if (ret && (ret != -EINVAL)) {
2028                 stmmac_global_err(priv);
2029                 return true;
2030         }
2031
2032         return false;
2033 }
2034
2035 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2036 {
2037         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2038                                                  &priv->xstats, chan);
2039         struct stmmac_channel *ch = &priv->channel[chan];
2040         bool needs_work = false;
2041
2042         if ((status & handle_rx) && ch->has_rx) {
2043                 needs_work = true;
2044         } else {
2045                 status &= ~handle_rx;
2046         }
2047
2048         if ((status & handle_tx) && ch->has_tx) {
2049                 needs_work = true;
2050         } else {
2051                 status &= ~handle_tx;
2052         }
2053
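             /* Mask the DMA interrupt for this channel while NAPI is scheduled,
              * so the poll routine can re-enable it once the work is done.
              */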
2054         if (needs_work && napi_schedule_prep(&ch->napi)) {
2055                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2056                 __napi_schedule(&ch->napi);
2057         }
2058
2059         return status;
2060 }
2061
2062 /**
2063  * stmmac_dma_interrupt - DMA ISR
2064  * @priv: driver private structure
2065  * Description: this is the DMA ISR. It is called by the main ISR.
2066  * It calls the dwmac dma routine and schedules the poll method if some
2067  * work can be done.
2068  */
2069 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2070 {
2071         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2072         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2073         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2074                                 tx_channel_count : rx_channel_count;
2075         u32 chan;
2076         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2077
2078         /* Make sure we never check beyond our status buffer. */
2079         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2080                 channels_to_check = ARRAY_SIZE(status);
2081
2082         for (chan = 0; chan < channels_to_check; chan++)
2083                 status[chan] = stmmac_napi_check(priv, chan);
2084
2085         for (chan = 0; chan < tx_channel_count; chan++) {
2086                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2087                         /* Try to bump up the dma threshold on this failure */
2088                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2089                             (tc <= 256)) {
2090                                 tc += 64;
2091                                 if (priv->plat->force_thresh_dma_mode)
2092                                         stmmac_set_dma_operation_mode(priv,
2093                                                                       tc,
2094                                                                       tc,
2095                                                                       chan);
2096                                 else
2097                                         stmmac_set_dma_operation_mode(priv,
2098                                                                     tc,
2099                                                                     SF_DMA_MODE,
2100                                                                     chan);
2101                                 priv->xstats.threshold = tc;
2102                         }
2103                 } else if (unlikely(status[chan] == tx_hard_error)) {
2104                         stmmac_tx_err(priv, chan);
2105                 }
2106         }
2107 }
2108
2109 /**
2110  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2111  * @priv: driver private structure
2112  * Description: this masks the MMC irq since the counters are managed in SW.
2113  */
2114 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2115 {
2116         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2117                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2118
2119         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2120
2121         if (priv->dma_cap.rmon) {
2122                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2123                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2124         } else
2125                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2126 }
2127
2128 /**
2129  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2130  * @priv: driver private structure
2131  * Description:
2132  *  new GMAC chip generations have a new register to indicate the
2133  *  presence of the optional feature/functions.
2134  *  This can also be used to override the value passed through the
2135  *  platform, which is still necessary for old MAC10/100 and GMAC chips.
2136  */
2137 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2138 {
2139         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2140 }
2141
2142 /**
2143  * stmmac_check_ether_addr - check if the MAC addr is valid
2144  * @priv: driver private structure
2145  * Description:
2146  * it verifies that the MAC address is valid; in case of failure it
2147  * generates a random MAC address
2148  */
2149 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2150 {
2151         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
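                     /* Fall back to the address already programmed in the MAC and,
                      * if that one is not valid either, to a random address.
                      */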
2152                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2153                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2154                         eth_hw_addr_random(priv->dev);
2155                 netdev_info(priv->dev, "device MAC address %pM\n",
2156                             priv->dev->dev_addr);
2157         }
2158 }
2159
2160 /**
2161  * stmmac_init_dma_engine - DMA init.
2162  * @priv: driver private structure
2163  * Description:
2164  * It inits the DMA invoking the specific MAC/GMAC callback.
2165  * Some DMA parameters can be passed from the platform;
2166  * if they are not passed, a default is kept for the MAC or GMAC.
2167  */
2168 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2169 {
2170         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2171         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2172         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2173         struct stmmac_rx_queue *rx_q;
2174         struct stmmac_tx_queue *tx_q;
2175         u32 chan = 0;
2176         int atds = 0;
2177         int ret = 0;
2178
2179         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2180                 dev_err(priv->device, "Invalid DMA configuration\n");
2181                 return -EINVAL;
2182         }
2183
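             /* atds selects the alternate (extended) descriptor size in the DMA,
              * required when extended descriptors are used in ring mode.
              */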
2184         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2185                 atds = 1;
2186
2187         ret = stmmac_reset(priv, priv->ioaddr);
2188         if (ret) {
2189                 dev_err(priv->device, "Failed to reset the dma\n");
2190                 return ret;
2191         }
2192
2193         /* DMA Configuration */
2194         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2195
2196         if (priv->plat->axi)
2197                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2198
2199         /* DMA RX Channel Configuration */
2200         for (chan = 0; chan < rx_channels_count; chan++) {
2201                 rx_q = &priv->rx_queue[chan];
2202
2203                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2204                                     rx_q->dma_rx_phy, chan);
2205
2206                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2207                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2208                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2209                                        rx_q->rx_tail_addr, chan);
2210         }
2211
2212         /* DMA TX Channel Configuration */
2213         for (chan = 0; chan < tx_channels_count; chan++) {
2214                 tx_q = &priv->tx_queue[chan];
2215
2216                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2217                                     tx_q->dma_tx_phy, chan);
2218
2219                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2220                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2221                                        tx_q->tx_tail_addr, chan);
2222         }
2223
2224         /* DMA CSR Channel configuration */
2225         for (chan = 0; chan < dma_csr_ch; chan++)
2226                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2227
2228         return ret;
2229 }
2230
2231 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2232 {
2233         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2234
2235         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2236 }
2237
2238 /**
2239  * stmmac_tx_timer - mitigation sw timer for tx.
2240  * @t: timer_list pointer
2241  * Description:
2242  * This is the timer handler that schedules the NAPI poll to run stmmac_tx_clean.
2243  */
2244 static void stmmac_tx_timer(struct timer_list *t)
2245 {
2246         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2247         struct stmmac_priv *priv = tx_q->priv_data;
2248         struct stmmac_channel *ch;
2249
2250         ch = &priv->channel[tx_q->queue_index];
2251
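             /* Defer the actual TX cleanup to the channel's NAPI poll context. */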
2252         if (likely(napi_schedule_prep(&ch->napi)))
2253                 __napi_schedule(&ch->napi);
2254 }
2255
2256 /**
2257  * stmmac_init_tx_coalesce - init tx mitigation options.
2258  * @priv: driver private structure
2259  * Description:
2260  * This inits the transmit coalesce parameters: i.e. timer rate,
2261  * timer handler and default threshold used for enabling the
2262  * interrupt on completion bit.
2263  */
2264 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2265 {
2266         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2267         u32 chan;
2268
2269         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2270         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2271
2272         for (chan = 0; chan < tx_channel_count; chan++) {
2273                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2274
2275                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2276         }
2277 }
2278
2279 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2280 {
2281         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2282         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2283         u32 chan;
2284
2285         /* set TX ring length */
2286         for (chan = 0; chan < tx_channels_count; chan++)
2287                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2288                                 (DMA_TX_SIZE - 1), chan);
2289
2290         /* set RX ring length */
2291         for (chan = 0; chan < rx_channels_count; chan++)
2292                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2293                                 (DMA_RX_SIZE - 1), chan);
2294 }
2295
2296 /**
2297  *  stmmac_set_tx_queue_weight - Set TX queue weight
2298  *  @priv: driver private structure
2299  *  Description: It is used for setting the TX queue weights
2300  */
2301 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2302 {
2303         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2304         u32 weight;
2305         u32 queue;
2306
2307         for (queue = 0; queue < tx_queues_count; queue++) {
2308                 weight = priv->plat->tx_queues_cfg[queue].weight;
2309                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2310         }
2311 }
2312
2313 /**
2314  *  stmmac_configure_cbs - Configure CBS in TX queue
2315  *  @priv: driver private structure
2316  *  Description: It is used for configuring CBS in AVB TX queues
2317  */
2318 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2319 {
2320         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2321         u32 mode_to_use;
2322         u32 queue;
2323
2324         /* queue 0 is reserved for legacy traffic */
2325         for (queue = 1; queue < tx_queues_count; queue++) {
2326                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2327                 if (mode_to_use == MTL_QUEUE_DCB)
2328                         continue;
2329
2330                 stmmac_config_cbs(priv, priv->hw,
2331                                 priv->plat->tx_queues_cfg[queue].send_slope,
2332                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2333                                 priv->plat->tx_queues_cfg[queue].high_credit,
2334                                 priv->plat->tx_queues_cfg[queue].low_credit,
2335                                 queue);
2336         }
2337 }
2338
2339 /**
2340  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2341  *  @priv: driver private structure
2342  *  Description: It is used for mapping RX queues to RX dma channels
2343  */
2344 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2345 {
2346         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2347         u32 queue;
2348         u32 chan;
2349
2350         for (queue = 0; queue < rx_queues_count; queue++) {
2351                 chan = priv->plat->rx_queues_cfg[queue].chan;
2352                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2353         }
2354 }
2355
2356 /**
2357  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2358  *  @priv: driver private structure
2359  *  Description: It is used for configuring the RX Queue Priority
2360  */
2361 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2362 {
2363         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2364         u32 queue;
2365         u32 prio;
2366
2367         for (queue = 0; queue < rx_queues_count; queue++) {
2368                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2369                         continue;
2370
2371                 prio = priv->plat->rx_queues_cfg[queue].prio;
2372                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2373         }
2374 }
2375
2376 /**
2377  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2378  *  @priv: driver private structure
2379  *  Description: It is used for configuring the TX Queue Priority
2380  */
2381 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2382 {
2383         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2384         u32 queue;
2385         u32 prio;
2386
2387         for (queue = 0; queue < tx_queues_count; queue++) {
2388                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2389                         continue;
2390
2391                 prio = priv->plat->tx_queues_cfg[queue].prio;
2392                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2393         }
2394 }
2395
2396 /**
2397  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2398  *  @priv: driver private structure
2399  *  Description: It is used for configuring the RX queue routing
2400  */
2401 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2402 {
2403         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2404         u32 queue;
2405         u8 packet;
2406
2407         for (queue = 0; queue < rx_queues_count; queue++) {
2408                 /* no specific packet type routing specified for the queue */
2409                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2410                         continue;
2411
2412                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2413                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2414         }
2415 }
2416
2417 /**
2418  *  stmmac_mtl_configuration - Configure MTL
2419  *  @priv: driver private structure
2420  *  Description: It is used for configuring MTL
2421  */
2422 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2423 {
2424         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2425         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2426
2427         if (tx_queues_count > 1)
2428                 stmmac_set_tx_queue_weight(priv);
2429
2430         /* Configure MTL RX algorithms */
2431         if (rx_queues_count > 1)
2432                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2433                                 priv->plat->rx_sched_algorithm);
2434
2435         /* Configure MTL TX algorithms */
2436         if (tx_queues_count > 1)
2437                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2438                                 priv->plat->tx_sched_algorithm);
2439
2440         /* Configure CBS in AVB TX queues */
2441         if (tx_queues_count > 1)
2442                 stmmac_configure_cbs(priv);
2443
2444         /* Map RX MTL to DMA channels */
2445         stmmac_rx_queue_dma_chan_map(priv);
2446
2447         /* Enable MAC RX Queues */
2448         stmmac_mac_enable_rx_queues(priv);
2449
2450         /* Set RX priorities */
2451         if (rx_queues_count > 1)
2452                 stmmac_mac_config_rx_queues_prio(priv);
2453
2454         /* Set TX priorities */
2455         if (tx_queues_count > 1)
2456                 stmmac_mac_config_tx_queues_prio(priv);
2457
2458         /* Set RX routing */
2459         if (rx_queues_count > 1)
2460                 stmmac_mac_config_rx_queues_routing(priv);
2461 }
2462
2463 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2464 {
2465         if (priv->dma_cap.asp) {
2466                 netdev_info(priv->dev, "Enabling Safety Features\n");
2467                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2468         } else {
2469                 netdev_info(priv->dev, "No Safety Features support found\n");
2470         }
2471 }
2472
2473 /**
2474  * stmmac_hw_setup - setup mac in a usable state.
2475  *  @dev : pointer to the device structure.
2476  *  Description:
2477  *  this is the main function to setup the HW in a usable state because the
2478  *  dma engine is reset, the core registers are configured (e.g. AXI,
2479  *  Checksum features, timers). The DMA is ready to start receiving and
2480  *  transmitting.
2481  *  Return value:
2482  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2483  *  file on failure.
2484  */
2485 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2486 {
2487         struct stmmac_priv *priv = netdev_priv(dev);
2488         u32 rx_cnt = priv->plat->rx_queues_to_use;
2489         u32 tx_cnt = priv->plat->tx_queues_to_use;
2490         u32 chan;
2491         int ret;
2492
2493         /* DMA initialization and SW reset */
2494         ret = stmmac_init_dma_engine(priv);
2495         if (ret < 0) {
2496                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2497                            __func__);
2498                 return ret;
2499         }
2500
2501         /* Copy the MAC addr into the HW  */
2502         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2503
2504         /* PS and related bits will be programmed according to the speed */
2505         if (priv->hw->pcs) {
2506                 int speed = priv->plat->mac_port_sel_speed;
2507
2508                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2509                     (speed == SPEED_1000)) {
2510                         priv->hw->ps = speed;
2511                 } else {
2512                         dev_warn(priv->device, "invalid port speed\n");
2513                         priv->hw->ps = 0;
2514                 }
2515         }
2516
2517         /* Initialize the MAC Core */
2518         stmmac_core_init(priv, priv->hw, dev);
2519
2520         /* Initialize MTL */
2521         stmmac_mtl_configuration(priv);
2522
2523         /* Initialize Safety Features */
2524         stmmac_safety_feat_configuration(priv);
2525
2526         ret = stmmac_rx_ipc(priv, priv->hw);
2527         if (!ret) {
2528                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2529                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2530                 priv->hw->rx_csum = 0;
2531         }
2532
2533         /* Enable the MAC Rx/Tx */
2534         stmmac_mac_set(priv, priv->ioaddr, true);
2535
2536         /* Set the HW DMA mode and the COE */
2537         stmmac_dma_operation_mode(priv);
2538
2539         stmmac_mmc_setup(priv);
2540
2541         if (init_ptp) {
2542                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2543                 if (ret < 0)
2544                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2545
2546                 ret = stmmac_init_ptp(priv);
2547                 if (ret == -EOPNOTSUPP)
2548                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2549                 else if (ret)
2550                         netdev_warn(priv->dev, "PTP init failed\n");
2551         }
2552
2553         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2554
2555         if (priv->use_riwt) {
2556                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2557                 if (!ret)
2558                         priv->rx_riwt = MAX_DMA_RIWT;
2559         }
2560
2561         if (priv->hw->pcs)
2562                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2563
2564         /* set TX and RX rings length */
2565         stmmac_set_rings_length(priv);
2566
2567         /* Enable TSO */
2568         if (priv->tso) {
2569                 for (chan = 0; chan < tx_cnt; chan++)
2570                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2571         }
2572
2573         /* Start the ball rolling... */
2574         stmmac_start_all_dma(priv);
2575
2576         return 0;
2577 }
2578
2579 static void stmmac_hw_teardown(struct net_device *dev)
2580 {
2581         struct stmmac_priv *priv = netdev_priv(dev);
2582
2583         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2584 }
2585
2586 /**
2587  *  stmmac_open - open entry point of the driver
2588  *  @dev : pointer to the device structure.
2589  *  Description:
2590  *  This function is the open entry point of the driver.
2591  *  Return value:
2592  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2593  *  file on failure.
2594  */
2595 static int stmmac_open(struct net_device *dev)
2596 {
2597         struct stmmac_priv *priv = netdev_priv(dev);
2598         u32 chan;
2599         int ret;
2600
2601         stmmac_check_ether_addr(priv);
2602
2603         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2604             priv->hw->pcs != STMMAC_PCS_TBI &&
2605             priv->hw->pcs != STMMAC_PCS_RTBI) {
2606                 ret = stmmac_init_phy(dev);
2607                 if (ret) {
2608                         netdev_err(priv->dev,
2609                                    "%s: Cannot attach to PHY (error: %d)\n",
2610                                    __func__, ret);
2611                         return ret;
2612                 }
2613         }
2614
2615         /* Extra statistics */
2616         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2617         priv->xstats.threshold = tc;
2618
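             /* buf_sz is a module parameter; align it before using it as the
              * per-descriptor DMA buffer size.
              */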
2619         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2620         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2621
2622         ret = alloc_dma_desc_resources(priv);
2623         if (ret < 0) {
2624                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2625                            __func__);
2626                 goto dma_desc_error;
2627         }
2628
2629         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2630         if (ret < 0) {
2631                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2632                            __func__);
2633                 goto init_error;
2634         }
2635
2636         ret = stmmac_hw_setup(dev, true);
2637         if (ret < 0) {
2638                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2639                 goto init_error;
2640         }
2641
2642         stmmac_init_tx_coalesce(priv);
2643
2644         if (dev->phydev)
2645                 phy_start(dev->phydev);
2646
2647         /* Request the IRQ lines */
2648         ret = request_irq(dev->irq, stmmac_interrupt,
2649                           IRQF_SHARED, dev->name, dev);
2650         if (unlikely(ret < 0)) {
2651                 netdev_err(priv->dev,
2652                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2653                            __func__, dev->irq, ret);
2654                 goto irq_error;
2655         }
2656
2657         /* Request the Wake IRQ in case another line is used for WoL */
2658         if (priv->wol_irq != dev->irq) {
2659                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2660                                   IRQF_SHARED, dev->name, dev);
2661                 if (unlikely(ret < 0)) {
2662                         netdev_err(priv->dev,
2663                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2664                                    __func__, priv->wol_irq, ret);
2665                         goto wolirq_error;
2666                 }
2667         }
2668
2669         /* Request the LPI IRQ when it uses a dedicated line */
2670         if (priv->lpi_irq > 0) {
2671                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2672                                   dev->name, dev);
2673                 if (unlikely(ret < 0)) {
2674                         netdev_err(priv->dev,
2675                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2676                                    __func__, priv->lpi_irq, ret);
2677                         goto lpiirq_error;
2678                 }
2679         }
2680
2681         stmmac_enable_all_queues(priv);
2682         stmmac_start_all_queues(priv);
2683
2684         return 0;
2685
2686 lpiirq_error:
2687         if (priv->wol_irq != dev->irq)
2688                 free_irq(priv->wol_irq, dev);
2689 wolirq_error:
2690         free_irq(dev->irq, dev);
2691 irq_error:
2692         if (dev->phydev)
2693                 phy_stop(dev->phydev);
2694
2695         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2696                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2697
2698         stmmac_hw_teardown(dev);
2699 init_error:
2700         free_dma_desc_resources(priv);
2701 dma_desc_error:
2702         if (dev->phydev)
2703                 phy_disconnect(dev->phydev);
2704
2705         return ret;
2706 }
2707
2708 /**
2709  *  stmmac_release - close entry point of the driver
2710  *  @dev : device pointer.
2711  *  Description:
2712  *  This is the stop entry point of the driver.
2713  */
2714 static int stmmac_release(struct net_device *dev)
2715 {
2716         struct stmmac_priv *priv = netdev_priv(dev);
2717         u32 chan;
2718
2719         if (priv->eee_enabled)
2720                 del_timer_sync(&priv->eee_ctrl_timer);
2721
2722         /* Stop and disconnect the PHY */
2723         if (dev->phydev) {
2724                 phy_stop(dev->phydev);
2725                 phy_disconnect(dev->phydev);
2726         }
2727
2728         stmmac_stop_all_queues(priv);
2729
2730         stmmac_disable_all_queues(priv);
2731
2732         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2733                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2734
2735         /* Free the IRQ lines */
2736         free_irq(dev->irq, dev);
2737         if (priv->wol_irq != dev->irq)
2738                 free_irq(priv->wol_irq, dev);
2739         if (priv->lpi_irq > 0)
2740                 free_irq(priv->lpi_irq, dev);
2741
2742         /* Stop TX/RX DMA and clear the descriptors */
2743         stmmac_stop_all_dma(priv);
2744
2745         /* Release and free the Rx/Tx resources */
2746         free_dma_desc_resources(priv);
2747
2748         /* Disable the MAC Rx/Tx */
2749         stmmac_mac_set(priv, priv->ioaddr, false);
2750
2751         netif_carrier_off(dev);
2752
2753         stmmac_release_ptp(priv);
2754
2755         return 0;
2756 }
2757
2758 /**
2759  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
2760  *  @priv: driver private structure
2761  *  @des: buffer start address
2762  *  @total_len: total length to fill in descriptors
2763  *  @last_segment: condition for the last descriptor
2764  *  @queue: TX queue index
2765  *  Description:
2766  *  This function fills the descriptors and requests new descriptors according
2767  *  to the buffer length to fill
2768  */
2769 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2770                                  int total_len, bool last_segment, u32 queue)
2771 {
2772         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2773         struct dma_desc *desc;
2774         u32 buff_size;
2775         int tmp_len;
2776
2777         tmp_len = total_len;
2778
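             /* Consume the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
              * one descriptor per chunk, each pointing at the proper offset of the
              * already-mapped buffer.
              */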
2779         while (tmp_len > 0) {
2780                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2781                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2782                 desc = tx_q->dma_tx + tx_q->cur_tx;
2783
2784                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2785                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2786                             TSO_MAX_BUFF_SIZE : tmp_len;
2787
2788                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2789                                 0, 1,
2790                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2791                                 0, 0);
2792
2793                 tmp_len -= TSO_MAX_BUFF_SIZE;
2794         }
2795 }
2796
2797 /**
2798  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2799  *  @skb : the socket buffer
2800  *  @dev : device pointer
2801  *  Description: this is the transmit function that is called on TSO frames
2802  *  (support available on GMAC4 and newer chips).
2803  *  The diagram below shows the ring programming in case of TSO frames:
2804  *
2805  *  First Descriptor
2806  *   --------
2807  *   | DES0 |---> buffer1 = L2/L3/L4 header
2808  *   | DES1 |---> TCP Payload (can continue on next descr...)
2809  *   | DES2 |---> buffer 1 and 2 len
2810  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2811  *   --------
2812  *      |
2813  *     ...
2814  *      |
2815  *   --------
2816  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2817  *   | DES1 | --|
2818  *   | DES2 | --> buffer 1 and 2 len
2819  *   | DES3 |
2820  *   --------
2821  *
2822  * The MSS is usually fixed while TSO is enabled, so the TDES3 ctx field only needs programming when the MSS changes.
2823  */
2824 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2825 {
2826         struct dma_desc *desc, *first, *mss_desc = NULL;
2827         struct stmmac_priv *priv = netdev_priv(dev);
2828         int nfrags = skb_shinfo(skb)->nr_frags;
2829         u32 queue = skb_get_queue_mapping(skb);
2830         unsigned int first_entry, des;
2831         struct stmmac_tx_queue *tx_q;
2832         int tmp_pay_len = 0;
2833         u32 pay_len, mss;
2834         u8 proto_hdr_len;
2835         int i;
2836
2837         tx_q = &priv->tx_queue[queue];
2838
2839         /* Compute header lengths */
2840         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2841
2842         /* Descriptor availability based on the threshold should be safe enough */
2843         if (unlikely(stmmac_tx_avail(priv, queue) <
2844                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2845                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2846                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2847                                                                 queue));
2848                         /* This is a hard error, log it. */
2849                         netdev_err(priv->dev,
2850                                    "%s: Tx Ring full when queue awake\n",
2851                                    __func__);
2852                 }
2853                 return NETDEV_TX_BUSY;
2854         }
2855
2856         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2857
2858         mss = skb_shinfo(skb)->gso_size;
2859
2860         /* set new MSS value if needed */
2861         if (mss != tx_q->mss) {
2862                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2863                 stmmac_set_mss(priv, mss_desc, mss);
2864                 tx_q->mss = mss;
2865                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2866                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2867         }
2868
2869         if (netif_msg_tx_queued(priv)) {
2870                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2871                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2872                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2873                         skb->data_len);
2874         }
2875
2876         first_entry = tx_q->cur_tx;
2877         WARN_ON(tx_q->tx_skbuff[first_entry]);
2878
2879         desc = tx_q->dma_tx + first_entry;
2880         first = desc;
2881
2882         /* first descriptor: fill Headers on Buf1 */
2883         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2884                              DMA_TO_DEVICE);
2885         if (dma_mapping_error(priv->device, des))
2886                 goto dma_map_err;
2887
2888         tx_q->tx_skbuff_dma[first_entry].buf = des;
2889         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2890
2891         first->des0 = cpu_to_le32(des);
2892
2893         /* Fill start of payload in buff2 of first descriptor */
2894         if (pay_len)
2895                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2896
2897         /* If needed take extra descriptors to fill the remaining payload */
2898         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
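             /* Buffer 2 of the first descriptor can carry up to TSO_MAX_BUFF_SIZE
              * bytes of payload, hence the subtraction above; any excess is spread
              * over extra descriptors by stmmac_tso_allocator().
              */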
2899
2900         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2901
2902         /* Prepare fragments */
2903         for (i = 0; i < nfrags; i++) {
2904                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2905
2906                 des = skb_frag_dma_map(priv->device, frag, 0,
2907                                        skb_frag_size(frag),
2908                                        DMA_TO_DEVICE);
2909                 if (dma_mapping_error(priv->device, des))
2910                         goto dma_map_err;
2911
2912                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2913                                      (i == nfrags - 1), queue);
2914
2915                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2916                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2917                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2918         }
2919
2920         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2921
2922         /* Only the last descriptor gets to point to the skb. */
2923         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2924
2925         /* We've used all descriptors we need for this skb, however,
2926          * advance cur_tx so that it references a fresh descriptor.
2927          * ndo_start_xmit will fill this descriptor the next time it's
2928          * called and stmmac_tx_clean may clean up to this descriptor.
2929          */
2930         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2931
2932         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2933                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2934                           __func__);
2935                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2936         }
2937
2938         dev->stats.tx_bytes += skb->len;
2939         priv->xstats.tx_tso_frames++;
2940         priv->xstats.tx_tso_nfrags += nfrags;
2941
2942         /* Manage tx mitigation */
2943         tx_q->tx_count_frames += nfrags + 1;
2944         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2945                 stmmac_set_tx_ic(priv, desc);
2946                 priv->xstats.tx_set_ic_bit++;
2947                 tx_q->tx_count_frames = 0;
2948         } else {
2949                 stmmac_tx_timer_arm(priv, queue);
2950         }
2951
2952         skb_tx_timestamp(skb);
2953
2954         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2955                      priv->hwts_tx_en)) {
2956                 /* declare that device is doing timestamping */
2957                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2958                 stmmac_enable_tx_timestamp(priv, first);
2959         }
2960
2961         /* Complete the first descriptor before granting the DMA */
2962         stmmac_prepare_tso_tx_desc(priv, first, 1,
2963                         proto_hdr_len,
2964                         pay_len,
2965                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2966                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2967
2968         /* If context desc is used to change MSS */
2969         if (mss_desc) {
2970                 /* Make sure that the first descriptor has been completely
2971                  * written, including its OWN bit. This is because the MSS
2972                  * descriptor actually sits before the first descriptor, so
2973                  * its OWN bit must be the last thing written.
2974                  */
2975                 dma_wmb();
2976                 stmmac_set_tx_owner(priv, mss_desc);
2977         }
2978
2979         /* The OWN bit must be the last setting done when preparing the
2980          * descriptor, and then a barrier is needed to make sure that
2981          * everything is coherent before granting control to the DMA engine.
2982          */
2983         wmb();
2984
2985         if (netif_msg_pktdata(priv)) {
2986                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2987                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2988                         tx_q->cur_tx, first, nfrags);
2989
2990                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2991
2992                 pr_info(">>> frame to be transmitted: ");
2993                 print_pkt(skb->data, skb_headlen(skb));
2994         }
2995
2996         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2997
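             /* Advance the DMA tail pointer so the engine starts fetching the
              * newly queued descriptors.
              */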
2998         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2999         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3000
3001         return NETDEV_TX_OK;
3002
3003 dma_map_err:
3004         dev_err(priv->device, "Tx dma map failed\n");
3005         dev_kfree_skb(skb);
3006         priv->dev->stats.tx_dropped++;
3007         return NETDEV_TX_OK;
3008 }
3009
3010 /**
3011  *  stmmac_xmit - Tx entry point of the driver
3012  *  @skb : the socket buffer
3013  *  @dev : device pointer
3014  *  Description : this is the tx entry point of the driver.
3015  *  It programs the chain or the ring and supports oversized frames
3016  *  and SG feature.
3017  */
3018 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3019 {
3020         struct stmmac_priv *priv = netdev_priv(dev);
3021         unsigned int nopaged_len = skb_headlen(skb);
3022         int i, csum_insertion = 0, is_jumbo = 0;
3023         u32 queue = skb_get_queue_mapping(skb);
3024         int nfrags = skb_shinfo(skb)->nr_frags;
3025         int entry;
3026         unsigned int first_entry;
3027         struct dma_desc *desc, *first;
3028         struct stmmac_tx_queue *tx_q;
3029         unsigned int enh_desc;
3030         unsigned int des;
3031
3032         tx_q = &priv->tx_queue[queue];
3033
3034         /* Manage oversized TCP frames for GMAC4 device */
3035         if (skb_is_gso(skb) && priv->tso) {
3036                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3037                         return stmmac_tso_xmit(skb, dev);
3038         }
3039
3040         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3041                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3042                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3043                                                                 queue));
3044                         /* This is a hard error, log it. */
3045                         netdev_err(priv->dev,
3046                                    "%s: Tx Ring full when queue awake\n",
3047                                    __func__);
3048                 }
3049                 return NETDEV_TX_BUSY;
3050         }
3051
3052         if (priv->tx_path_in_lpi_mode)
3053                 stmmac_disable_eee_mode(priv);
3054
3055         entry = tx_q->cur_tx;
3056         first_entry = entry;
3057         WARN_ON(tx_q->tx_skbuff[first_entry]);
3058
3059         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3060
3061         if (likely(priv->extend_desc))
3062                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3063         else
3064                 desc = tx_q->dma_tx + entry;
3065
3066         first = desc;
3067
3068         enh_desc = priv->plat->enh_desc;
3069         /* To program the descriptors according to the size of the frame */
3070         if (enh_desc)
3071                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3072
3073         if (unlikely(is_jumbo)) {
3074                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3075                 if (unlikely(entry < 0) && (entry != -EINVAL))
3076                         goto dma_map_err;
3077         }
3078
3079         for (i = 0; i < nfrags; i++) {
3080                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3081                 int len = skb_frag_size(frag);
3082                 bool last_segment = (i == (nfrags - 1));
3083
3084                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3085                 WARN_ON(tx_q->tx_skbuff[entry]);
3086
3087                 if (likely(priv->extend_desc))
3088                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3089                 else
3090                         desc = tx_q->dma_tx + entry;
3091
3092                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3093                                        DMA_TO_DEVICE);
3094                 if (dma_mapping_error(priv->device, des))
3095                         goto dma_map_err; /* should reuse desc w/o issues */
3096
3097                 tx_q->tx_skbuff_dma[entry].buf = des;
3098
3099                 stmmac_set_desc_addr(priv, desc, des);
3100
3101                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3102                 tx_q->tx_skbuff_dma[entry].len = len;
3103                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3104
3105                 /* Prepare the descriptor and set the own bit too */
3106                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3107                                 priv->mode, 1, last_segment, skb->len);
3108         }
3109
3110         /* Only the last descriptor gets to point to the skb. */
3111         tx_q->tx_skbuff[entry] = skb;
3112
3113         /* We've used all descriptors we need for this skb, however,
3114          * advance cur_tx so that it references a fresh descriptor.
3115          * ndo_start_xmit will fill this descriptor the next time it's
3116          * called and stmmac_tx_clean may clean up to this descriptor.
3117          */
3118         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3119         tx_q->cur_tx = entry;
3120
3121         if (netif_msg_pktdata(priv)) {
3122                 void *tx_head;
3123
3124                 netdev_dbg(priv->dev,
3125                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3126                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3127                            entry, first, nfrags);
3128
3129                 if (priv->extend_desc)
3130                         tx_head = (void *)tx_q->dma_etx;
3131                 else
3132                         tx_head = (void *)tx_q->dma_tx;
3133
3134                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3135
3136                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3137                 print_pkt(skb->data, skb->len);
3138         }
3139
3140         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3141                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3142                           __func__);
3143                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3144         }
3145
3146         dev->stats.tx_bytes += skb->len;
3147
3148         /* According to the coalesce parameter, the IC bit for the latest
3149          * segment is reset and the timer is re-started to clean the tx status.
3150          * This approach takes care of the fragments: desc is the first
3151          * element in case of no SG.
3152          */
3153         tx_q->tx_count_frames += nfrags + 1;
3154         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3155                 stmmac_set_tx_ic(priv, desc);
3156                 priv->xstats.tx_set_ic_bit++;
3157                 tx_q->tx_count_frames = 0;
3158         } else {
3159                 stmmac_tx_timer_arm(priv, queue);
3160         }
3161
3162         skb_tx_timestamp(skb);
3163
3164         /* Ready to fill the first descriptor and set the OWN bit w/o any
3165          * problems because all the descriptors are actually ready to be
3166          * passed to the DMA engine.
3167          */
3168         if (likely(!is_jumbo)) {
3169                 bool last_segment = (nfrags == 0);
3170
3171                 des = dma_map_single(priv->device, skb->data,
3172                                      nopaged_len, DMA_TO_DEVICE);
3173                 if (dma_mapping_error(priv->device, des))
3174                         goto dma_map_err;
3175
3176                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3177
3178                 stmmac_set_desc_addr(priv, first, des);
3179
3180                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3181                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3182
3183                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3184                              priv->hwts_tx_en)) {
3185                         /* declare that device is doing timestamping */
3186                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3187                         stmmac_enable_tx_timestamp(priv, first);
3188                 }
3189
3190                 /* Prepare the first descriptor setting the OWN bit too */
3191                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3192                                 csum_insertion, priv->mode, 1, last_segment,
3193                                 skb->len);
3194
3195                 /* The OWN bit must be the last setting done when preparing the
3196                  * descriptor, and then a barrier is needed to make sure that
3197                  * everything is coherent before granting control to the DMA engine.
3198                  */
3199                 wmb();
3200         }
3201
3202         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3203
3204         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3205
3206         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3207         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3208
3209         return NETDEV_TX_OK;
3210
3211 dma_map_err:
3212         netdev_err(priv->dev, "Tx DMA map failed\n");
3213         dev_kfree_skb(skb);
3214         priv->dev->stats.tx_dropped++;
3215         return NETDEV_TX_OK;
3216 }
3217
3218 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3219 {
3220         struct vlan_ethhdr *veth;
3221         __be16 vlan_proto;
3222         u16 vlanid;
3223
3224         veth = (struct vlan_ethhdr *)skb->data;
3225         vlan_proto = veth->h_vlan_proto;
3226
3227         if ((vlan_proto == htons(ETH_P_8021Q) &&
3228              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3229             (vlan_proto == htons(ETH_P_8021AD) &&
3230              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3231                 /* pop the vlan tag */
3232                 vlanid = ntohs(veth->h_vlan_TCI);
3233                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3234                 skb_pull(skb, VLAN_HLEN);
3235                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3236         }
3237 }
3238
3239
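     /* Return 1 when rx_zeroc_thresh has reached STMMAC_RX_THRESH; stmmac_rx()
      * then falls back to the copy-based receive path instead of zero-copy
      * (zero-copy is always kept on GMAC4/XGMAC).
      */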
3240 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3241 {
3242         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3243                 return 0;
3244
3245         return 1;
3246 }
3247
3248 /**
3249  * stmmac_rx_refill - refill used skb preallocated buffers
3250  * @priv: driver private structure
3251  * @queue: RX queue index
3252  * Description: this reallocates the skbs for the reception process, which
3253  * is based on zero-copy.
3254  */
3255 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3256 {
3257         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3258         int dirty = stmmac_rx_dirty(priv, queue);
3259         unsigned int entry = rx_q->dirty_rx;
3260
3261         int bfsize = priv->dma_buf_sz;
3262
3263         while (dirty-- > 0) {
3264                 struct dma_desc *p;
3265
3266                 if (priv->extend_desc)
3267                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3268                 else
3269                         p = rx_q->dma_rx + entry;
3270
3271                 if (likely(!rx_q->rx_skbuff[entry])) {
3272                         struct sk_buff *skb;
3273
3274                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3275                         if (unlikely(!skb)) {
3276                                 /* so for a while no zero-copy! */
3277                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3278                                 if (unlikely(net_ratelimit()))
3279                                         dev_err(priv->device,
3280                                                 "fail to alloc skb entry %d\n",
3281                                                 entry);
3282                                 break;
3283                         }
3284
3285                         rx_q->rx_skbuff[entry] = skb;
3286                         rx_q->rx_skbuff_dma[entry] =
3287                             dma_map_single(priv->device, skb->data, bfsize,
3288                                            DMA_FROM_DEVICE);
3289                         if (dma_mapping_error(priv->device,
3290                                               rx_q->rx_skbuff_dma[entry])) {
3291                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3292                                 dev_kfree_skb(skb);
3293                                 break;
3294                         }
3295
3296                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3297                         stmmac_refill_desc3(priv, rx_q, p);
3298
3299                         if (rx_q->rx_zeroc_thresh > 0)
3300                                 rx_q->rx_zeroc_thresh--;
3301
3302                         netif_dbg(priv, rx_status, priv->dev,
3303                                   "refill entry #%d\n", entry);
3304                 }
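                     /* Make the refilled buffer address visible to the device
                      * before the OWN bit hands the descriptor back to the DMA.
                      */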
3305                 dma_wmb();
3306
3307                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3308
3309                 dma_wmb();
3310
3311                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3312         }
3313         rx_q->dirty_rx = entry;
3314 }
3315
3316 /**
3317  * stmmac_rx - manage the receive process
3318  * @priv: driver private structure
3319  * @limit: napi budget
3320  * @queue: RX queue index.
3321  * Description: this is the function called by the napi poll method.
3322  * It gets all the frames inside the ring.
3323  */
3324 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3325 {
3326         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3327         struct stmmac_channel *ch = &priv->channel[queue];
3328         unsigned int entry = rx_q->cur_rx;
3329         int coe = priv->hw->rx_csum;
3330         unsigned int next_entry;
3331         unsigned int count = 0;
3332         bool xmac;
3333
3334         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3335
3336         if (netif_msg_rx_status(priv)) {
3337                 void *rx_head;
3338
3339                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3340                 if (priv->extend_desc)
3341                         rx_head = (void *)rx_q->dma_erx;
3342                 else
3343                         rx_head = (void *)rx_q->dma_rx;
3344
3345                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3346         }
3347         while (count < limit) {
3348                 int status;
3349                 struct dma_desc *p;
3350                 struct dma_desc *np;
3351
3352                 if (priv->extend_desc)
3353                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3354                 else
3355                         p = rx_q->dma_rx + entry;
3356
3357                 /* read the status of the incoming frame */
3358                 status = stmmac_rx_status(priv, &priv->dev->stats,
3359                                 &priv->xstats, p);
3360                 /* check if managed by the DMA otherwise go ahead */
3361                 if (unlikely(status & dma_own))
3362                         break;
3363
3364                 count++;
3365
3366                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3367                 next_entry = rx_q->cur_rx;
3368
3369                 if (priv->extend_desc)
3370                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3371                 else
3372                         np = rx_q->dma_rx + next_entry;
3373
3374                 prefetch(np);
3375
3376                 if (priv->extend_desc)
3377                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3378                                         &priv->xstats, rx_q->dma_erx + entry);
3379                 if (unlikely(status == discard_frame)) {
3380                         priv->dev->stats.rx_errors++;
3381                         if (priv->hwts_rx_en && !priv->extend_desc) {
3382                                 /* DESC2 & DESC3 will be overwritten by device
3383                                  * with timestamp value, hence reinitialize
3384                                  * them in stmmac_rx_refill() function so that
3385                                  * device can reuse it.
3386                                  */
3387                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3388                                 rx_q->rx_skbuff[entry] = NULL;
3389                                 dma_unmap_single(priv->device,
3390                                                  rx_q->rx_skbuff_dma[entry],
3391                                                  priv->dma_buf_sz,
3392                                                  DMA_FROM_DEVICE);
3393                         }
3394                 } else {
3395                         struct sk_buff *skb;
3396                         int frame_len;
3397                         unsigned int des;
3398
3399                         stmmac_get_desc_addr(priv, p, &des);
3400                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3401
3402                         /*  If frame length is greater than skb buffer size
3403                          *  (preallocated during init) then the packet is
3404                          *  ignored
3405                          */
3406                         if (frame_len > priv->dma_buf_sz) {
3407                                 netdev_err(priv->dev,
3408                                            "len %d larger than size (%d)\n",
3409                                            frame_len, priv->dma_buf_sz);
3410                                 priv->dev->stats.rx_length_errors++;
3411                                 break;
3412                         }
3413
3414                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3415                          * Type frames (LLC/LLC-SNAP)
3416                          *
3417                          * llc_snap is never checked in GMAC >= 4, so this ACS
3418                          * feature is always disabled and packets need to be
3419                          * stripped manually.
3420                          */
3421                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3422                             unlikely(status != llc_snap))
3423                                 frame_len -= ETH_FCS_LEN;
3424
3425                         if (netif_msg_rx_status(priv)) {
3426                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3427                                            p, entry, des);
3428                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3429                                            frame_len, status);
3430                         }
3431
3432                         /* Zero-copy is always used for all sizes in the
3433                          * GMAC4 case because the used descriptors always
3434                          * need to be refilled.
3435                          */
3436                         if (unlikely(!xmac &&
3437                                      ((frame_len < priv->rx_copybreak) ||
3438                                      stmmac_rx_threshold_count(rx_q)))) {
3439                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3440                                                                 frame_len);
3441                                 if (unlikely(!skb)) {
3442                                         if (net_ratelimit())
3443                                                 dev_warn(priv->device,
3444                                                          "packet dropped\n");
3445                                         priv->dev->stats.rx_dropped++;
3446                                         break;
3447                                 }
3448
3449                                 dma_sync_single_for_cpu(priv->device,
3450                                                         rx_q->rx_skbuff_dma
3451                                                         [entry], frame_len,
3452                                                         DMA_FROM_DEVICE);
3453                                 skb_copy_to_linear_data(skb,
3454                                                         rx_q->
3455                                                         rx_skbuff[entry]->data,
3456                                                         frame_len);
3457
3458                                 skb_put(skb, frame_len);
3459                                 dma_sync_single_for_device(priv->device,
3460                                                            rx_q->rx_skbuff_dma
3461                                                            [entry], frame_len,
3462                                                            DMA_FROM_DEVICE);
3463                         } else {
3464                                 skb = rx_q->rx_skbuff[entry];
3465                                 if (unlikely(!skb)) {
3466                                         netdev_err(priv->dev,
3467                                                    "%s: Inconsistent Rx chain\n",
3468                                                    priv->dev->name);
3469                                         priv->dev->stats.rx_dropped++;
3470                                         break;
3471                                 }
3472                                 prefetch(skb->data - NET_IP_ALIGN);
3473                                 rx_q->rx_skbuff[entry] = NULL;
3474                                 rx_q->rx_zeroc_thresh++;
3475
3476                                 skb_put(skb, frame_len);
3477                                 dma_unmap_single(priv->device,
3478                                                  rx_q->rx_skbuff_dma[entry],
3479                                                  priv->dma_buf_sz,
3480                                                  DMA_FROM_DEVICE);
3481                         }
3482
3483                         if (netif_msg_pktdata(priv)) {
3484                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3485                                            frame_len);
3486                                 print_pkt(skb->data, frame_len);
3487                         }
3488
3489                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3490
3491                         stmmac_rx_vlan(priv->dev, skb);
3492
3493                         skb->protocol = eth_type_trans(skb, priv->dev);
3494
3495                         if (unlikely(!coe))
3496                                 skb_checksum_none_assert(skb);
3497                         else
3498                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3499
3500                         napi_gro_receive(&ch->napi, skb);
3501
3502                         priv->dev->stats.rx_packets++;
3503                         priv->dev->stats.rx_bytes += frame_len;
3504                 }
3505                 entry = next_entry;
3506         }
3507
3508         stmmac_rx_refill(priv, queue);
3509
3510         priv->xstats.rx_pkt_n += count;
3511
3512         return count;
3513 }
3514
3515 /**
3516  *  stmmac_napi_poll - stmmac poll method (NAPI)
3517  *  @napi : pointer to the napi structure.
3518  *  @budget : maximum number of packets that the current CPU can receive from
3519  *            all interfaces.
3520  *  Description :
3521  *  To look at the incoming frames and clear the tx resources.
3522  */
3523 static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3524 {
3525         struct stmmac_channel *ch =
3526                 container_of(napi, struct stmmac_channel, napi);
3527         struct stmmac_priv *priv = ch->priv_data;
3528         int work_done = 0, work_rem = budget;
3529         u32 chan = ch->index;
3530
3531         priv->xstats.napi_poll++;
3532
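             /* The budget is shared between the TX and RX paths: TX completion
              * runs first and whatever remains is handed to RX processing.
              */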
3533         if (ch->has_tx) {
3534                 int done = stmmac_tx_clean(priv, work_rem, chan);
3535
3536                 work_done += done;
3537                 work_rem -= done;
3538         }
3539
3540         if (ch->has_rx) {
3541                 int done = stmmac_rx(priv, work_rem, chan);
3542
3543                 work_done += done;
3544                 work_rem -= done;
3545         }
3546
3547         if (work_done < budget && napi_complete_done(napi, work_done))
3548                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3549
3550         return work_done;
3551 }
3552
3553 /**
3554  *  stmmac_tx_timeout
3555  *  @dev : Pointer to net device structure
3556  *  Description: this function is called when a packet transmission fails to
3557  *   complete within a reasonable time. The driver will mark the error in the
3558  *   netdev structure and arrange for the device to be reset to a sane state
3559  *   in order to transmit a new packet.
3560  */
3561 static void stmmac_tx_timeout(struct net_device *dev)
3562 {
3563         struct stmmac_priv *priv = netdev_priv(dev);
3564
3565         stmmac_global_err(priv);
3566 }
3567
3568 /**
3569  *  stmmac_set_rx_mode - entry point for multicast addressing
3570  *  @dev : pointer to the device structure
3571  *  Description:
3572  *  This function is a driver entry point which gets called by the kernel
3573  *  whenever multicast addresses must be enabled/disabled.
3574  *  Return value:
3575  *  void.
3576  */
3577 static void stmmac_set_rx_mode(struct net_device *dev)
3578 {
3579         struct stmmac_priv *priv = netdev_priv(dev);
3580
3581         stmmac_set_filter(priv, priv->hw, dev);
3582 }
3583
3584 /**
3585  *  stmmac_change_mtu - entry point to change MTU size for the device.
3586  *  @dev : device pointer.
3587  *  @new_mtu : the new MTU size for the device.
3588  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3589  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3590  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3591  *  Return value:
3592  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3593  *  file on failure.
3594  */
3595 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3596 {
3597         struct stmmac_priv *priv = netdev_priv(dev);
3598
3599         if (netif_running(dev)) {
3600                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3601                 return -EBUSY;
3602         }
3603
3604         dev->mtu = new_mtu;
3605
3606         netdev_update_features(dev);
3607
3608         return 0;
3609 }
3610
3611 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3612                                              netdev_features_t features)
3613 {
3614         struct stmmac_priv *priv = netdev_priv(dev);
3615
3616         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3617                 features &= ~NETIF_F_RXCSUM;
3618
3619         if (!priv->plat->tx_coe)
3620                 features &= ~NETIF_F_CSUM_MASK;
3621
3622         /* Some GMAC devices have buggy Jumbo frame support that
3623          * needs the Tx COE disabled for oversized frames
3624          * (due to limited buffer sizes). In this case we disable
3625          * the TX csum insertion in the TDES and do not use SF.
3626          */
3627         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3628                 features &= ~NETIF_F_CSUM_MASK;
3629
3630         /* Enable or disable TSO as requested by ethtool */
3631         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3632                 if (features & NETIF_F_TSO)
3633                         priv->tso = true;
3634                 else
3635                         priv->tso = false;
3636         }
3637
3638         return features;
3639 }
3640
3641 static int stmmac_set_features(struct net_device *netdev,
3642                                netdev_features_t features)
3643 {
3644         struct stmmac_priv *priv = netdev_priv(netdev);
3645
3646         /* Keep the COE Type if checksum offload is supported */
3647         if (features & NETIF_F_RXCSUM)
3648                 priv->hw->rx_csum = priv->plat->rx_coe;
3649         else
3650                 priv->hw->rx_csum = 0;
3651         /* No check needed because rx_coe has been set before and it will be
3652          * fixed in case of issue.
3653          */
3654         stmmac_rx_ipc(priv, priv->hw);
3655
3656         return 0;
3657 }
3658
3659 /**
3660  *  stmmac_interrupt - main ISR
3661  *  @irq: interrupt number.
3662  *  @dev_id: to pass the net device pointer.
3663  *  Description: this is the main driver interrupt service routine.
3664  *  It can call:
3665  *  o DMA service routine (to manage incoming frame reception and transmission
3666  *    status)
3667  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3668  *    interrupts.
3669  */
3670 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3671 {
3672         struct net_device *dev = (struct net_device *)dev_id;
3673         struct stmmac_priv *priv = netdev_priv(dev);
3674         u32 rx_cnt = priv->plat->rx_queues_to_use;
3675         u32 tx_cnt = priv->plat->tx_queues_to_use;
3676         u32 queues_count;
3677         u32 queue;
3678         bool xmac;
3679
3680         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3681         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3682
3683         if (priv->irq_wake)
3684                 pm_wakeup_event(priv->device, 0);
3685
3686         if (unlikely(!dev)) {
3687                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3688                 return IRQ_NONE;
3689         }
3690
3691         /* Check if adapter is up */
3692         if (test_bit(STMMAC_DOWN, &priv->state))
3693                 return IRQ_HANDLED;
3694         /* Check if a fatal error happened */
3695         if (stmmac_safety_feat_interrupt(priv))
3696                 return IRQ_HANDLED;
3697
3698         /* To handle GMAC own interrupts */
3699         if ((priv->plat->has_gmac) || xmac) {
3700                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3701                 int mtl_status;
3702
3703                 if (unlikely(status)) {
3704                         /* For LPI we need to save the tx status */
3705                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3706                                 priv->tx_path_in_lpi_mode = true;
3707                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3708                                 priv->tx_path_in_lpi_mode = false;
3709                 }
3710
3711                 for (queue = 0; queue < queues_count; queue++) {
3712                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3713
3714                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3715                                                                 queue);
3716                         if (mtl_status != -EINVAL)
3717                                 status |= mtl_status;
3718
3719                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3720                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3721                                                        rx_q->rx_tail_addr,
3722                                                        queue);
3723                 }
3724
3725                 /* PCS link status */
3726                 if (priv->hw->pcs) {
3727                         if (priv->xstats.pcs_link)
3728                                 netif_carrier_on(dev);
3729                         else
3730                                 netif_carrier_off(dev);
3731                 }
3732         }
3733
3734         /* To handle DMA interrupts */
3735         stmmac_dma_interrupt(priv);
3736
3737         return IRQ_HANDLED;
3738 }
3739
3740 #ifdef CONFIG_NET_POLL_CONTROLLER
3741 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3742  * to allow network I/O with interrupts disabled.
3743  */
3744 static void stmmac_poll_controller(struct net_device *dev)
3745 {
3746         disable_irq(dev->irq);
3747         stmmac_interrupt(dev->irq, dev);
3748         enable_irq(dev->irq);
3749 }
3750 #endif
3751
3752 /**
3753  *  stmmac_ioctl - Entry point for the Ioctl
3754  *  @dev: Device pointer.
3755  *  @rq: An IOCTL specific structure, that can contain a pointer to
3756  *  a proprietary structure used to pass information to the driver.
3757  *  @cmd: IOCTL command
3758  *  Description:
3759  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3760  */
3761 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3762 {
3763         int ret = -EOPNOTSUPP;
3764
3765         if (!netif_running(dev))
3766                 return -EINVAL;
3767
3768         switch (cmd) {
3769         case SIOCGMIIPHY:
3770         case SIOCGMIIREG:
3771         case SIOCSMIIREG:
3772                 if (!dev->phydev)
3773                         return -EINVAL;
3774                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3775                 break;
3776         case SIOCSHWTSTAMP:
3777                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3778                 break;
3779         default:
3780                 break;
3781         }
3782
3783         return ret;
3784 }
3785
3786 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3787                                     void *cb_priv)
3788 {
3789         struct stmmac_priv *priv = cb_priv;
3790         int ret = -EOPNOTSUPP;
3791
3792         stmmac_disable_all_queues(priv);
3793
3794         switch (type) {
3795         case TC_SETUP_CLSU32:
3796                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3797                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3798                 break;
3799         default:
3800                 break;
3801         }
3802
3803         stmmac_enable_all_queues(priv);
3804         return ret;
3805 }
3806
3807 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3808                                  struct tc_block_offload *f)
3809 {
3810         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3811                 return -EOPNOTSUPP;
3812
3813         switch (f->command) {
3814         case TC_BLOCK_BIND:
3815                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3816                                 priv, priv, f->extack);
3817         case TC_BLOCK_UNBIND:
3818                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3819                 return 0;
3820         default:
3821                 return -EOPNOTSUPP;
3822         }
3823 }
3824
3825 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3826                            void *type_data)
3827 {
3828         struct stmmac_priv *priv = netdev_priv(ndev);
3829
3830         switch (type) {
3831         case TC_SETUP_BLOCK:
3832                 return stmmac_setup_tc_block(priv, type_data);
3833         case TC_SETUP_QDISC_CBS:
3834                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3835         default:
3836                 return -EOPNOTSUPP;
3837         }
3838 }
3839
3840 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3841 {
3842         struct stmmac_priv *priv = netdev_priv(ndev);
3843         int ret = 0;
3844
3845         ret = eth_mac_addr(ndev, addr);
3846         if (ret)
3847                 return ret;
3848
3849         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3850
3851         return ret;
3852 }
3853
3854 #ifdef CONFIG_DEBUG_FS
3855 static struct dentry *stmmac_fs_dir;
3856
3857 static void sysfs_display_ring(void *head, int size, int extend_desc,
3858                                struct seq_file *seq)
3859 {
3860         int i;
3861         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3862         struct dma_desc *p = (struct dma_desc *)head;
3863
3864         for (i = 0; i < size; i++) {
3865                 if (extend_desc) {
3866                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3867                                    i, (unsigned int)virt_to_phys(ep),
3868                                    le32_to_cpu(ep->basic.des0),
3869                                    le32_to_cpu(ep->basic.des1),
3870                                    le32_to_cpu(ep->basic.des2),
3871                                    le32_to_cpu(ep->basic.des3));
3872                         ep++;
3873                 } else {
3874                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3875                                    i, (unsigned int)virt_to_phys(p),
3876                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3877                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3878                         p++;
3879                 }
3880                 seq_printf(seq, "\n");
3881         }
3882 }
3883
3884 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3885 {
3886         struct net_device *dev = seq->private;
3887         struct stmmac_priv *priv = netdev_priv(dev);
3888         u32 rx_count = priv->plat->rx_queues_to_use;
3889         u32 tx_count = priv->plat->tx_queues_to_use;
3890         u32 queue;
3891
3892         if ((dev->flags & IFF_UP) == 0)
3893                 return 0;
3894
3895         for (queue = 0; queue < rx_count; queue++) {
3896                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3897
3898                 seq_printf(seq, "RX Queue %d:\n", queue);
3899
3900                 if (priv->extend_desc) {
3901                         seq_printf(seq, "Extended descriptor ring:\n");
3902                         sysfs_display_ring((void *)rx_q->dma_erx,
3903                                            DMA_RX_SIZE, 1, seq);
3904                 } else {
3905                         seq_printf(seq, "Descriptor ring:\n");
3906                         sysfs_display_ring((void *)rx_q->dma_rx,
3907                                            DMA_RX_SIZE, 0, seq);
3908                 }
3909         }
3910
3911         for (queue = 0; queue < tx_count; queue++) {
3912                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3913
3914                 seq_printf(seq, "TX Queue %d:\n", queue);
3915
3916                 if (priv->extend_desc) {
3917                         seq_printf(seq, "Extended descriptor ring:\n");
3918                         sysfs_display_ring((void *)tx_q->dma_etx,
3919                                            DMA_TX_SIZE, 1, seq);
3920                 } else {
3921                         seq_printf(seq, "Descriptor ring:\n");
3922                         sysfs_display_ring((void *)tx_q->dma_tx,
3923                                            DMA_TX_SIZE, 0, seq);
3924                 }
3925         }
3926
3927         return 0;
3928 }
3929 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3930
3931 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3932 {
3933         struct net_device *dev = seq->private;
3934         struct stmmac_priv *priv = netdev_priv(dev);
3935
3936         if (!priv->hw_cap_support) {
3937                 seq_printf(seq, "DMA HW features not supported\n");
3938                 return 0;
3939         }
3940
3941         seq_printf(seq, "==============================\n");
3942         seq_printf(seq, "\tDMA HW features\n");
3943         seq_printf(seq, "==============================\n");
3944
3945         seq_printf(seq, "\t10/100 Mbps: %s\n",
3946                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3947         seq_printf(seq, "\t1000 Mbps: %s\n",
3948                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3949         seq_printf(seq, "\tHalf duplex: %s\n",
3950                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3951         seq_printf(seq, "\tHash Filter: %s\n",
3952                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3953         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3954                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3955         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3956                    (priv->dma_cap.pcs) ? "Y" : "N");
3957         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3958                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3959         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3960                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3961         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3962                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3963         seq_printf(seq, "\tRMON module: %s\n",
3964                    (priv->dma_cap.rmon) ? "Y" : "N");
3965         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3966                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3967         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3968                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3969         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3970                    (priv->dma_cap.eee) ? "Y" : "N");
3971         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3972         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3973                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3974         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3975                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3976                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3977         } else {
3978                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3979                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3980                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3981                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3982         }
3983         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3984                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3985         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3986                    priv->dma_cap.number_rx_channel);
3987         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3988                    priv->dma_cap.number_tx_channel);
3989         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3990                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3991
3992         return 0;
3993 }
3994 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
3995
3996 static int stmmac_init_fs(struct net_device *dev)
3997 {
3998         struct stmmac_priv *priv = netdev_priv(dev);
3999
4000         /* Create per netdev entries */
4001         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4002
4003         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4004                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4005
4006                 return -ENOMEM;
4007         }
4008
4009         /* Entry to report DMA RX/TX rings */
4010         priv->dbgfs_rings_status =
4011                 debugfs_create_file("descriptors_status", 0444,
4012                                     priv->dbgfs_dir, dev,
4013                                     &stmmac_rings_status_fops);
4014
4015         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4016                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4017                 debugfs_remove_recursive(priv->dbgfs_dir);
4018
4019                 return -ENOMEM;
4020         }
4021
4022         /* Entry to report the DMA HW features */
4023         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4024                                                   priv->dbgfs_dir,
4025                                                   dev, &stmmac_dma_cap_fops);
4026
4027         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4028                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4029                 debugfs_remove_recursive(priv->dbgfs_dir);
4030
4031                 return -ENOMEM;
4032         }
4033
4034         return 0;
4035 }
4036
4037 static void stmmac_exit_fs(struct net_device *dev)
4038 {
4039         struct stmmac_priv *priv = netdev_priv(dev);
4040
4041         debugfs_remove_recursive(priv->dbgfs_dir);
4042 }
4043 #endif /* CONFIG_DEBUG_FS */
4044
4045 static const struct net_device_ops stmmac_netdev_ops = {
4046         .ndo_open = stmmac_open,
4047         .ndo_start_xmit = stmmac_xmit,
4048         .ndo_stop = stmmac_release,
4049         .ndo_change_mtu = stmmac_change_mtu,
4050         .ndo_fix_features = stmmac_fix_features,
4051         .ndo_set_features = stmmac_set_features,
4052         .ndo_set_rx_mode = stmmac_set_rx_mode,
4053         .ndo_tx_timeout = stmmac_tx_timeout,
4054         .ndo_do_ioctl = stmmac_ioctl,
4055         .ndo_setup_tc = stmmac_setup_tc,
4056 #ifdef CONFIG_NET_POLL_CONTROLLER
4057         .ndo_poll_controller = stmmac_poll_controller,
4058 #endif
4059         .ndo_set_mac_address = stmmac_set_mac_address,
4060 };
4061
4062 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4063 {
4064         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4065                 return;
4066         if (test_bit(STMMAC_DOWN, &priv->state))
4067                 return;
4068
4069         netdev_err(priv->dev, "Reset adapter.\n");
4070
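             /* Serialise against any reset already in progress, then bring the
              * interface down and back up under the rtnl lock.
              */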
4071         rtnl_lock();
4072         netif_trans_update(priv->dev);
4073         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4074                 usleep_range(1000, 2000);
4075
4076         set_bit(STMMAC_DOWN, &priv->state);
4077         dev_close(priv->dev);
4078         dev_open(priv->dev, NULL);
4079         clear_bit(STMMAC_DOWN, &priv->state);
4080         clear_bit(STMMAC_RESETING, &priv->state);
4081         rtnl_unlock();
4082 }
4083
4084 static void stmmac_service_task(struct work_struct *work)
4085 {
4086         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4087                         service_task);
4088
4089         stmmac_reset_subtask(priv);
4090         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4091 }
4092
4093 /**
4094  *  stmmac_hw_init - Init the MAC device
4095  *  @priv: driver private structure
4096  *  Description: this function is to configure the MAC device according to
4097  *  some platform parameters or the HW capability register. It prepares the
4098  *  driver to use either ring or chain modes and to setup either enhanced or
4099  *  normal descriptors.
4100  */
4101 static int stmmac_hw_init(struct stmmac_priv *priv)
4102 {
4103         int ret;
4104
4105         /* dwmac-sun8i only works in chain mode */
4106         if (priv->plat->has_sun8i)
4107                 chain_mode = 1;
4108         priv->chain_mode = chain_mode;
4109
4110         /* Initialize HW Interface */
4111         ret = stmmac_hwif_init(priv);
4112         if (ret)
4113                 return ret;
4114
4115         /* Get the HW capability (new GMAC cores newer than 3.50a) */
4116         priv->hw_cap_support = stmmac_get_hw_features(priv);
4117         if (priv->hw_cap_support) {
4118                 dev_info(priv->device, "DMA HW capability register supported\n");
4119
4120                 /* We can override some gmac/dma configuration fields
4121                  * (e.g. enh_desc, tx_coe) that are passed through the
4122                  * platform with the values from the HW capability
4123                  * register (if supported).
4124                  */
4125                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4126                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4127                 priv->hw->pmt = priv->plat->pmt;
4128
4129                 /* TXCOE doesn't work in thresh DMA mode */
4130                 if (priv->plat->force_thresh_dma_mode)
4131                         priv->plat->tx_coe = 0;
4132                 else
4133                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4134
4135                 /* In case of GMAC4 rx_coe is from HW cap register. */
4136                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4137
4138                 if (priv->dma_cap.rx_coe_type2)
4139                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4140                 else if (priv->dma_cap.rx_coe_type1)
4141                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4142
4143         } else {
4144                 dev_info(priv->device, "No HW DMA feature register supported\n");
4145         }
4146
4147         if (priv->plat->rx_coe) {
4148                 priv->hw->rx_csum = priv->plat->rx_coe;
4149                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4150                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4151                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4152         }
4153         if (priv->plat->tx_coe)
4154                 dev_info(priv->device, "TX Checksum insertion supported\n");
4155
4156         if (priv->plat->pmt) {
4157                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4158                 device_set_wakeup_capable(priv->device, 1);
4159         }
4160
4161         if (priv->dma_cap.tsoen)
4162                 dev_info(priv->device, "TSO supported\n");
4163
4164         /* Run HW quirks, if any */
4165         if (priv->hwif_quirks) {
4166                 ret = priv->hwif_quirks(priv);
4167                 if (ret)
4168                         return ret;
4169         }
4170
4171         return 0;
4172 }
4173
4174 /**
4175  * stmmac_dvr_probe
4176  * @device: device pointer
4177  * @plat_dat: platform data pointer
4178  * @res: stmmac resource pointer
4179  * Description: this is the main probe function used to
4180  * call alloc_etherdev and allocate the private structure.
4181  * Return:
4182  * returns 0 on success, otherwise errno.
4183  */
4184 int stmmac_dvr_probe(struct device *device,
4185                      struct plat_stmmacenet_data *plat_dat,
4186                      struct stmmac_resources *res)
4187 {
4188         struct net_device *ndev = NULL;
4189         struct stmmac_priv *priv;
4190         u32 queue, maxq;
4191         int ret = 0;
4192
4193         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4194                                   MTL_MAX_TX_QUEUES,
4195                                   MTL_MAX_RX_QUEUES);
4196         if (!ndev)
4197                 return -ENOMEM;
4198
4199         SET_NETDEV_DEV(ndev, device);
4200
4201         priv = netdev_priv(ndev);
4202         priv->device = device;
4203         priv->dev = ndev;
4204
4205         stmmac_set_ethtool_ops(ndev);
4206         priv->pause = pause;
4207         priv->plat = plat_dat;
4208         priv->ioaddr = res->addr;
4209         priv->dev->base_addr = (unsigned long)res->addr;
4210
4211         priv->dev->irq = res->irq;
4212         priv->wol_irq = res->wol_irq;
4213         priv->lpi_irq = res->lpi_irq;
4214
4215         if (res->mac)
4216                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4217
4218         dev_set_drvdata(device, priv->dev);
4219
4220         /* Verify driver arguments */
4221         stmmac_verify_args();
4222
4223         /* Allocate workqueue */
4224         priv->wq = create_singlethread_workqueue("stmmac_wq");
4225         if (!priv->wq) {
4226                 dev_err(priv->device, "failed to create workqueue\n");
4227                 ret = -ENOMEM;
4228                 goto error_wq;
4229         }
4230
4231         INIT_WORK(&priv->service_task, stmmac_service_task);
4232
4233         /* Override with kernel parameters if supplied (XXX CRS XXX:
4234          * this needs to support multiple instances).
4235          */
4236         if ((phyaddr >= 0) && (phyaddr <= 31))
4237                 priv->plat->phy_addr = phyaddr;
4238
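        /* If a reset line is provided, pulse it (assert + deassert) to bring
         * the IP out of reset before touching any register.
         */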
4239         if (priv->plat->stmmac_rst) {
4240                 ret = reset_control_assert(priv->plat->stmmac_rst);
4241                 reset_control_deassert(priv->plat->stmmac_rst);
4242                 /* Some reset controllers provide only a reset callback
4243                  * instead of an assert + deassert callback pair.
4244                  */
4245                 if (ret == -ENOTSUPP)
4246                         reset_control_reset(priv->plat->stmmac_rst);
4247         }
4248
4249         /* Init MAC and get the capabilities */
4250         ret = stmmac_hw_init(priv);
4251         if (ret)
4252                 goto error_hw_init;
4253
4254         /* Configure real RX and TX queues */
4255         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4256         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4257
4258         ndev->netdev_ops = &stmmac_netdev_ops;
4259
4260         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4261                             NETIF_F_RXCSUM;
4262
4263         ret = stmmac_tc_init(priv, priv);
4264         if (!ret)
4265                 ndev->hw_features |= NETIF_F_HW_TC;
4267
4268         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4269                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4270                 priv->tso = true;
4271                 dev_info(priv->device, "TSO feature enabled\n");
4272         }
4273         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4274         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4275 #ifdef STMMAC_VLAN_TAG_USED
4276         /* Both mac100 and gmac support receive VLAN tag detection */
4277         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4278 #endif
4279         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4280
4281         /* MTU range: 46 - hw-specific max */
4282         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4283         if (priv->plat->has_xgmac)
4284                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4285         else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4286                 ndev->max_mtu = JUMBO_LEN;
4287         else
4288                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4289         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4290          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4291          */
4292         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4293             (priv->plat->maxmtu >= ndev->min_mtu))
4294                 ndev->max_mtu = priv->plat->maxmtu;
4295         else if (priv->plat->maxmtu < ndev->min_mtu)
4296                 dev_warn(priv->device,
4297                          "%s: warning: maxmtu has an invalid value (%d)\n",
4298                          __func__, priv->plat->maxmtu);
4299
4300         if (flow_ctrl)
4301                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4302
4303         /* Rx Watchdog is available in the COREs newer than the 3.40.
4304          * In some cases, for example on buggy HW, this feature
4305          * has to be disabled, and this can be done by passing the
4306          * riwt_off field from the platform.
4307          */
4308         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4309             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4310                 priv->use_riwt = 1;
4311                 dev_info(priv->device,
4312                          "Enable RX Mitigation via HW Watchdog Timer\n");
4313         }
4314
4315         /* Setup channels NAPI */
4316         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4317
4318         for (queue = 0; queue < maxq; queue++) {
4319                 struct stmmac_channel *ch = &priv->channel[queue];
4320
4321                 ch->priv_data = priv;
4322                 ch->index = queue;
4323
4324                 if (queue < priv->plat->rx_queues_to_use)
4325                         ch->has_rx = true;
4326                 if (queue < priv->plat->tx_queues_to_use)
4327                         ch->has_tx = true;
4328
4329                 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4330                                NAPI_POLL_WEIGHT);
4331         }
4332
4333         mutex_init(&priv->lock);
4334
4335         /* If a specific clk_csr value is passed from the platform
4336          * this means that the CSR Clock Range selection cannot be
4337          * changed at run-time and is fixed. Otherwise, the driver will try to
4338          * set the MDC clock dynamically according to the actual CSR
4339          * clock input.
4340          */
4341         if (!priv->plat->clk_csr)
4342                 stmmac_clk_csr_set(priv);
4343         else
4344                 priv->clk_csr = priv->plat->clk_csr;
4345
4346         stmmac_check_pcs_mode(priv);
4347
4348         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4349             priv->hw->pcs != STMMAC_PCS_TBI &&
4350             priv->hw->pcs != STMMAC_PCS_RTBI) {
4351                 /* MDIO bus Registration */
4352                 ret = stmmac_mdio_register(ndev);
4353                 if (ret < 0) {
4354                         dev_err(priv->device,
4355                                 "%s: MDIO bus (id: %d) registration failed",
4356                                 __func__, priv->plat->bus_id);
4357                         goto error_mdio_register;
4358                 }
4359         }
4360
4361         ret = register_netdev(ndev);
4362         if (ret) {
4363                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4364                         __func__, ret);
4365                 goto error_netdev_register;
4366         }
4367
4368 #ifdef CONFIG_DEBUG_FS
4369         ret = stmmac_init_fs(ndev);
4370         if (ret < 0)
4371                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4372                             __func__);
4373 #endif
4374
4375         return 0;
4376
4377 error_netdev_register:
4378         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4379             priv->hw->pcs != STMMAC_PCS_TBI &&
4380             priv->hw->pcs != STMMAC_PCS_RTBI)
4381                 stmmac_mdio_unregister(ndev);
4382 error_mdio_register:
4383         for (queue = 0; queue < maxq; queue++) {
4384                 struct stmmac_channel *ch = &priv->channel[queue];
4385
4386                 netif_napi_del(&ch->napi);
4387         }
4388 error_hw_init:
4389         destroy_workqueue(priv->wq);
4390 error_wq:
4391         free_netdev(ndev);
4392
4393         return ret;
4394 }
4395 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4396
4397 /**
4398  * stmmac_dvr_remove
4399  * @dev: device pointer
4400  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4401  * changes the link status and releases the DMA descriptor rings.
4402  */
4403 int stmmac_dvr_remove(struct device *dev)
4404 {
4405         struct net_device *ndev = dev_get_drvdata(dev);
4406         struct stmmac_priv *priv = netdev_priv(ndev);
4407
4408         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4409
4410 #ifdef CONFIG_DEBUG_FS
4411         stmmac_exit_fs(ndev);
4412 #endif
4413         stmmac_stop_all_dma(priv);
4414
4415         stmmac_mac_set(priv, priv->ioaddr, false);
4416         netif_carrier_off(ndev);
4417         unregister_netdev(ndev);
4418         if (priv->plat->stmmac_rst)
4419                 reset_control_assert(priv->plat->stmmac_rst);
4420         clk_disable_unprepare(priv->plat->pclk);
4421         clk_disable_unprepare(priv->plat->stmmac_clk);
4422         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4423             priv->hw->pcs != STMMAC_PCS_TBI &&
4424             priv->hw->pcs != STMMAC_PCS_RTBI)
4425                 stmmac_mdio_unregister(ndev);
4426         destroy_workqueue(priv->wq);
4427         mutex_destroy(&priv->lock);
4428         free_netdev(ndev);
4429
4430         return 0;
4431 }
4432 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4433
4434 /**
4435  * stmmac_suspend - suspend callback
4436  * @dev: device pointer
4437  * Description: this function suspends the device. It is called by the
4438  * platform driver to stop the network queues, program the PMT register
4439  * (for WoL) and clean up and release the driver resources.
4440  */
4441 int stmmac_suspend(struct device *dev)
4442 {
4443         struct net_device *ndev = dev_get_drvdata(dev);
4444         struct stmmac_priv *priv = netdev_priv(ndev);
4445
4446         if (!ndev || !netif_running(ndev))
4447                 return 0;
4448
4449         if (ndev->phydev)
4450                 phy_stop(ndev->phydev);
4451
4452         mutex_lock(&priv->lock);
4453
4454         netif_device_detach(ndev);
4455         stmmac_stop_all_queues(priv);
4456
4457         stmmac_disable_all_queues(priv);
4458
4459         /* Stop TX/RX DMA */
4460         stmmac_stop_all_dma(priv);
4461
4462         /* Enable Power down mode by programming the PMT regs */
4463         if (device_may_wakeup(priv->device)) {
4464                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4465                 priv->irq_wake = 1;
4466         } else {
4467                 stmmac_mac_set(priv, priv->ioaddr, false);
4468                 pinctrl_pm_select_sleep_state(priv->device);
4469                 /* Disable the clocks since wakeup (WoL) is not enabled */
4470                 clk_disable(priv->plat->pclk);
4471                 clk_disable(priv->plat->stmmac_clk);
4472         }
4473         mutex_unlock(&priv->lock);
4474
4475         priv->oldlink = false;
4476         priv->speed = SPEED_UNKNOWN;
4477         priv->oldduplex = DUPLEX_UNKNOWN;
4478         return 0;
4479 }
4480 EXPORT_SYMBOL_GPL(stmmac_suspend);
4481
4482 /**
4483  * stmmac_reset_queues_param - reset queue parameters
4484  * @priv: driver private structure
4485  */
4486 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4487 {
4488         u32 rx_cnt = priv->plat->rx_queues_to_use;
4489         u32 tx_cnt = priv->plat->tx_queues_to_use;
4490         u32 queue;
4491
4492         for (queue = 0; queue < rx_cnt; queue++) {
4493                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4494
4495                 rx_q->cur_rx = 0;
4496                 rx_q->dirty_rx = 0;
4497         }
4498
4499         for (queue = 0; queue < tx_cnt; queue++) {
4500                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4501
4502                 tx_q->cur_tx = 0;
4503                 tx_q->dirty_tx = 0;
4504                 tx_q->mss = 0;
4505         }
4506 }
4507
4508 /**
4509  * stmmac_resume - resume callback
4510  * @dev: device pointer
4511  * Description: on resume this function is invoked to set up the DMA and CORE
4512  * in a usable state.
4513  */
4514 int stmmac_resume(struct device *dev)
4515 {
4516         struct net_device *ndev = dev_get_drvdata(dev);
4517         struct stmmac_priv *priv = netdev_priv(ndev);
4518
4519         if (!netif_running(ndev))
4520                 return 0;
4521
4522         /* The Power Down bit in the PMT register is cleared
4523          * automatically as soon as a magic packet or a Wake-up frame
4524          * is received. Nevertheless, it's better to manually clear
4525          * this bit because it can generate problems while resuming
4526          * from other devices (e.g. serial console).
4527          */
4528         if (device_may_wakeup(priv->device)) {
4529                 mutex_lock(&priv->lock);
4530                 stmmac_pmt(priv, priv->hw, 0);
4531                 mutex_unlock(&priv->lock);
4532                 priv->irq_wake = 0;
4533         } else {
4534                 pinctrl_pm_select_default_state(priv->device);
4535                 /* Enable the clocks that were previously disabled */
4536                 clk_enable(priv->plat->stmmac_clk);
4537                 clk_enable(priv->plat->pclk);
4538                 /* Reset the PHY so that it's ready */
4539                 if (priv->mii)
4540                         stmmac_mdio_reset(priv->mii);
4541         }
4542
4543         netif_device_attach(ndev);
4544
4545         mutex_lock(&priv->lock);
4546
4547         stmmac_reset_queues_param(priv);
4548
4549         stmmac_clear_descriptors(priv);
4550
4551         stmmac_hw_setup(ndev, false);
4552         stmmac_init_tx_coalesce(priv);
4553         stmmac_set_rx_mode(ndev);
4554
4555         stmmac_enable_all_queues(priv);
4556
4557         stmmac_start_all_queues(priv);
4558
4559         mutex_unlock(&priv->lock);
4560
4561         if (ndev->phydev)
4562                 phy_start(ndev->phydev);
4563
4564         return 0;
4565 }
4566 EXPORT_SYMBOL_GPL(stmmac_resume);
4567
4568 #ifndef MODULE
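/**
 * stmmac_cmdline_opt - parse the built-in "stmmaceth=" boot options
 * @str: comma separated list of <option>:<value> pairs
 * Description: for built-in (non-modular) builds, parses the options
 * debug:, phyaddr:, buf_sz:, tc:, watchdog:, flow_ctrl:, pause:,
 * eee_timer: and chain_mode: and stores the values in the corresponding
 * module parameters.
 */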
4569 static int __init stmmac_cmdline_opt(char *str)
4570 {
4571         char *opt;
4572
4573         if (!str || !*str)
4574                 return -EINVAL;
4575         while ((opt = strsep(&str, ",")) != NULL) {
4576                 if (!strncmp(opt, "debug:", 6)) {
4577                         if (kstrtoint(opt + 6, 0, &debug))
4578                                 goto err;
4579                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4580                         if (kstrtoint(opt + 8, 0, &phyaddr))
4581                                 goto err;
4582                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4583                         if (kstrtoint(opt + 7, 0, &buf_sz))
4584                                 goto err;
4585                 } else if (!strncmp(opt, "tc:", 3)) {
4586                         if (kstrtoint(opt + 3, 0, &tc))
4587                                 goto err;
4588                 } else if (!strncmp(opt, "watchdog:", 9)) {
4589                         if (kstrtoint(opt + 9, 0, &watchdog))
4590                                 goto err;
4591                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4592                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4593                                 goto err;
4594                 } else if (!strncmp(opt, "pause:", 6)) {
4595                         if (kstrtoint(opt + 6, 0, &pause))
4596                                 goto err;
4597                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4598                         if (kstrtoint(opt + 10, 0, &eee_timer))
4599                                 goto err;
4600                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4601                         if (kstrtoint(opt + 11, 0, &chain_mode))
4602                                 goto err;
4603                 }
4604         }
4605         return 0;
4606
4607 err:
4608         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4609         return -EINVAL;
4610 }
4611
4612 __setup("stmmaceth=", stmmac_cmdline_opt);
4613 #endif /* MODULE */
4614
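/* Module init: create the main stmmac debugfs directory, if needed, when
 * CONFIG_DEBUG_FS is enabled.
 */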
4615 static int __init stmmac_init(void)
4616 {
4617 #ifdef CONFIG_DEBUG_FS
4618         /* Create debugfs main directory if it doesn't exist yet */
4619         if (!stmmac_fs_dir) {
4620                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4621
4622                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4623                         pr_err("ERROR %s, debugfs create directory failed\n",
4624                                STMMAC_RESOURCE_NAME);
4625
4626                         return -ENOMEM;
4627                 }
4628         }
4629 #endif
4630
4631         return 0;
4632 }
4633
4634 static void __exit stmmac_exit(void)
4635 {
4636 #ifdef CONFIG_DEBUG_FS
4637         debugfs_remove_recursive(stmmac_fs_dir);
4638 #endif
4639 }
4640
4641 module_init(stmmac_init)
4642 module_exit(stmmac_exit)
4643
4644 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4645 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4646 MODULE_LICENSE("GPL");