drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (linux-2.6-microblaze.git, commit f12dd59c85cf)
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "dwxgmac2.h"
55 #include "hwif.h"
56
57 #define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
58 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
59
60 /* Module parameters */
61 #define TX_TIMEO        5000
62 static int watchdog = TX_TIMEO;
63 module_param(watchdog, int, 0644);
64 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
65
66 static int debug = -1;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
69
70 static int phyaddr = -1;
71 module_param(phyaddr, int, 0444);
72 MODULE_PARM_DESC(phyaddr, "Physical device address");
73
74 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
75 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
76
77 static int flow_ctrl = FLOW_OFF;
78 module_param(flow_ctrl, int, 0644);
79 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81 static int pause = PAUSE_TIME;
82 module_param(pause, int, 0644);
83 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85 #define TC_DEFAULT 64
86 static int tc = TC_DEFAULT;
87 module_param(tc, int, 0644);
88 MODULE_PARM_DESC(tc, "DMA threshold control value");
89
90 #define DEFAULT_BUFSIZE 1536
91 static int buf_sz = DEFAULT_BUFSIZE;
92 module_param(buf_sz, int, 0644);
93 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
95 #define STMMAC_RX_COPYBREAK     256
96
97 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
99                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
101 #define STMMAC_DEFAULT_LPI_TIMER        1000
102 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103 module_param(eee_timer, int, 0644);
104 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
105 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
106
107 /* By default the driver uses ring mode to manage tx and rx descriptors,
108  * but the user can force chain mode instead of ring mode
109  */
110 static unsigned int chain_mode;
111 module_param(chain_mode, int, 0444);
112 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
113
114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
115
116 #ifdef CONFIG_DEBUG_FS
117 static int stmmac_init_fs(struct net_device *dev);
118 static void stmmac_exit_fs(struct net_device *dev);
119 #endif
120
121 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
123 /**
124  * stmmac_verify_args - verify the driver parameters.
125  * Description: it checks the driver parameters and sets a default in case of
126  * errors.
127  */
128 static void stmmac_verify_args(void)
129 {
130         if (unlikely(watchdog < 0))
131                 watchdog = TX_TIMEO;
132         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133                 buf_sz = DEFAULT_BUFSIZE;
134         if (unlikely(flow_ctrl > 1))
135                 flow_ctrl = FLOW_AUTO;
136         else if (likely(flow_ctrl < 0))
137                 flow_ctrl = FLOW_OFF;
138         if (unlikely((pause < 0) || (pause > 0xffff)))
139                 pause = PAUSE_TIME;
140         if (eee_timer < 0)
141                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
142 }
143
144 /**
145  * stmmac_disable_all_queues - Disable all queues
146  * @priv: driver private structure
147  */
148 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149 {
150         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
153         u32 queue;
154
155         for (queue = 0; queue < maxq; queue++) {
156                 struct stmmac_channel *ch = &priv->channel[queue];
157
158                 napi_disable(&ch->napi);
159         }
160 }
161
162 /**
163  * stmmac_enable_all_queues - Enable all queues
164  * @priv: driver private structure
165  */
166 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
167 {
168         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
169         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
170         u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
171         u32 queue;
172
173         for (queue = 0; queue < maxq; queue++) {
174                 struct stmmac_channel *ch = &priv->channel[queue];
175
176                 napi_enable(&ch->napi);
177         }
178 }
179
180 /**
181  * stmmac_stop_all_queues - Stop all queues
182  * @priv: driver private structure
183  */
184 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
185 {
186         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
187         u32 queue;
188
189         for (queue = 0; queue < tx_queues_cnt; queue++)
190                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
191 }
192
193 /**
194  * stmmac_start_all_queues - Start all queues
195  * @priv: driver private structure
196  */
197 static void stmmac_start_all_queues(struct stmmac_priv *priv)
198 {
199         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
200         u32 queue;
201
202         for (queue = 0; queue < tx_queues_cnt; queue++)
203                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
204 }
205
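/**
 * stmmac_service_event_schedule - schedule the service task
 * @priv: driver private structure
 * Description: queue the service work item unless the interface is going
 * down or the task has already been scheduled.
 */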
206 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
207 {
208         if (!test_bit(STMMAC_DOWN, &priv->state) &&
209             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
210                 queue_work(priv->wq, &priv->service_task);
211 }
212
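/**
 * stmmac_global_err - report a global error and request recovery
 * @priv: driver private structure
 * Description: take the carrier down, set the RESET_REQUESTED flag and
 * schedule the service task so that the error can be handled.
 */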
213 static void stmmac_global_err(struct stmmac_priv *priv)
214 {
215         netif_carrier_off(priv->dev);
216         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
217         stmmac_service_event_schedule(priv);
218 }
219
220 /**
221  * stmmac_clk_csr_set - dynamically set the MDC clock
222  * @priv: driver private structure
223  * Description: this is to dynamically set the MDC clock according to the csr
224  * clock input.
225  * Note:
226  *      If a specific clk_csr value is passed from the platform
227  *      this means that the CSR Clock Range selection cannot be
228  *      changed at run-time and it is fixed (as reported in the driver
229  *      documentation). Otherwise, the driver will try to set the MDC
230  *      clock dynamically according to the actual clock input.
231  */
232 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
233 {
234         u32 clk_rate;
235
236         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
237
238         /* The platform-provided default clk_csr is assumed valid for
239          * all cases except the ones handled below.
240          * For values higher than the IEEE 802.3 specified frequency
241          * we cannot estimate the proper divider because the frequency
242          * of clk_csr_i is not known, so the default divider is left
243          * unchanged.
244          */
245         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
246                 if (clk_rate < CSR_F_35M)
247                         priv->clk_csr = STMMAC_CSR_20_35M;
248                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
249                         priv->clk_csr = STMMAC_CSR_35_60M;
250                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
251                         priv->clk_csr = STMMAC_CSR_60_100M;
252                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
253                         priv->clk_csr = STMMAC_CSR_100_150M;
254                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
255                         priv->clk_csr = STMMAC_CSR_150_250M;
256                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
257                         priv->clk_csr = STMMAC_CSR_250_300M;
258         }
259
260         if (priv->plat->has_sun8i) {
261                 if (clk_rate > 160000000)
262                         priv->clk_csr = 0x03;
263                 else if (clk_rate > 80000000)
264                         priv->clk_csr = 0x02;
265                 else if (clk_rate > 40000000)
266                         priv->clk_csr = 0x01;
267                 else
268                         priv->clk_csr = 0;
269         }
270
271         if (priv->plat->has_xgmac) {
272                 if (clk_rate > 400000000)
273                         priv->clk_csr = 0x5;
274                 else if (clk_rate > 350000000)
275                         priv->clk_csr = 0x4;
276                 else if (clk_rate > 300000000)
277                         priv->clk_csr = 0x3;
278                 else if (clk_rate > 250000000)
279                         priv->clk_csr = 0x2;
280                 else if (clk_rate > 150000000)
281                         priv->clk_csr = 0x1;
282                 else
283                         priv->clk_csr = 0x0;
284         }
285 }
286
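/* Dump a packet buffer to the kernel log (debug helper) */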
287 static void print_pkt(unsigned char *buf, int len)
288 {
289         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
290         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
291 }
292
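/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: compute the free slots in the TX ring from the dirty and
 * current indexes; one slot is always kept unused so that a full ring can
 * be distinguished from an empty one.
 */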
293 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
294 {
295         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
296         u32 avail;
297
298         if (tx_q->dirty_tx > tx_q->cur_tx)
299                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
300         else
301                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
302
303         return avail;
304 }
305
306 /**
307  * stmmac_rx_dirty - Get RX queue dirty
308  * @priv: driver private structure
309  * @queue: RX queue index
310  */
311 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
312 {
313         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
314         u32 dirty;
315
316         if (rx_q->dirty_rx <= rx_q->cur_rx)
317                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
318         else
319                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
320
321         return dirty;
322 }
323
324 /**
325  * stmmac_hw_fix_mac_speed - callback for speed selection
326  * @priv: driver private structure
327  * Description: on some platforms (e.g. ST), some HW system configuration
328  * registers have to be set according to the link speed negotiated.
329  */
330 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
331 {
332         struct net_device *ndev = priv->dev;
333         struct phy_device *phydev = ndev->phydev;
334
335         if (likely(priv->plat->fix_mac_speed))
336                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
337 }
338
339 /**
340  * stmmac_enable_eee_mode - check and enter LPI mode
341  * @priv: driver private structure
342  * Description: this function checks that all TX queues are idle and, if so,
343  * enters LPI mode when EEE is enabled.
344  */
345 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
346 {
347         u32 tx_cnt = priv->plat->tx_queues_to_use;
348         u32 queue;
349
350         /* check if all TX queues have the work finished */
351         for (queue = 0; queue < tx_cnt; queue++) {
352                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
353
354                 if (tx_q->dirty_tx != tx_q->cur_tx)
355                         return; /* still unfinished work */
356         }
357
358         /* Check and enter in LPI mode */
359         if (!priv->tx_path_in_lpi_mode)
360                 stmmac_set_eee_mode(priv, priv->hw,
361                                 priv->plat->en_tx_lpi_clockgating);
362 }
363
364 /**
365  * stmmac_disable_eee_mode - disable and exit from LPI mode
366  * @priv: driver private structure
367  * Description: this function exits LPI mode and disables EEE when the LPI
368  * state is active. It is called from the xmit path.
369  */
370 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
371 {
372         stmmac_reset_eee_mode(priv, priv->hw);
373         del_timer_sync(&priv->eee_ctrl_timer);
374         priv->tx_path_in_lpi_mode = false;
375 }
376
377 /**
378  * stmmac_eee_ctrl_timer - EEE TX SW timer.
379  * @t: timer_list entry embedded in the driver private structure
380  * Description:
381  *  if there is no data transfer and we are not already in LPI state,
382  *  then the MAC transmitter can be moved to LPI state.
383  */
384 static void stmmac_eee_ctrl_timer(struct timer_list *t)
385 {
386         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
387
388         stmmac_enable_eee_mode(priv);
389         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
390 }
391
392 /**
393  * stmmac_eee_init - init EEE
394  * @priv: driver private structure
395  * Description:
396  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
397  *  can also manage EEE, this function enables the LPI state and starts the
398  *  related timer.
399  */
400 bool stmmac_eee_init(struct stmmac_priv *priv)
401 {
402         struct net_device *ndev = priv->dev;
403         int interface = priv->plat->interface;
404         bool ret = false;
405
406         if ((interface != PHY_INTERFACE_MODE_MII) &&
407             (interface != PHY_INTERFACE_MODE_GMII) &&
408             !phy_interface_mode_is_rgmii(interface))
409                 goto out;
410
411         /* When using the PCS we cannot deal with the phy registers at this
412          * stage, so we do not support extra features like EEE.
413          */
414         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
415             (priv->hw->pcs == STMMAC_PCS_TBI) ||
416             (priv->hw->pcs == STMMAC_PCS_RTBI))
417                 goto out;
418
419         /* MAC core supports the EEE feature. */
420         if (priv->dma_cap.eee) {
421                 int tx_lpi_timer = priv->tx_lpi_timer;
422
423                 /* Check if the PHY supports EEE */
424                 if (phy_init_eee(ndev->phydev, 1)) {
425                         /* Handle at run-time the case where EEE can no
426                          * longer be supported (for example because the link
427                          * partner caps have changed).
428                          * In that case the driver disables its own timers.
429                          */
430                         mutex_lock(&priv->lock);
431                         if (priv->eee_active) {
432                                 netdev_dbg(priv->dev, "disable EEE\n");
433                                 del_timer_sync(&priv->eee_ctrl_timer);
434                                 stmmac_set_eee_timer(priv, priv->hw, 0,
435                                                 tx_lpi_timer);
436                         }
437                         priv->eee_active = 0;
438                         mutex_unlock(&priv->lock);
439                         goto out;
440                 }
441                 /* Activate the EEE and start timers */
442                 mutex_lock(&priv->lock);
443                 if (!priv->eee_active) {
444                         priv->eee_active = 1;
445                         timer_setup(&priv->eee_ctrl_timer,
446                                     stmmac_eee_ctrl_timer, 0);
447                         mod_timer(&priv->eee_ctrl_timer,
448                                   STMMAC_LPI_T(eee_timer));
449
450                         stmmac_set_eee_timer(priv, priv->hw,
451                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
452                 }
453                 /* Set HW EEE according to the speed */
454                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
455
456                 ret = true;
457                 mutex_unlock(&priv->lock);
458
459                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
460         }
461 out:
462         return ret;
463 }
464
465 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
466  * @priv: driver private structure
467  * @p : descriptor pointer
468  * @skb : the socket buffer
469  * Description :
470  * This function reads the timestamp from the descriptor, performs some
471  * sanity checks and passes it to the stack.
472  */
473 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
474                                    struct dma_desc *p, struct sk_buff *skb)
475 {
476         struct skb_shared_hwtstamps shhwtstamp;
477         u64 ns;
478
479         if (!priv->hwts_tx_en)
480                 return;
481
482         /* exit if skb doesn't support hw tstamp */
483         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
484                 return;
485
486         /* check tx tstamp status */
487         if (stmmac_get_tx_timestamp_status(priv, p)) {
488                 /* get the valid tstamp */
489                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
490
491                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
492                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
493
494                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
495                 /* pass tstamp to stack */
496                 skb_tstamp_tx(skb, &shhwtstamp);
497         }
498
499         return;
500 }
501
502 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
503  * @priv: driver private structure
504  * @p : descriptor pointer
505  * @np : next descriptor pointer
506  * @skb : the socket buffer
507  * Description :
508  * This function reads the received packet's timestamp from the descriptor
509  * and passes it to the stack. It also performs some sanity checks.
510  */
511 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
512                                    struct dma_desc *np, struct sk_buff *skb)
513 {
514         struct skb_shared_hwtstamps *shhwtstamp = NULL;
515         struct dma_desc *desc = p;
516         u64 ns;
517
518         if (!priv->hwts_rx_en)
519                 return;
520         /* For GMAC4, the valid timestamp is from CTX next desc. */
521         if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
522                 desc = np;
523
524         /* Check if timestamp is available */
525         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
526                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
527                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
528                 shhwtstamp = skb_hwtstamps(skb);
529                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
530                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
531         } else  {
532                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
533         }
534 }
535
536 /**
537  *  stmmac_hwtstamp_set - control hardware timestamping.
538  *  @dev: device pointer.
539  *  @ifr: ioctl request structure; ifr_data points to the user's
540  *  struct hwtstamp_config carrying the requested settings.
541  *  Description:
542  *  This function configures the MAC to enable/disable both outgoing (TX)
543  *  and incoming (RX) packet timestamping based on user input.
544  *  Return Value:
545  *  0 on success and an appropriate -ve integer on failure.
546  */
547 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
548 {
549         struct stmmac_priv *priv = netdev_priv(dev);
550         struct hwtstamp_config config;
551         struct timespec64 now;
552         u64 temp = 0;
553         u32 ptp_v2 = 0;
554         u32 tstamp_all = 0;
555         u32 ptp_over_ipv4_udp = 0;
556         u32 ptp_over_ipv6_udp = 0;
557         u32 ptp_over_ethernet = 0;
558         u32 snap_type_sel = 0;
559         u32 ts_master_en = 0;
560         u32 ts_event_en = 0;
561         u32 value = 0;
562         u32 sec_inc;
563         bool xmac;
564
565         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
566
567         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
568                 netdev_alert(priv->dev, "No support for HW time stamping\n");
569                 priv->hwts_tx_en = 0;
570                 priv->hwts_rx_en = 0;
571
572                 return -EOPNOTSUPP;
573         }
574
575         if (copy_from_user(&config, ifr->ifr_data,
576                            sizeof(config)))
577                 return -EFAULT;
578
579         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
580                    __func__, config.flags, config.tx_type, config.rx_filter);
581
582         /* reserved for future extensions */
583         if (config.flags)
584                 return -EINVAL;
585
586         if (config.tx_type != HWTSTAMP_TX_OFF &&
587             config.tx_type != HWTSTAMP_TX_ON)
588                 return -ERANGE;
589
590         if (priv->adv_ts) {
591                 switch (config.rx_filter) {
592                 case HWTSTAMP_FILTER_NONE:
593                         /* time stamp no incoming packet at all */
594                         config.rx_filter = HWTSTAMP_FILTER_NONE;
595                         break;
596
597                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
598                         /* PTP v1, UDP, any kind of event packet */
599                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
600                         /* 'xmac' hardware can support Sync, Pdelay_Req and
601                          * Pdelay_resp by setting bit14 and bits17/16 to 01
602                          * This leaves Delay_Req timestamps out.
603                          * Enable all events *and* general purpose message
604                          * timestamping
605                          */
606                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
607                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
608                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
609                         break;
610
611                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
612                         /* PTP v1, UDP, Sync packet */
613                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
614                         /* take time stamp for SYNC messages only */
615                         ts_event_en = PTP_TCR_TSEVNTENA;
616
617                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
619                         break;
620
621                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
622                         /* PTP v1, UDP, Delay_req packet */
623                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
624                         /* take time stamp for Delay_Req messages only */
625                         ts_master_en = PTP_TCR_TSMSTRENA;
626                         ts_event_en = PTP_TCR_TSEVNTENA;
627
628                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
629                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
630                         break;
631
632                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
633                         /* PTP v2, UDP, any kind of event packet */
634                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
635                         ptp_v2 = PTP_TCR_TSVER2ENA;
636                         /* take time stamp for all event messages */
637                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
638
639                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
640                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
641                         break;
642
643                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
644                         /* PTP v2, UDP, Sync packet */
645                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
646                         ptp_v2 = PTP_TCR_TSVER2ENA;
647                         /* take time stamp for SYNC messages only */
648                         ts_event_en = PTP_TCR_TSEVNTENA;
649
650                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
651                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
652                         break;
653
654                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
655                         /* PTP v2, UDP, Delay_req packet */
656                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
657                         ptp_v2 = PTP_TCR_TSVER2ENA;
658                         /* take time stamp for Delay_Req messages only */
659                         ts_master_en = PTP_TCR_TSMSTRENA;
660                         ts_event_en = PTP_TCR_TSEVNTENA;
661
662                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
663                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
664                         break;
665
666                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
667                         /* PTP v2/802.1AS, any layer, any kind of event packet */
668                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
669                         ptp_v2 = PTP_TCR_TSVER2ENA;
670                         snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
671                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673                         ptp_over_ethernet = PTP_TCR_TSIPENA;
674                         break;
675
676                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
677                         /* PTP v2/802.1AS, any layer, Sync packet */
678                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
679                         ptp_v2 = PTP_TCR_TSVER2ENA;
680                         /* take time stamp for SYNC messages only */
681                         ts_event_en = PTP_TCR_TSEVNTENA;
682
683                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685                         ptp_over_ethernet = PTP_TCR_TSIPENA;
686                         break;
687
688                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
689                         /* PTP v2/802.1AS, any layer, Delay_req packet */
690                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
691                         ptp_v2 = PTP_TCR_TSVER2ENA;
692                         /* take time stamp for Delay_Req messages only */
693                         ts_master_en = PTP_TCR_TSMSTRENA;
694                         ts_event_en = PTP_TCR_TSEVNTENA;
695
696                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
697                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
698                         ptp_over_ethernet = PTP_TCR_TSIPENA;
699                         break;
700
701                 case HWTSTAMP_FILTER_NTP_ALL:
702                 case HWTSTAMP_FILTER_ALL:
703                         /* time stamp any incoming packet */
704                         config.rx_filter = HWTSTAMP_FILTER_ALL;
705                         tstamp_all = PTP_TCR_TSENALL;
706                         break;
707
708                 default:
709                         return -ERANGE;
710                 }
711         } else {
712                 switch (config.rx_filter) {
713                 case HWTSTAMP_FILTER_NONE:
714                         config.rx_filter = HWTSTAMP_FILTER_NONE;
715                         break;
716                 default:
717                         /* PTP v1, UDP, any kind of event packet */
718                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
719                         break;
720                 }
721         }
722         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
723         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
724
725         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
726                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
727         else {
728                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
729                          tstamp_all | ptp_v2 | ptp_over_ethernet |
730                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
731                          ts_master_en | snap_type_sel);
732                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
733
734                 /* program Sub Second Increment reg */
735                 stmmac_config_sub_second_increment(priv,
736                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
737                                 xmac, &sec_inc);
738                 temp = div_u64(1000000000ULL, sec_inc);
739
740                 /* Store sub second increment and flags for later use */
741                 priv->sub_second_inc = sec_inc;
742                 priv->systime_flags = value;
743
744                 /* Calculate the default addend value:
745                  * addend = (2^32 * (1e9 / sec_inc)) / clk_ptp_rate,
746                  * i.e. 2^32 divided by the ratio between the PTP reference
747                  * clock rate and the frequency implied by sec_inc.
748                  */
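                /* For example, with hypothetical rates clk_ptp_rate = 50 MHz
                 * and sec_inc = 40 ns: 1e9 / sec_inc = 25 MHz, so
                 * addend = (25e6 << 32) / 50e6 = 2^31 = 0x80000000.
                 */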
749                 temp = (u64)(temp << 32);
750                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
751                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
752
753                 /* initialize system time */
754                 ktime_get_real_ts64(&now);
755
756                 /* lower 32 bits of tv_sec are safe until y2106 */
757                 stmmac_init_systime(priv, priv->ptpaddr,
758                                 (u32)now.tv_sec, now.tv_nsec);
759         }
760
761         memcpy(&priv->tstamp_config, &config, sizeof(config));
762
763         return copy_to_user(ifr->ifr_data, &config,
764                             sizeof(config)) ? -EFAULT : 0;
765 }
766
767 /**
768  *  stmmac_hwtstamp_get - read hardware timestamping.
769  *  @dev: device pointer.
770  *  @ifr: ioctl request structure; ifr_data points to the user's
771  *  struct hwtstamp_config where the current settings are returned.
772  *  Description:
773  *  This function obtains the current hardware timestamping settings
774  *  as requested.
775  */
776 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
777 {
778         struct stmmac_priv *priv = netdev_priv(dev);
779         struct hwtstamp_config *config = &priv->tstamp_config;
780
781         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
782                 return -EOPNOTSUPP;
783
784         return copy_to_user(ifr->ifr_data, config,
785                             sizeof(*config)) ? -EFAULT : 0;
786 }
787
788 /**
789  * stmmac_init_ptp - init PTP
790  * @priv: driver private structure
791  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
792  * This is done by looking at the HW cap. register.
793  * This function also registers the ptp driver.
794  */
795 static int stmmac_init_ptp(struct stmmac_priv *priv)
796 {
797         bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
798
799         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
800                 return -EOPNOTSUPP;
801
802         priv->adv_ts = 0;
803         /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
804         if (xmac && priv->dma_cap.atime_stamp)
805                 priv->adv_ts = 1;
806         /* Dwmac 3.x core with extend_desc can support adv_ts */
807         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
808                 priv->adv_ts = 1;
809
810         if (priv->dma_cap.time_stamp)
811                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
812
813         if (priv->adv_ts)
814                 netdev_info(priv->dev,
815                             "IEEE 1588-2008 Advanced Timestamp supported\n");
816
817         priv->hwts_tx_en = 0;
818         priv->hwts_rx_en = 0;
819
820         stmmac_ptp_register(priv);
821
822         return 0;
823 }
824
825 static void stmmac_release_ptp(struct stmmac_priv *priv)
826 {
827         if (priv->plat->clk_ptp_ref)
828                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
829         stmmac_ptp_unregister(priv);
830 }
831
832 /**
833  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
834  *  @priv: driver private structure
835  *  Description: It is used for configuring the flow control in all queues
836  */
837 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
838 {
839         u32 tx_cnt = priv->plat->tx_queues_to_use;
840
841         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
842                         priv->pause, tx_cnt);
843 }
844
845 /**
846  * stmmac_adjust_link - adjusts the link parameters
847  * @dev: net device structure
848  * Description: this is the helper called by the physical abstraction layer
849  * drivers to communicate the phy link status. According to the speed and
850  * duplex, this driver can invoke registered glue-logic as well.
851  * It also invokes the EEE initialization because the link may switch
852  * between different networks (that may be EEE capable).
853  */
854 static void stmmac_adjust_link(struct net_device *dev)
855 {
856         struct stmmac_priv *priv = netdev_priv(dev);
857         struct phy_device *phydev = dev->phydev;
858         bool new_state = false;
859
860         if (!phydev)
861                 return;
862
863         mutex_lock(&priv->lock);
864
865         if (phydev->link) {
866                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
867
868                 /* Now we make sure that we can be in full duplex mode.
869                  * If not, we operate in half-duplex mode. */
870                 if (phydev->duplex != priv->oldduplex) {
871                         new_state = true;
872                         if (!phydev->duplex)
873                                 ctrl &= ~priv->hw->link.duplex;
874                         else
875                                 ctrl |= priv->hw->link.duplex;
876                         priv->oldduplex = phydev->duplex;
877                 }
878                 /* Flow Control operation */
879                 if (phydev->pause)
880                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
881
882                 if (phydev->speed != priv->speed) {
883                         new_state = true;
884                         ctrl &= ~priv->hw->link.speed_mask;
885                         switch (phydev->speed) {
886                         case SPEED_1000:
887                                 ctrl |= priv->hw->link.speed1000;
888                                 break;
889                         case SPEED_100:
890                                 ctrl |= priv->hw->link.speed100;
891                                 break;
892                         case SPEED_10:
893                                 ctrl |= priv->hw->link.speed10;
894                                 break;
895                         default:
896                                 netif_warn(priv, link, priv->dev,
897                                            "broken speed: %d\n", phydev->speed);
898                                 phydev->speed = SPEED_UNKNOWN;
899                                 break;
900                         }
901                         if (phydev->speed != SPEED_UNKNOWN)
902                                 stmmac_hw_fix_mac_speed(priv);
903                         priv->speed = phydev->speed;
904                 }
905
906                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
907
908                 if (!priv->oldlink) {
909                         new_state = true;
910                         priv->oldlink = true;
911                 }
912         } else if (priv->oldlink) {
913                 new_state = true;
914                 priv->oldlink = false;
915                 priv->speed = SPEED_UNKNOWN;
916                 priv->oldduplex = DUPLEX_UNKNOWN;
917         }
918
919         if (new_state && netif_msg_link(priv))
920                 phy_print_status(phydev);
921
922         mutex_unlock(&priv->lock);
923
924         if (phydev->is_pseudo_fixed_link)
925                 /* Stop the PHY layer from calling the adjust_link hook when
926                  * a switch is attached to the stmmac driver.
927                  */
928                 phydev->irq = PHY_IGNORE_INTERRUPT;
929         else
930                 /* At this stage, init the EEE if supported.
931                  * Never called in case of fixed_link.
932                  */
933                 priv->eee_enabled = stmmac_eee_init(priv);
934 }
935
936 /**
937  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
938  * @priv: driver private structure
939  * Description: this is to verify if the HW supports the Physical Coding
940  * Sublayer (PCS) interface, which can be used when the MAC is
941  * configured for the TBI, RTBI, or SGMII PHY interface.
942  */
943 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
944 {
945         int interface = priv->plat->interface;
946
947         if (priv->dma_cap.pcs) {
948                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
949                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
950                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
951                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
952                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
953                         priv->hw->pcs = STMMAC_PCS_RGMII;
954                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
955                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
956                         priv->hw->pcs = STMMAC_PCS_SGMII;
957                 }
958         }
959 }
960
961 /**
962  * stmmac_init_phy - PHY initialization
963  * @dev: net device structure
964  * Description: it initializes the driver's PHY state, and attaches the PHY
965  * to the mac driver.
966  *  Return value:
967  *  0 on success
968  */
969 static int stmmac_init_phy(struct net_device *dev)
970 {
971         struct stmmac_priv *priv = netdev_priv(dev);
972         u32 tx_cnt = priv->plat->tx_queues_to_use;
973         struct phy_device *phydev;
974         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
975         char bus_id[MII_BUS_ID_SIZE];
976         int interface = priv->plat->interface;
977         int max_speed = priv->plat->max_speed;
978         priv->oldlink = false;
979         priv->speed = SPEED_UNKNOWN;
980         priv->oldduplex = DUPLEX_UNKNOWN;
981
982         if (priv->plat->phy_node) {
983                 phydev = of_phy_connect(dev, priv->plat->phy_node,
984                                         &stmmac_adjust_link, 0, interface);
985         } else {
986                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
987                          priv->plat->bus_id);
988
989                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
990                          priv->plat->phy_addr);
991                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
992                            phy_id_fmt);
993
994                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
995                                      interface);
996         }
997
998         if (IS_ERR_OR_NULL(phydev)) {
999                 netdev_err(priv->dev, "Could not attach to PHY\n");
1000                 if (!phydev)
1001                         return -ENODEV;
1002
1003                 return PTR_ERR(phydev);
1004         }
1005
1006         /* Stop Advertising 1000BASE Capability if interface is not GMII */
1007         if ((interface == PHY_INTERFACE_MODE_MII) ||
1008             (interface == PHY_INTERFACE_MODE_RMII) ||
1009                 (max_speed < 1000 && max_speed > 0))
1010                 phy_set_max_speed(phydev, SPEED_100);
1011
1012         /*
1013          * Half-duplex mode is not supported with multiqueue;
1014          * half-duplex can only work with a single queue.
1015          */
1016         if (tx_cnt > 1) {
1017                 phy_remove_link_mode(phydev,
1018                                      ETHTOOL_LINK_MODE_10baseT_Half_BIT);
1019                 phy_remove_link_mode(phydev,
1020                                      ETHTOOL_LINK_MODE_100baseT_Half_BIT);
1021                 phy_remove_link_mode(phydev,
1022                                      ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
1023         }
1024
1025         /*
1026          * Broken HW is sometimes missing the pull-up resistor on the
1027          * MDIO line, which results in reads to non-existent devices returning
1028          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1029          * device as well.
1030          * Note: phydev->phy_id is the result of reading the UID PHY registers.
1031          */
1032         if (!priv->plat->phy_node && phydev->phy_id == 0) {
1033                 phy_disconnect(phydev);
1034                 return -ENODEV;
1035         }
1036
1037         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
1038          * subsequent PHY polling; make sure we force a link transition if
1039          * we have an UP/DOWN/UP transition
1040          */
1041         if (phydev->is_pseudo_fixed_link)
1042                 phydev->irq = PHY_POLL;
1043
1044         phy_attached_info(phydev);
1045         return 0;
1046 }
1047
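/* Dump the RX descriptor rings of every RX queue (debug helper) */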
1048 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1049 {
1050         u32 rx_cnt = priv->plat->rx_queues_to_use;
1051         void *head_rx;
1052         u32 queue;
1053
1054         /* Display RX rings */
1055         for (queue = 0; queue < rx_cnt; queue++) {
1056                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1057
1058                 pr_info("\tRX Queue %u rings\n", queue);
1059
1060                 if (priv->extend_desc)
1061                         head_rx = (void *)rx_q->dma_erx;
1062                 else
1063                         head_rx = (void *)rx_q->dma_rx;
1064
1065                 /* Display RX ring */
1066                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1067         }
1068 }
1069
1070 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1071 {
1072         u32 tx_cnt = priv->plat->tx_queues_to_use;
1073         void *head_tx;
1074         u32 queue;
1075
1076         /* Display TX rings */
1077         for (queue = 0; queue < tx_cnt; queue++) {
1078                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1079
1080                 pr_info("\tTX Queue %d rings\n", queue);
1081
1082                 if (priv->extend_desc)
1083                         head_tx = (void *)tx_q->dma_etx;
1084                 else
1085                         head_tx = (void *)tx_q->dma_tx;
1086
1087                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1088         }
1089 }
1090
1091 static void stmmac_display_rings(struct stmmac_priv *priv)
1092 {
1093         /* Display RX ring */
1094         stmmac_display_rx_rings(priv);
1095
1096         /* Display TX ring */
1097         stmmac_display_tx_rings(priv);
1098 }
1099
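/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size
 * Description: map the MTU to one of the supported DMA buffer sizes
 * (DEFAULT_BUFSIZE, 2 KiB, 4 KiB or 8 KiB).
 */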
1100 static int stmmac_set_bfsize(int mtu, int bufsize)
1101 {
1102         int ret = bufsize;
1103
1104         if (mtu >= BUF_SIZE_4KiB)
1105                 ret = BUF_SIZE_8KiB;
1106         else if (mtu >= BUF_SIZE_2KiB)
1107                 ret = BUF_SIZE_4KiB;
1108         else if (mtu > DEFAULT_BUFSIZE)
1109                 ret = BUF_SIZE_2KiB;
1110         else
1111                 ret = DEFAULT_BUFSIZE;
1112
1113         return ret;
1114 }
1115
1116 /**
1117  * stmmac_clear_rx_descriptors - clear RX descriptors
1118  * @priv: driver private structure
1119  * @queue: RX queue index
1120  * Description: this function is called to clear the RX descriptors
1121  * whether basic or extended descriptors are used.
1122  */
1123 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1124 {
1125         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1126         int i;
1127
1128         /* Clear the RX descriptors */
1129         for (i = 0; i < DMA_RX_SIZE; i++)
1130                 if (priv->extend_desc)
1131                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1132                                         priv->use_riwt, priv->mode,
1133                                         (i == DMA_RX_SIZE - 1));
1134                 else
1135                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1136                                         priv->use_riwt, priv->mode,
1137                                         (i == DMA_RX_SIZE - 1));
1138 }
1139
1140 /**
1141  * stmmac_clear_tx_descriptors - clear tx descriptors
1142  * @priv: driver private structure
1143  * @queue: TX queue index.
1144  * Description: this function is called to clear the TX descriptors
1145  * whether basic or extended descriptors are used.
1146  */
1147 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1148 {
1149         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1150         int i;
1151
1152         /* Clear the TX descriptors */
1153         for (i = 0; i < DMA_TX_SIZE; i++)
1154                 if (priv->extend_desc)
1155                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1156                                         priv->mode, (i == DMA_TX_SIZE - 1));
1157                 else
1158                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1159                                         priv->mode, (i == DMA_TX_SIZE - 1));
1160 }
1161
1162 /**
1163  * stmmac_clear_descriptors - clear descriptors
1164  * @priv: driver private structure
1165  * Description: this function is called to clear the TX and RX descriptors
1166  * whether basic or extended descriptors are used.
1167  */
1168 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1169 {
1170         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1171         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1172         u32 queue;
1173
1174         /* Clear the RX descriptors */
1175         for (queue = 0; queue < rx_queue_cnt; queue++)
1176                 stmmac_clear_rx_descriptors(priv, queue);
1177
1178         /* Clear the TX descriptors */
1179         for (queue = 0; queue < tx_queue_cnt; queue++)
1180                 stmmac_clear_tx_descriptors(priv, queue);
1181 }
1182
1183 /**
1184  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1185  * @priv: driver private structure
1186  * @p: descriptor pointer
1187  * @i: descriptor index
1188  * @flags: gfp flag
1189  * @queue: RX queue index
1190  * Description: this function is called to allocate a receive buffer, perform
1191  * the DMA mapping and init the descriptor.
1192  */
1193 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1194                                   int i, gfp_t flags, u32 queue)
1195 {
1196         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1197         struct sk_buff *skb;
1198
1199         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1200         if (!skb) {
1201                 netdev_err(priv->dev,
1202                            "%s: Rx init fails; skb is NULL\n", __func__);
1203                 return -ENOMEM;
1204         }
1205         rx_q->rx_skbuff[i] = skb;
1206         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1207                                                 priv->dma_buf_sz,
1208                                                 DMA_FROM_DEVICE);
1209         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1210                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1211                 dev_kfree_skb_any(skb);
1212                 return -EINVAL;
1213         }
1214
1215         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1216
1217         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1218                 stmmac_init_desc3(priv, p);
1219
1220         return 0;
1221 }
1222
1223 /**
1224  * stmmac_free_rx_buffer - free RX dma buffers
1225  * @priv: private structure
1226  * @queue: RX queue index
1227  * @i: buffer index.
1228  */
1229 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1230 {
1231         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1232
1233         if (rx_q->rx_skbuff[i]) {
1234                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1235                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1236                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1237         }
1238         rx_q->rx_skbuff[i] = NULL;
1239 }
1240
1241 /**
1242  * stmmac_free_tx_buffer - free TX dma buffers
1243  * @priv: private structure
1244  * @queue: TX queue index
1245  * @i: buffer index.
1246  */
1247 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1248 {
1249         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1250
1251         if (tx_q->tx_skbuff_dma[i].buf) {
1252                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1253                         dma_unmap_page(priv->device,
1254                                        tx_q->tx_skbuff_dma[i].buf,
1255                                        tx_q->tx_skbuff_dma[i].len,
1256                                        DMA_TO_DEVICE);
1257                 else
1258                         dma_unmap_single(priv->device,
1259                                          tx_q->tx_skbuff_dma[i].buf,
1260                                          tx_q->tx_skbuff_dma[i].len,
1261                                          DMA_TO_DEVICE);
1262         }
1263
1264         if (tx_q->tx_skbuff[i]) {
1265                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1266                 tx_q->tx_skbuff[i] = NULL;
1267                 tx_q->tx_skbuff_dma[i].buf = 0;
1268                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1269         }
1270 }
1271
1272 /**
1273  * init_dma_rx_desc_rings - init the RX descriptor rings
1274  * @dev: net device structure
1275  * @flags: gfp flag.
1276  * Description: this function initializes the DMA RX descriptors
1277  * and allocates the socket buffers. It supports the chained and ring
1278  * modes.
1279  */
1280 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1281 {
1282         struct stmmac_priv *priv = netdev_priv(dev);
1283         u32 rx_count = priv->plat->rx_queues_to_use;
1284         int ret = -ENOMEM;
1285         int bfsize = 0;
1286         int queue;
1287         int i;
1288
1289         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1290         if (bfsize < 0)
1291                 bfsize = 0;
1292
1293         if (bfsize < BUF_SIZE_16KiB)
1294                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1295
1296         priv->dma_buf_sz = bfsize;
1297
1298         /* RX INITIALIZATION */
1299         netif_dbg(priv, probe, priv->dev,
1300                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1301
1302         for (queue = 0; queue < rx_count; queue++) {
1303                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1304
1305                 netif_dbg(priv, probe, priv->dev,
1306                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1307                           (u32)rx_q->dma_rx_phy);
1308
1309                 for (i = 0; i < DMA_RX_SIZE; i++) {
1310                         struct dma_desc *p;
1311
1312                         if (priv->extend_desc)
1313                                 p = &((rx_q->dma_erx + i)->basic);
1314                         else
1315                                 p = rx_q->dma_rx + i;
1316
1317                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1318                                                      queue);
1319                         if (ret)
1320                                 goto err_init_rx_buffers;
1321
1322                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1323                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1324                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1325                 }
1326
1327                 rx_q->cur_rx = 0;
1328                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1329
1330                 stmmac_clear_rx_descriptors(priv, queue);
1331
1332                 /* Setup the chained descriptor addresses */
1333                 if (priv->mode == STMMAC_CHAIN_MODE) {
1334                         if (priv->extend_desc)
1335                                 stmmac_mode_init(priv, rx_q->dma_erx,
1336                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1337                         else
1338                                 stmmac_mode_init(priv, rx_q->dma_rx,
1339                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1340                 }
1341         }
1342
1343         buf_sz = bfsize;
1344
1345         return 0;
1346
1347 err_init_rx_buffers:
1348         while (queue >= 0) {
1349                 while (--i >= 0)
1350                         stmmac_free_rx_buffer(priv, queue, i);
1351
1352                 if (queue == 0)
1353                         break;
1354
1355                 i = DMA_RX_SIZE;
1356                 queue--;
1357         }
1358
1359         return ret;
1360 }
1361
1362 /**
1363  * init_dma_tx_desc_rings - init the TX descriptor rings
1364  * @dev: net device structure.
1365  * Description: this function initializes the DMA TX descriptors
1366  * and the per-descriptor bookkeeping. It supports the chained and ring
1367  * modes.
1368  */
1369 static int init_dma_tx_desc_rings(struct net_device *dev)
1370 {
1371         struct stmmac_priv *priv = netdev_priv(dev);
1372         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1373         u32 queue;
1374         int i;
1375
1376         for (queue = 0; queue < tx_queue_cnt; queue++) {
1377                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1378
1379                 netif_dbg(priv, probe, priv->dev,
1380                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1381                          (u32)tx_q->dma_tx_phy);
1382
1383                 /* Setup the chained descriptor addresses */
1384                 if (priv->mode == STMMAC_CHAIN_MODE) {
1385                         if (priv->extend_desc)
1386                                 stmmac_mode_init(priv, tx_q->dma_etx,
1387                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1388                         else
1389                                 stmmac_mode_init(priv, tx_q->dma_tx,
1390                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1391                 }
1392
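                /* Clear every descriptor and its bookkeeping so no stale
                 * buffer state survives from a previous run.
                 */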
1393                 for (i = 0; i < DMA_TX_SIZE; i++) {
1394                         struct dma_desc *p;
1395                         if (priv->extend_desc)
1396                                 p = &((tx_q->dma_etx + i)->basic);
1397                         else
1398                                 p = tx_q->dma_tx + i;
1399
1400                         stmmac_clear_desc(priv, p);
1401
1402                         tx_q->tx_skbuff_dma[i].buf = 0;
1403                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1404                         tx_q->tx_skbuff_dma[i].len = 0;
1405                         tx_q->tx_skbuff_dma[i].last_segment = false;
1406                         tx_q->tx_skbuff[i] = NULL;
1407                 }
1408
1409                 tx_q->dirty_tx = 0;
1410                 tx_q->cur_tx = 0;
1411                 tx_q->mss = 0;
1412
1413                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1414         }
1415
1416         return 0;
1417 }
1418
1419 /**
1420  * init_dma_desc_rings - init the RX/TX descriptor rings
1421  * @dev: net device structure
1422  * @flags: gfp flag.
1423  * Description: this function initializes the DMA RX/TX descriptors
1424  * and allocates the socket buffers. It supports the chained and ring
1425  * modes.
1426  */
1427 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1428 {
1429         struct stmmac_priv *priv = netdev_priv(dev);
1430         int ret;
1431
1432         ret = init_dma_rx_desc_rings(dev, flags);
1433         if (ret)
1434                 return ret;
1435
1436         ret = init_dma_tx_desc_rings(dev);
1437
1438         stmmac_clear_descriptors(priv);
1439
1440         if (netif_msg_hw(priv))
1441                 stmmac_display_rings(priv);
1442
1443         return ret;
1444 }
1445
1446 /**
1447  * dma_free_rx_skbufs - free RX dma buffers
1448  * @priv: private structure
1449  * @queue: RX queue index
1450  */
1451 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1452 {
1453         int i;
1454
1455         for (i = 0; i < DMA_RX_SIZE; i++)
1456                 stmmac_free_rx_buffer(priv, queue, i);
1457 }
1458
1459 /**
1460  * dma_free_tx_skbufs - free TX dma buffers
1461  * @priv: private structure
1462  * @queue: TX queue index
1463  */
1464 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1465 {
1466         int i;
1467
1468         for (i = 0; i < DMA_TX_SIZE; i++)
1469                 stmmac_free_tx_buffer(priv, queue, i);
1470 }
1471
1472 /**
1473  * free_dma_rx_desc_resources - free RX dma desc resources
1474  * @priv: private structure
1475  */
1476 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1477 {
1478         u32 rx_count = priv->plat->rx_queues_to_use;
1479         u32 queue;
1480
1481         /* Free RX queue resources */
1482         for (queue = 0; queue < rx_count; queue++) {
1483                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1484
1485                 /* Release the DMA RX socket buffers */
1486                 dma_free_rx_skbufs(priv, queue);
1487
1488                 /* Free DMA regions of consistent memory previously allocated */
1489                 if (!priv->extend_desc)
1490                         dma_free_coherent(priv->device,
1491                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1492                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1493                 else
1494                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1495                                           sizeof(struct dma_extended_desc),
1496                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1497
1498                 kfree(rx_q->rx_skbuff_dma);
1499                 kfree(rx_q->rx_skbuff);
1500         }
1501 }
1502
1503 /**
1504  * free_dma_tx_desc_resources - free TX dma desc resources
1505  * @priv: private structure
1506  */
1507 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1508 {
1509         u32 tx_count = priv->plat->tx_queues_to_use;
1510         u32 queue;
1511
1512         /* Free TX queue resources */
1513         for (queue = 0; queue < tx_count; queue++) {
1514                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1515
1516                 /* Release the DMA TX socket buffers */
1517                 dma_free_tx_skbufs(priv, queue);
1518
1519                 /* Free DMA regions of consistent memory previously allocated */
1520                 if (!priv->extend_desc)
1521                         dma_free_coherent(priv->device,
1522                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1523                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1524                 else
1525                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1526                                           sizeof(struct dma_extended_desc),
1527                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1528
1529                 kfree(tx_q->tx_skbuff_dma);
1530                 kfree(tx_q->tx_skbuff);
1531         }
1532 }
1533
1534 /**
1535  * alloc_dma_rx_desc_resources - alloc RX resources.
1536  * @priv: private structure
1537  * Description: according to which descriptor can be used (extend or basic)
1538  * this function allocates the resources for the RX path: the descriptor
1539  * rings (basic or extended) and the arrays used to track the RX socket
1540  * buffers and their DMA addresses.
1541  */
1542 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1543 {
1544         u32 rx_count = priv->plat->rx_queues_to_use;
1545         int ret = -ENOMEM;
1546         u32 queue;
1547
1548         /* RX queues buffers and DMA */
1549         for (queue = 0; queue < rx_count; queue++) {
1550                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1551
1552                 rx_q->queue_index = queue;
1553                 rx_q->priv_data = priv;
1554
1555                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1556                                                     sizeof(dma_addr_t),
1557                                                     GFP_KERNEL);
1558                 if (!rx_q->rx_skbuff_dma)
1559                         goto err_dma;
1560
1561                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1562                                                 sizeof(struct sk_buff *),
1563                                                 GFP_KERNEL);
1564                 if (!rx_q->rx_skbuff)
1565                         goto err_dma;
1566
1567                 if (priv->extend_desc) {
1568                         rx_q->dma_erx = dma_alloc_coherent(priv->device,
1569                                                            DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1570                                                            &rx_q->dma_rx_phy,
1571                                                            GFP_KERNEL);
1572                         if (!rx_q->dma_erx)
1573                                 goto err_dma;
1574
1575                 } else {
1576                         rx_q->dma_rx = dma_alloc_coherent(priv->device,
1577                                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1578                                                           &rx_q->dma_rx_phy,
1579                                                           GFP_KERNEL);
1580                         if (!rx_q->dma_rx)
1581                                 goto err_dma;
1582                 }
1583         }
1584
1585         return 0;
1586
1587 err_dma:
1588         free_dma_rx_desc_resources(priv);
1589
1590         return ret;
1591 }
1592
1593 /**
1594  * alloc_dma_tx_desc_resources - alloc TX resources.
1595  * @priv: private structure
1596  * Description: according to which descriptor can be used (extend or basic)
1597  * this function allocates the resources for the TX path: the descriptor
1598  * rings (basic or extended) and the arrays used to track the TX socket
1599  * buffers and their DMA mappings.
1600  */
1601 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1602 {
1603         u32 tx_count = priv->plat->tx_queues_to_use;
1604         int ret = -ENOMEM;
1605         u32 queue;
1606
1607         /* TX queues buffers and DMA */
1608         for (queue = 0; queue < tx_count; queue++) {
1609                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1610
1611                 tx_q->queue_index = queue;
1612                 tx_q->priv_data = priv;
1613
1614                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1615                                                     sizeof(*tx_q->tx_skbuff_dma),
1616                                                     GFP_KERNEL);
1617                 if (!tx_q->tx_skbuff_dma)
1618                         goto err_dma;
1619
1620                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1621                                                 sizeof(struct sk_buff *),
1622                                                 GFP_KERNEL);
1623                 if (!tx_q->tx_skbuff)
1624                         goto err_dma;
1625
1626                 if (priv->extend_desc) {
1627                         tx_q->dma_etx = dma_alloc_coherent(priv->device,
1628                                                            DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1629                                                            &tx_q->dma_tx_phy,
1630                                                            GFP_KERNEL);
1631                         if (!tx_q->dma_etx)
1632                                 goto err_dma;
1633                 } else {
1634                         tx_q->dma_tx = dma_alloc_coherent(priv->device,
1635                                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1636                                                           &tx_q->dma_tx_phy,
1637                                                           GFP_KERNEL);
1638                         if (!tx_q->dma_tx)
1639                                 goto err_dma;
1640                 }
1641         }
1642
1643         return 0;
1644
1645 err_dma:
1646         free_dma_tx_desc_resources(priv);
1647
1648         return ret;
1649 }
1650
1651 /**
1652  * alloc_dma_desc_resources - alloc TX/RX resources.
1653  * @priv: private structure
1654  * Description: according to which descriptor can be used (extend or basic)
1655  * this function allocates the resources for TX and RX paths. In case of
1656  * reception, for example, it pre-allocates the RX socket buffers in order to
1657  * allow the zero-copy mechanism.
1658  */
1659 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1660 {
1661         /* RX Allocation */
1662         int ret = alloc_dma_rx_desc_resources(priv);
1663
1664         if (ret)
1665                 return ret;
1666
1667         ret = alloc_dma_tx_desc_resources(priv);
1668
1669         return ret;
1670 }
1671
1672 /**
1673  * free_dma_desc_resources - free dma desc resources
1674  * @priv: private structure
1675  */
1676 static void free_dma_desc_resources(struct stmmac_priv *priv)
1677 {
1678         /* Release the DMA RX socket buffers */
1679         free_dma_rx_desc_resources(priv);
1680
1681         /* Release the DMA TX socket buffers */
1682         free_dma_tx_desc_resources(priv);
1683 }
1684
1685 /**
1686  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1687  *  @priv: driver private structure
1688  *  Description: It is used for enabling the rx queues in the MAC
1689  */
1690 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1691 {
1692         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1693         int queue;
1694         u8 mode;
1695
1696         for (queue = 0; queue < rx_queues_count; queue++) {
1697                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1698                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1699         }
1700 }
1701
1702 /**
1703  * stmmac_start_rx_dma - start RX DMA channel
1704  * @priv: driver private structure
1705  * @chan: RX channel index
1706  * Description:
1707  * This starts a RX DMA channel
1708  */
1709 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1710 {
1711         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1712         stmmac_start_rx(priv, priv->ioaddr, chan);
1713 }
1714
1715 /**
1716  * stmmac_start_tx_dma - start TX DMA channel
1717  * @priv: driver private structure
1718  * @chan: TX channel index
1719  * Description:
1720  * This starts a TX DMA channel
1721  */
1722 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1723 {
1724         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1725         stmmac_start_tx(priv, priv->ioaddr, chan);
1726 }
1727
1728 /**
1729  * stmmac_stop_rx_dma - stop RX DMA channel
1730  * @priv: driver private structure
1731  * @chan: RX channel index
1732  * Description:
1733  * This stops a RX DMA channel
1734  */
1735 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1736 {
1737         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1738         stmmac_stop_rx(priv, priv->ioaddr, chan);
1739 }
1740
1741 /**
1742  * stmmac_stop_tx_dma - stop TX DMA channel
1743  * @priv: driver private structure
1744  * @chan: TX channel index
1745  * Description:
1746  * This stops a TX DMA channel
1747  */
1748 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1749 {
1750         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1751         stmmac_stop_tx(priv, priv->ioaddr, chan);
1752 }
1753
1754 /**
1755  * stmmac_start_all_dma - start all RX and TX DMA channels
1756  * @priv: driver private structure
1757  * Description:
1758  * This starts all the RX and TX DMA channels
1759  */
1760 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1761 {
1762         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1763         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1764         u32 chan = 0;
1765
1766         for (chan = 0; chan < rx_channels_count; chan++)
1767                 stmmac_start_rx_dma(priv, chan);
1768
1769         for (chan = 0; chan < tx_channels_count; chan++)
1770                 stmmac_start_tx_dma(priv, chan);
1771 }
1772
1773 /**
1774  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1775  * @priv: driver private structure
1776  * Description:
1777  * This stops the RX and TX DMA channels
1778  */
1779 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1780 {
1781         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1782         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1783         u32 chan = 0;
1784
1785         for (chan = 0; chan < rx_channels_count; chan++)
1786                 stmmac_stop_rx_dma(priv, chan);
1787
1788         for (chan = 0; chan < tx_channels_count; chan++)
1789                 stmmac_stop_tx_dma(priv, chan);
1790 }
1791
1792 /**
1793  *  stmmac_dma_operation_mode - HW DMA operation mode
1794  *  @priv: driver private structure
1795  *  Description: it is used for configuring the DMA operation mode register in
1796  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1797  */
1798 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1799 {
1800         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1801         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1802         int rxfifosz = priv->plat->rx_fifo_size;
1803         int txfifosz = priv->plat->tx_fifo_size;
1804         u32 txmode = 0;
1805         u32 rxmode = 0;
1806         u32 chan = 0;
1807         u8 qmode = 0;
1808
1809         if (rxfifosz == 0)
1810                 rxfifosz = priv->dma_cap.rx_fifo_size;
1811         if (txfifosz == 0)
1812                 txfifosz = priv->dma_cap.tx_fifo_size;
1813
1814         /* Adjust for real per queue fifo size */
1815         rxfifosz /= rx_channels_count;
1816         txfifosz /= tx_channels_count;
1817
1818         if (priv->plat->force_thresh_dma_mode) {
1819                 txmode = tc;
1820                 rxmode = tc;
1821         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1822                 /*
1823                  * In case of GMAC, SF mode can be enabled
1824                  * to perform the TX COE in HW. This depends on:
1825                  * 1) TX COE if actually supported
1826                  * 2) There is no bugged Jumbo frame support
1827                  *    that requires the csum not to be inserted in the TDES.
1828                  */
1829                 txmode = SF_DMA_MODE;
1830                 rxmode = SF_DMA_MODE;
1831                 priv->xstats.threshold = SF_DMA_MODE;
1832         } else {
1833                 txmode = tc;
1834                 rxmode = SF_DMA_MODE;
1835         }
1836
1837         /* configure all channels */
1838         for (chan = 0; chan < rx_channels_count; chan++) {
1839                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1840
1841                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1842                                 rxfifosz, qmode);
1843                 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1844                                 chan);
1845         }
1846
1847         for (chan = 0; chan < tx_channels_count; chan++) {
1848                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1849
1850                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1851                                 txfifosz, qmode);
1852         }
1853 }
1854
1855 /**
1856  * stmmac_tx_clean - to manage the transmission completion
1857  * @priv: driver private structure
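 * @budget: napi budget limiting this function's packet handling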
1858  * @queue: TX queue index
1859  * Description: it reclaims the transmit resources after transmission completes.
1860  */
1861 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1862 {
1863         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1864         unsigned int bytes_compl = 0, pkts_compl = 0;
1865         unsigned int entry, count = 0;
1866
1867         __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1868
1869         priv->xstats.tx_clean++;
1870
1871         entry = tx_q->dirty_tx;
1872         while ((entry != tx_q->cur_tx) && (count < budget)) {
1873                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1874                 struct dma_desc *p;
1875                 int status;
1876
1877                 if (priv->extend_desc)
1878                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1879                 else
1880                         p = tx_q->dma_tx + entry;
1881
1882                 status = stmmac_tx_status(priv, &priv->dev->stats,
1883                                 &priv->xstats, p, priv->ioaddr);
1884                 /* Check if the descriptor is owned by the DMA */
1885                 if (unlikely(status & tx_dma_own))
1886                         break;
1887
1888                 count++;
1889
1890                 /* Make sure descriptor fields are read after reading
1891                  * the own bit.
1892                  */
1893                 dma_rmb();
1894
1895                 /* Just consider the last segment and ...*/
1896                 if (likely(!(status & tx_not_ls))) {
1897                         /* ... verify the status error condition */
1898                         if (unlikely(status & tx_err)) {
1899                                 priv->dev->stats.tx_errors++;
1900                         } else {
1901                                 priv->dev->stats.tx_packets++;
1902                                 priv->xstats.tx_pkt_n++;
1903                         }
1904                         stmmac_get_tx_hwtstamp(priv, p, skb);
1905                 }
1906
1907                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1908                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1909                                 dma_unmap_page(priv->device,
1910                                                tx_q->tx_skbuff_dma[entry].buf,
1911                                                tx_q->tx_skbuff_dma[entry].len,
1912                                                DMA_TO_DEVICE);
1913                         else
1914                                 dma_unmap_single(priv->device,
1915                                                  tx_q->tx_skbuff_dma[entry].buf,
1916                                                  tx_q->tx_skbuff_dma[entry].len,
1917                                                  DMA_TO_DEVICE);
1918                         tx_q->tx_skbuff_dma[entry].buf = 0;
1919                         tx_q->tx_skbuff_dma[entry].len = 0;
1920                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1921                 }
1922
1923                 stmmac_clean_desc3(priv, tx_q, p);
1924
1925                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1926                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1927
1928                 if (likely(skb != NULL)) {
1929                         pkts_compl++;
1930                         bytes_compl += skb->len;
1931                         dev_consume_skb_any(skb);
1932                         tx_q->tx_skbuff[entry] = NULL;
1933                 }
1934
1935                 stmmac_release_tx_desc(priv, p, priv->mode);
1936
1937                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1938         }
1939         tx_q->dirty_tx = entry;
1940
1941         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1942                                   pkts_compl, bytes_compl);
1943
1944         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1945                                                                 queue))) &&
1946             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1947
1948                 netif_dbg(priv, tx_done, priv->dev,
1949                           "%s: restart transmit\n", __func__);
1950                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1951         }
1952
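        /* TX reclaim is done: if EEE is enabled and the TX path is not already
         * in LPI, try to enter it and re-arm the EEE control timer.
         */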
1953         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1954                 stmmac_enable_eee_mode(priv);
1955                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1956         }
1957
1958         __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1959
1960         return count;
1961 }
1962
1963 /**
1964  * stmmac_tx_err - to manage the tx error
1965  * @priv: driver private structure
1966  * @chan: channel index
1967  * Description: it cleans the descriptors and restarts the transmission
1968  * in case of transmission errors.
1969  */
1970 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1971 {
1972         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1973         int i;
1974
1975         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1976
1977         stmmac_stop_tx_dma(priv, chan);
1978         dma_free_tx_skbufs(priv, chan);
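        /* Re-initialize every descriptor of the ring before restarting the channel */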
1979         for (i = 0; i < DMA_TX_SIZE; i++)
1980                 if (priv->extend_desc)
1981                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1982                                         priv->mode, (i == DMA_TX_SIZE - 1));
1983                 else
1984                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1985                                         priv->mode, (i == DMA_TX_SIZE - 1));
1986         tx_q->dirty_tx = 0;
1987         tx_q->cur_tx = 0;
1988         tx_q->mss = 0;
1989         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1990         stmmac_start_tx_dma(priv, chan);
1991
1992         priv->dev->stats.tx_errors++;
1993         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1994 }
1995
1996 /**
1997  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1998  *  @priv: driver private structure
1999  *  @txmode: TX operating mode
2000  *  @rxmode: RX operating mode
2001  *  @chan: channel index
2002  *  Description: it is used for configuring the DMA operation mode at
2003  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2004  *  mode.
2005  */
2006 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2007                                           u32 rxmode, u32 chan)
2008 {
2009         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2010         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2011         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2012         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2013         int rxfifosz = priv->plat->rx_fifo_size;
2014         int txfifosz = priv->plat->tx_fifo_size;
2015
2016         if (rxfifosz == 0)
2017                 rxfifosz = priv->dma_cap.rx_fifo_size;
2018         if (txfifosz == 0)
2019                 txfifosz = priv->dma_cap.tx_fifo_size;
2020
2021         /* Adjust for real per queue fifo size */
2022         rxfifosz /= rx_channels_count;
2023         txfifosz /= tx_channels_count;
2024
2025         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2026         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2027 }
2028
2029 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2030 {
2031         int ret;
2032
2033         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2034                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2035         if (ret && (ret != -EINVAL)) {
2036                 stmmac_global_err(priv);
2037                 return true;
2038         }
2039
2040         return false;
2041 }
2042
2043 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2044 {
2045         int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2046                                                  &priv->xstats, chan);
2047         struct stmmac_channel *ch = &priv->channel[chan];
2048         bool needs_work = false;
2049
2050         if ((status & handle_rx) && ch->has_rx) {
2051                 needs_work = true;
2052         } else {
2053                 status &= ~handle_rx;
2054         }
2055
2056         if ((status & handle_tx) && ch->has_tx) {
2057                 needs_work = true;
2058         } else {
2059                 status &= ~handle_tx;
2060         }
2061
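        /* Mask this channel's DMA interrupts and hand the work over to NAPI */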
2062         if (needs_work && napi_schedule_prep(&ch->napi)) {
2063                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2064                 __napi_schedule(&ch->napi);
2065         }
2066
2067         return status;
2068 }
2069
2070 /**
2071  * stmmac_dma_interrupt - DMA ISR
2072  * @priv: driver private structure
2073  * Description: this is the DMA ISR. It is called by the main ISR.
2074  * It calls the dwmac dma routine and schedules the poll method in case
2075  * some work can be done.
2076  */
2077 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2078 {
2079         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2080         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2081         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2082                                 tx_channel_count : rx_channel_count;
2083         u32 chan;
2084         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2085
2086         /* Make sure we never check beyond our status buffer. */
2087         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2088                 channels_to_check = ARRAY_SIZE(status);
2089
2090         for (chan = 0; chan < channels_to_check; chan++)
2091                 status[chan] = stmmac_napi_check(priv, chan);
2092
2093         for (chan = 0; chan < tx_channel_count; chan++) {
2094                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2095                         /* Try to bump up the dma threshold on this failure */
2096                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2097                             (tc <= 256)) {
2098                                 tc += 64;
2099                                 if (priv->plat->force_thresh_dma_mode)
2100                                         stmmac_set_dma_operation_mode(priv,
2101                                                                       tc,
2102                                                                       tc,
2103                                                                       chan);
2104                                 else
2105                                         stmmac_set_dma_operation_mode(priv,
2106                                                                     tc,
2107                                                                     SF_DMA_MODE,
2108                                                                     chan);
2109                                 priv->xstats.threshold = tc;
2110                         }
2111                 } else if (unlikely(status[chan] == tx_hard_error)) {
2112                         stmmac_tx_err(priv, chan);
2113                 }
2114         }
2115 }
2116
2117 /**
2118  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2119  * @priv: driver private structure
2120  * Description: this masks the MMC irq since the counters are managed in SW.
2121  */
2122 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2123 {
2124         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2125                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2126
2127         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2128
2129         if (priv->dma_cap.rmon) {
2130                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2131                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2132         } else
2133                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2134 }
2135
2136 /**
2137  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2138  * @priv: driver private structure
2139  * Description:
2140  *  new GMAC chip generations have a new register to indicate the
2141  *  presence of the optional feature/functions.
2142  *  This can also be used to override the value passed through the
2143  *  platform and is necessary for old MAC10/100 and GMAC chips.
2144  */
2145 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2146 {
2147         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2148 }
2149
2150 /**
2151  * stmmac_check_ether_addr - check if the MAC addr is valid
2152  * @priv: driver private structure
2153  * Description:
2154  * it verifies if the MAC address is valid; in case of failure it
2155  * generates a random MAC address
2156  */
2157 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2158 {
2159         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2160                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2161                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2162                         eth_hw_addr_random(priv->dev);
2163                 netdev_info(priv->dev, "device MAC address %pM\n",
2164                             priv->dev->dev_addr);
2165         }
2166 }
2167
2168 /**
2169  * stmmac_init_dma_engine - DMA init.
2170  * @priv: driver private structure
2171  * Description:
2172  * It inits the DMA invoking the specific MAC/GMAC callback.
2173  * Some DMA parameters can be passed from the platform;
2174  * in case these are not passed a default is kept for the MAC or GMAC.
2175  */
2176 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2177 {
2178         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2179         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2180         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2181         struct stmmac_rx_queue *rx_q;
2182         struct stmmac_tx_queue *tx_q;
2183         u32 chan = 0;
2184         int atds = 0;
2185         int ret = 0;
2186
2187         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2188                 dev_err(priv->device, "Invalid DMA configuration\n");
2189                 return -EINVAL;
2190         }
2191
2192         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2193                 atds = 1;
2194
2195         ret = stmmac_reset(priv, priv->ioaddr);
2196         if (ret) {
2197                 dev_err(priv->device, "Failed to reset the dma\n");
2198                 return ret;
2199         }
2200
2201         /* DMA Configuration */
2202         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2203
2204         if (priv->plat->axi)
2205                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2206
2207         /* DMA RX Channel Configuration */
2208         for (chan = 0; chan < rx_channels_count; chan++) {
2209                 rx_q = &priv->rx_queue[chan];
2210
2211                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2212                                     rx_q->dma_rx_phy, chan);
2213
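                /* The RX tail pointer is programmed to the address just past the
                 * last descriptor, so the whole ring is available to the DMA.
                 */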
2214                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2215                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2216                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2217                                        rx_q->rx_tail_addr, chan);
2218         }
2219
2220         /* DMA TX Channel Configuration */
2221         for (chan = 0; chan < tx_channels_count; chan++) {
2222                 tx_q = &priv->tx_queue[chan];
2223
2224                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2225                                     tx_q->dma_tx_phy, chan);
2226
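                /* Nothing is queued yet, so the TX tail pointer starts at the ring base */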
2227                 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2228                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2229                                        tx_q->tx_tail_addr, chan);
2230         }
2231
2232         /* DMA CSR Channel configuration */
2233         for (chan = 0; chan < dma_csr_ch; chan++)
2234                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2235
2236         return ret;
2237 }
2238
2239 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2240 {
2241         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2242
2243         mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2244 }
2245
2246 /**
2247  * stmmac_tx_timer - mitigation sw timer for tx.
2248  * @t: timer_list pointer
2249  * Description:
2250  * This is the timer handler that schedules the NAPI poll to run stmmac_tx_clean.
2251  */
2252 static void stmmac_tx_timer(struct timer_list *t)
2253 {
2254         struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2255         struct stmmac_priv *priv = tx_q->priv_data;
2256         struct stmmac_channel *ch;
2257
2258         ch = &priv->channel[tx_q->queue_index];
2259
2260         if (likely(napi_schedule_prep(&ch->napi)))
2261                 __napi_schedule(&ch->napi);
2262 }
2263
2264 /**
2265  * stmmac_init_tx_coalesce - init tx mitigation options.
2266  * @priv: driver private structure
2267  * Description:
2268  * This inits the transmit coalesce parameters: i.e. timer rate,
2269  * timer handler and default threshold used for enabling the
2270  * interrupt on completion bit.
2271  */
2272 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2273 {
2274         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2275         u32 chan;
2276
2277         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2278         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2279
2280         for (chan = 0; chan < tx_channel_count; chan++) {
2281                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2282
2283                 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2284         }
2285 }
2286
2287 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2288 {
2289         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2290         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2291         u32 chan;
2292
2293         /* set TX ring length */
2294         for (chan = 0; chan < tx_channels_count; chan++)
2295                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2296                                 (DMA_TX_SIZE - 1), chan);
2297
2298         /* set RX ring length */
2299         for (chan = 0; chan < rx_channels_count; chan++)
2300                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2301                                 (DMA_RX_SIZE - 1), chan);
2302 }
2303
2304 /**
2305  *  stmmac_set_tx_queue_weight - Set TX queue weight
2306  *  @priv: driver private structure
2307  *  Description: It is used for setting the TX queue weights
2308  */
2309 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2310 {
2311         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2312         u32 weight;
2313         u32 queue;
2314
2315         for (queue = 0; queue < tx_queues_count; queue++) {
2316                 weight = priv->plat->tx_queues_cfg[queue].weight;
2317                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2318         }
2319 }
2320
2321 /**
2322  *  stmmac_configure_cbs - Configure CBS in TX queue
2323  *  @priv: driver private structure
2324  *  Description: It is used for configuring CBS in AVB TX queues
2325  */
2326 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2327 {
2328         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2329         u32 mode_to_use;
2330         u32 queue;
2331
2332         /* queue 0 is reserved for legacy traffic */
2333         for (queue = 1; queue < tx_queues_count; queue++) {
2334                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2335                 if (mode_to_use == MTL_QUEUE_DCB)
2336                         continue;
2337
2338                 stmmac_config_cbs(priv, priv->hw,
2339                                 priv->plat->tx_queues_cfg[queue].send_slope,
2340                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2341                                 priv->plat->tx_queues_cfg[queue].high_credit,
2342                                 priv->plat->tx_queues_cfg[queue].low_credit,
2343                                 queue);
2344         }
2345 }
2346
2347 /**
2348  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2349  *  @priv: driver private structure
2350  *  Description: It is used for mapping RX queues to RX dma channels
2351  */
2352 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2353 {
2354         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2355         u32 queue;
2356         u32 chan;
2357
2358         for (queue = 0; queue < rx_queues_count; queue++) {
2359                 chan = priv->plat->rx_queues_cfg[queue].chan;
2360                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2361         }
2362 }
2363
2364 /**
2365  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2366  *  @priv: driver private structure
2367  *  Description: It is used for configuring the RX Queue Priority
2368  */
2369 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2370 {
2371         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2372         u32 queue;
2373         u32 prio;
2374
2375         for (queue = 0; queue < rx_queues_count; queue++) {
2376                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2377                         continue;
2378
2379                 prio = priv->plat->rx_queues_cfg[queue].prio;
2380                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2381         }
2382 }
2383
2384 /**
2385  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2386  *  @priv: driver private structure
2387  *  Description: It is used for configuring the TX Queue Priority
2388  */
2389 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2390 {
2391         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2392         u32 queue;
2393         u32 prio;
2394
2395         for (queue = 0; queue < tx_queues_count; queue++) {
2396                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2397                         continue;
2398
2399                 prio = priv->plat->tx_queues_cfg[queue].prio;
2400                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2401         }
2402 }
2403
2404 /**
2405  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2406  *  @priv: driver private structure
2407  *  Description: It is used for configuring the RX queue routing
2408  */
2409 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2410 {
2411         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2412         u32 queue;
2413         u8 packet;
2414
2415         for (queue = 0; queue < rx_queues_count; queue++) {
2416                 /* no specific packet type routing specified for the queue */
2417                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2418                         continue;
2419
2420                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2421                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2422         }
2423 }
2424
2425 /**
2426  *  stmmac_mtl_configuration - Configure MTL
2427  *  @priv: driver private structure
2428  *  Description: It is used for configuring MTL
2429  */
2430 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2431 {
2432         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2433         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2434
2435         if (tx_queues_count > 1)
2436                 stmmac_set_tx_queue_weight(priv);
2437
2438         /* Configure MTL RX algorithms */
2439         if (rx_queues_count > 1)
2440                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2441                                 priv->plat->rx_sched_algorithm);
2442
2443         /* Configure MTL TX algorithms */
2444         if (tx_queues_count > 1)
2445                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2446                                 priv->plat->tx_sched_algorithm);
2447
2448         /* Configure CBS in AVB TX queues */
2449         if (tx_queues_count > 1)
2450                 stmmac_configure_cbs(priv);
2451
2452         /* Map RX MTL to DMA channels */
2453         stmmac_rx_queue_dma_chan_map(priv);
2454
2455         /* Enable MAC RX Queues */
2456         stmmac_mac_enable_rx_queues(priv);
2457
2458         /* Set RX priorities */
2459         if (rx_queues_count > 1)
2460                 stmmac_mac_config_rx_queues_prio(priv);
2461
2462         /* Set TX priorities */
2463         if (tx_queues_count > 1)
2464                 stmmac_mac_config_tx_queues_prio(priv);
2465
2466         /* Set RX routing */
2467         if (rx_queues_count > 1)
2468                 stmmac_mac_config_rx_queues_routing(priv);
2469 }
2470
2471 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2472 {
2473         if (priv->dma_cap.asp) {
2474                 netdev_info(priv->dev, "Enabling Safety Features\n");
2475                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2476         } else {
2477                 netdev_info(priv->dev, "No Safety Features support found\n");
2478         }
2479 }
2480
2481 /**
2482  * stmmac_hw_setup - setup mac in a usable state.
2483  *  @dev : pointer to the device structure.
2484  *  Description:
2485  *  this is the main function to setup the HW in a usable state: the
2486  *  dma engine is reset, the core registers are configured (e.g. AXI,
2487  *  Checksum features, timers). The DMA is ready to start receiving and
2488  *  transmitting.
2489  *  Return value:
2490  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2491  *  file on failure.
2492  */
2493 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2494 {
2495         struct stmmac_priv *priv = netdev_priv(dev);
2496         u32 rx_cnt = priv->plat->rx_queues_to_use;
2497         u32 tx_cnt = priv->plat->tx_queues_to_use;
2498         u32 chan;
2499         int ret;
2500
2501         /* DMA initialization and SW reset */
2502         ret = stmmac_init_dma_engine(priv);
2503         if (ret < 0) {
2504                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2505                            __func__);
2506                 return ret;
2507         }
2508
2509         /* Copy the MAC addr into the HW  */
2510         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2511
2512         /* PS and related bits will be programmed according to the speed */
2513         if (priv->hw->pcs) {
2514                 int speed = priv->plat->mac_port_sel_speed;
2515
2516                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2517                     (speed == SPEED_1000)) {
2518                         priv->hw->ps = speed;
2519                 } else {
2520                         dev_warn(priv->device, "invalid port speed\n");
2521                         priv->hw->ps = 0;
2522                 }
2523         }
2524
2525         /* Initialize the MAC Core */
2526         stmmac_core_init(priv, priv->hw, dev);
2527
2528         /* Initialize MTL*/
2529         stmmac_mtl_configuration(priv);
2530
2531         /* Initialize Safety Features */
2532         stmmac_safety_feat_configuration(priv);
2533
2534         ret = stmmac_rx_ipc(priv, priv->hw);
2535         if (!ret) {
2536                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2537                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2538                 priv->hw->rx_csum = 0;
2539         }
2540
2541         /* Enable the MAC Rx/Tx */
2542         stmmac_mac_set(priv, priv->ioaddr, true);
2543
2544         /* Set the HW DMA mode and the COE */
2545         stmmac_dma_operation_mode(priv);
2546
2547         stmmac_mmc_setup(priv);
2548
2549         if (init_ptp) {
2550                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2551                 if (ret < 0)
2552                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2553
2554                 ret = stmmac_init_ptp(priv);
2555                 if (ret == -EOPNOTSUPP)
2556                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2557                 else if (ret)
2558                         netdev_warn(priv->dev, "PTP init failed\n");
2559         }
2560
2561         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2562
2563         if (priv->use_riwt) {
2564                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2565                 if (!ret)
2566                         priv->rx_riwt = MAX_DMA_RIWT;
2567         }
2568
2569         if (priv->hw->pcs)
2570                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2571
2572         /* set TX and RX rings length */
2573         stmmac_set_rings_length(priv);
2574
2575         /* Enable TSO */
2576         if (priv->tso) {
2577                 for (chan = 0; chan < tx_cnt; chan++)
2578                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2579         }
2580
2581         /* Start the ball rolling... */
2582         stmmac_start_all_dma(priv);
2583
2584         return 0;
2585 }
2586
2587 static void stmmac_hw_teardown(struct net_device *dev)
2588 {
2589         struct stmmac_priv *priv = netdev_priv(dev);
2590
2591         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2592 }
2593
2594 /**
2595  *  stmmac_open - open entry point of the driver
2596  *  @dev : pointer to the device structure.
2597  *  Description:
2598  *  This function is the open entry point of the driver.
2599  *  Return value:
2600  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2601  *  file on failure.
2602  */
2603 static int stmmac_open(struct net_device *dev)
2604 {
2605         struct stmmac_priv *priv = netdev_priv(dev);
2606         u32 chan;
2607         int ret;
2608
2609         stmmac_check_ether_addr(priv);
2610
2611         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2612             priv->hw->pcs != STMMAC_PCS_TBI &&
2613             priv->hw->pcs != STMMAC_PCS_RTBI) {
2614                 ret = stmmac_init_phy(dev);
2615                 if (ret) {
2616                         netdev_err(priv->dev,
2617                                    "%s: Cannot attach to PHY (error: %d)\n",
2618                                    __func__, ret);
2619                         return ret;
2620                 }
2621         }
2622
2623         /* Extra statistics */
2624         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2625         priv->xstats.threshold = tc;
2626
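        /* The per-packet DMA buffer size comes from the buf_sz module
         * parameter, aligned via STMMAC_ALIGN().
         */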
2627         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2628         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2629
2630         ret = alloc_dma_desc_resources(priv);
2631         if (ret < 0) {
2632                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2633                            __func__);
2634                 goto dma_desc_error;
2635         }
2636
2637         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2638         if (ret < 0) {
2639                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2640                            __func__);
2641                 goto init_error;
2642         }
2643
2644         ret = stmmac_hw_setup(dev, true);
2645         if (ret < 0) {
2646                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2647                 goto init_error;
2648         }
2649
2650         stmmac_init_tx_coalesce(priv);
2651
2652         if (dev->phydev)
2653                 phy_start(dev->phydev);
2654
2655         /* Request the IRQ lines */
2656         ret = request_irq(dev->irq, stmmac_interrupt,
2657                           IRQF_SHARED, dev->name, dev);
2658         if (unlikely(ret < 0)) {
2659                 netdev_err(priv->dev,
2660                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2661                            __func__, dev->irq, ret);
2662                 goto irq_error;
2663         }
2664
2665         /* Request the Wake IRQ in case another line is used for WoL */
2666         if (priv->wol_irq != dev->irq) {
2667                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2668                                   IRQF_SHARED, dev->name, dev);
2669                 if (unlikely(ret < 0)) {
2670                         netdev_err(priv->dev,
2671                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2672                                    __func__, priv->wol_irq, ret);
2673                         goto wolirq_error;
2674                 }
2675         }
2676
2677         /* Request the LPI IRQ in case a separate line is used */
2678         if (priv->lpi_irq > 0) {
2679                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2680                                   dev->name, dev);
2681                 if (unlikely(ret < 0)) {
2682                         netdev_err(priv->dev,
2683                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2684                                    __func__, priv->lpi_irq, ret);
2685                         goto lpiirq_error;
2686                 }
2687         }
2688
2689         stmmac_enable_all_queues(priv);
2690         stmmac_start_all_queues(priv);
2691
2692         return 0;
2693
2694 lpiirq_error:
2695         if (priv->wol_irq != dev->irq)
2696                 free_irq(priv->wol_irq, dev);
2697 wolirq_error:
2698         free_irq(dev->irq, dev);
2699 irq_error:
2700         if (dev->phydev)
2701                 phy_stop(dev->phydev);
2702
2703         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2704                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2705
2706         stmmac_hw_teardown(dev);
2707 init_error:
2708         free_dma_desc_resources(priv);
2709 dma_desc_error:
2710         if (dev->phydev)
2711                 phy_disconnect(dev->phydev);
2712
2713         return ret;
2714 }
2715
2716 /**
2717  *  stmmac_release - close entry point of the driver
2718  *  @dev : device pointer.
2719  *  Description:
2720  *  This is the stop entry point of the driver.
2721  */
2722 static int stmmac_release(struct net_device *dev)
2723 {
2724         struct stmmac_priv *priv = netdev_priv(dev);
2725         u32 chan;
2726
2727         if (priv->eee_enabled)
2728                 del_timer_sync(&priv->eee_ctrl_timer);
2729
2730         /* Stop and disconnect the PHY */
2731         if (dev->phydev) {
2732                 phy_stop(dev->phydev);
2733                 phy_disconnect(dev->phydev);
2734         }
2735
2736         stmmac_stop_all_queues(priv);
2737
2738         stmmac_disable_all_queues(priv);
2739
2740         for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2741                 del_timer_sync(&priv->tx_queue[chan].txtimer);
2742
2743         /* Free the IRQ lines */
2744         free_irq(dev->irq, dev);
2745         if (priv->wol_irq != dev->irq)
2746                 free_irq(priv->wol_irq, dev);
2747         if (priv->lpi_irq > 0)
2748                 free_irq(priv->lpi_irq, dev);
2749
2750         /* Stop TX/RX DMA and clear the descriptors */
2751         stmmac_stop_all_dma(priv);
2752
2753         /* Release and free the Rx/Tx resources */
2754         free_dma_desc_resources(priv);
2755
2756         /* Disable the MAC Rx/Tx */
2757         stmmac_mac_set(priv, priv->ioaddr, false);
2758
2759         netif_carrier_off(dev);
2760
2761         stmmac_release_ptp(priv);
2762
2763         return 0;
2764 }
2765
2766 /**
2767  *  stmmac_tso_allocator - fill TX descriptors with the TSO payload
2768  *  @priv: driver private structure
2769  *  @des: buffer start address
2770  *  @total_len: total length to fill in descriptors
2771  *  @last_segment: condition for the last descriptor
2772  *  @queue: TX queue index
2773  *  Description:
2774  *  This function fills descriptors and requests new descriptors according
2775  *  to the buffer length to fill
2776  */
2777 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2778                                  int total_len, bool last_segment, u32 queue)
2779 {
2780         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2781         struct dma_desc *desc;
2782         u32 buff_size;
2783         int tmp_len;
2784
2785         tmp_len = total_len;
2786
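        /* Consume one descriptor per TSO_MAX_BUFF_SIZE chunk of the remaining payload */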
2787         while (tmp_len > 0) {
2788                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2789                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2790                 desc = tx_q->dma_tx + tx_q->cur_tx;
2791
2792                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2793                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2794                             TSO_MAX_BUFF_SIZE : tmp_len;
2795
2796                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2797                                 0, 1,
2798                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2799                                 0, 0);
2800
2801                 tmp_len -= TSO_MAX_BUFF_SIZE;
2802         }
2803 }
2804
2805 /**
2806  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2807  *  @skb : the socket buffer
2808  *  @dev : device pointer
2809  *  Description: this is the transmit function that is called on TSO frames
2810  *  (support available on GMAC4 and newer chips).
2811  *  Diagram below show the ring programming in case of TSO frames:
2812  *
2813  *  First Descriptor
2814  *   --------
2815  *   | DES0 |---> buffer1 = L2/L3/L4 header
2816  *   | DES1 |---> TCP Payload (can continue on next descr...)
2817  *   | DES2 |---> buffer 1 and 2 len
2818  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2819  *   --------
2820  *      |
2821  *     ...
2822  *      |
2823  *   --------
2824  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2825  *   | DES1 | --|
2826  *   | DES2 | --> buffer 1 and 2 len
2827  *   | DES3 |
2828  *   --------
2829  *
2830  * mss is fixed while TSO is enabled, so the TDES3 ctx field only needs programming when it changes.
2831  */
2832 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2833 {
2834         struct dma_desc *desc, *first, *mss_desc = NULL;
2835         struct stmmac_priv *priv = netdev_priv(dev);
2836         int nfrags = skb_shinfo(skb)->nr_frags;
2837         u32 queue = skb_get_queue_mapping(skb);
2838         unsigned int first_entry, des;
2839         struct stmmac_tx_queue *tx_q;
2840         int tmp_pay_len = 0;
2841         u32 pay_len, mss;
2842         u8 proto_hdr_len;
2843         int i;
2844
2845         tx_q = &priv->tx_queue[queue];
2846
2847         /* Compute header lengths */
2848         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2849
2850         /* Descriptor availability based on the threshold should be safe enough */
2851         if (unlikely(stmmac_tx_avail(priv, queue) <
2852                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2853                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2854                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2855                                                                 queue));
2856                         /* This is a hard error, log it. */
2857                         netdev_err(priv->dev,
2858                                    "%s: Tx Ring full when queue awake\n",
2859                                    __func__);
2860                 }
2861                 return NETDEV_TX_BUSY;
2862         }
2863
2864         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2865
2866         mss = skb_shinfo(skb)->gso_size;
2867
2868         /* set new MSS value if needed */
2869         if (mss != tx_q->mss) {
2870                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2871                 stmmac_set_mss(priv, mss_desc, mss);
2872                 tx_q->mss = mss;
2873                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2874                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2875         }
2876
2877         if (netif_msg_tx_queued(priv)) {
2878                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2879                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2880                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2881                         skb->data_len);
2882         }
2883
2884         first_entry = tx_q->cur_tx;
2885         WARN_ON(tx_q->tx_skbuff[first_entry]);
2886
2887         desc = tx_q->dma_tx + first_entry;
2888         first = desc;
2889
2890         /* first descriptor: fill Headers on Buf1 */
2891         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2892                              DMA_TO_DEVICE);
2893         if (dma_mapping_error(priv->device, des))
2894                 goto dma_map_err;
2895
2896         tx_q->tx_skbuff_dma[first_entry].buf = des;
2897         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2898
2899         first->des0 = cpu_to_le32(des);
2900
2901         /* Fill start of payload in buff2 of first descriptor */
2902         if (pay_len)
2903                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2904
2905         /* If needed take extra descriptors to fill the remaining payload */
2906         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
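	/* Buffer 2 of the first descriptor points at the start of the linear
	 * payload and covers up to TSO_MAX_BUFF_SIZE bytes of it, so only the
	 * remainder (tmp_pay_len) needs additional descriptors.
	 */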
2907
2908         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2909
2910         /* Prepare fragments */
2911         for (i = 0; i < nfrags; i++) {
2912                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2913
2914                 des = skb_frag_dma_map(priv->device, frag, 0,
2915                                        skb_frag_size(frag),
2916                                        DMA_TO_DEVICE);
2917                 if (dma_mapping_error(priv->device, des))
2918                         goto dma_map_err;
2919
2920                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2921                                      (i == nfrags - 1), queue);
2922
2923                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2924                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2925                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2926         }
2927
2928         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2929
2930         /* Only the last descriptor gets to point to the skb. */
2931         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2932
2933         /* We've used all descriptors we need for this skb, however,
2934          * advance cur_tx so that it references a fresh descriptor.
2935          * ndo_start_xmit will fill this descriptor the next time it's
2936          * called and stmmac_tx_clean may clean up to this descriptor.
2937          */
2938         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2939
2940         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2941                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2942                           __func__);
2943                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2944         }
2945
2946         dev->stats.tx_bytes += skb->len;
2947         priv->xstats.tx_tso_frames++;
2948         priv->xstats.tx_tso_nfrags += nfrags;
2949
2950         /* Manage tx mitigation */
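	/* Interrupt-on-Completion is requested at most once every
	 * tx_coal_frames frames; otherwise the coalescing timer is armed so
	 * that stmmac_tx_clean() still runs and reclaims used descriptors.
	 */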
2951         tx_q->tx_count_frames += nfrags + 1;
2952         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2953                 stmmac_set_tx_ic(priv, desc);
2954                 priv->xstats.tx_set_ic_bit++;
2955                 tx_q->tx_count_frames = 0;
2956         } else {
2957                 stmmac_tx_timer_arm(priv, queue);
2958         }
2959
2960         skb_tx_timestamp(skb);
2961
2962         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2963                      priv->hwts_tx_en)) {
2964                 /* declare that device is doing timestamping */
2965                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2966                 stmmac_enable_tx_timestamp(priv, first);
2967         }
2968
2969         /* Complete the first descriptor before granting the DMA */
2970         stmmac_prepare_tso_tx_desc(priv, first, 1,
2971                         proto_hdr_len,
2972                         pay_len,
2973                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2974                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2975
2976         /* If context desc is used to change MSS */
2977         if (mss_desc) {
2978                 /* Make sure that first descriptor has been completely
2979                  * written, including its own bit. This is because MSS is
2980                  * actually before first descriptor, so we need to make
2981                  * sure that MSS's own bit is the last thing written.
2982                  */
2983                 dma_wmb();
2984                 stmmac_set_tx_owner(priv, mss_desc);
2985         }
2986
2987         /* The own bit must be the latest setting done when preparing the
2988          * descriptor, and then a barrier is needed to make sure that
2989          * everything is coherent before handing control to the DMA engine.
2990          */
2991         wmb();
2992
2993         if (netif_msg_pktdata(priv)) {
2994                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2995                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2996                         tx_q->cur_tx, first, nfrags);
2997
2998                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2999
3000                 pr_info(">>> frame to be transmitted: ");
3001                 print_pkt(skb->data, skb_headlen(skb));
3002         }
3003
3004         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3005
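	/* Update the tail pointer: this tells the DMA engine that descriptors
	 * up to (but not including) cur_tx are ready to be transmitted.
	 */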
3006         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3007         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3008
3009         return NETDEV_TX_OK;
3010
3011 dma_map_err:
3012         dev_err(priv->device, "Tx dma map failed\n");
3013         dev_kfree_skb(skb);
3014         priv->dev->stats.tx_dropped++;
3015         return NETDEV_TX_OK;
3016 }
3017
3018 /**
3019  *  stmmac_xmit - Tx entry point of the driver
3020  *  @skb : the socket buffer
3021  *  @dev : device pointer
3022  *  Description : this is the tx entry point of the driver.
3023  *  It programs the chain or the ring and supports oversized frames
3024  *  and SG feature.
3025  */
3026 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3027 {
3028         struct stmmac_priv *priv = netdev_priv(dev);
3029         unsigned int nopaged_len = skb_headlen(skb);
3030         int i, csum_insertion = 0, is_jumbo = 0;
3031         u32 queue = skb_get_queue_mapping(skb);
3032         int nfrags = skb_shinfo(skb)->nr_frags;
3033         int entry;
3034         unsigned int first_entry;
3035         struct dma_desc *desc, *first;
3036         struct stmmac_tx_queue *tx_q;
3037         unsigned int enh_desc;
3038         unsigned int des;
3039
3040         tx_q = &priv->tx_queue[queue];
3041
3042         if (priv->tx_path_in_lpi_mode)
3043                 stmmac_disable_eee_mode(priv);
3044
3045         /* Manage oversized TCP frames for GMAC4 device */
3046         if (skb_is_gso(skb) && priv->tso) {
3047                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3048                         /*
3049                          * There is no way to determine the number of TSO
3050                          * capable queues. Always use queue 0 because,
3051                          * if TSO is supported, at least this one will
3052                          * be capable.
3053                          */
3054                         skb_set_queue_mapping(skb, 0);
3055
3056                         return stmmac_tso_xmit(skb, dev);
3057                 }
3058         }
3059
3060         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3061                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3062                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3063                                                                 queue));
3064                         /* This is a hard error, log it. */
3065                         netdev_err(priv->dev,
3066                                    "%s: Tx Ring full when queue awake\n",
3067                                    __func__);
3068                 }
3069                 return NETDEV_TX_BUSY;
3070         }
3071
3072         entry = tx_q->cur_tx;
3073         first_entry = entry;
3074         WARN_ON(tx_q->tx_skbuff[first_entry]);
3075
3076         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3077
3078         if (likely(priv->extend_desc))
3079                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3080         else
3081                 desc = tx_q->dma_tx + entry;
3082
3083         first = desc;
3084
3085         enh_desc = priv->plat->enh_desc;
3086         /* To program the descriptors according to the size of the frame */
3087         if (enh_desc)
3088                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3089
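	/* On enhanced-descriptor cores, frames too large for a single buffer
	 * are split across descriptors by the mode-specific (ring/chain)
	 * jumbo_frm callback, which returns the last entry it used.
	 */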
3090         if (unlikely(is_jumbo)) {
3091                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3092                 if (unlikely(entry < 0) && (entry != -EINVAL))
3093                         goto dma_map_err;
3094         }
3095
3096         for (i = 0; i < nfrags; i++) {
3097                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3098                 int len = skb_frag_size(frag);
3099                 bool last_segment = (i == (nfrags - 1));
3100
3101                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3102                 WARN_ON(tx_q->tx_skbuff[entry]);
3103
3104                 if (likely(priv->extend_desc))
3105                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3106                 else
3107                         desc = tx_q->dma_tx + entry;
3108
3109                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3110                                        DMA_TO_DEVICE);
3111                 if (dma_mapping_error(priv->device, des))
3112                         goto dma_map_err; /* should reuse desc w/o issues */
3113
3114                 tx_q->tx_skbuff_dma[entry].buf = des;
3115
3116                 stmmac_set_desc_addr(priv, desc, des);
3117
3118                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3119                 tx_q->tx_skbuff_dma[entry].len = len;
3120                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3121
3122                 /* Prepare the descriptor and set the own bit too */
3123                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3124                                 priv->mode, 1, last_segment, skb->len);
3125         }
3126
3127         /* Only the last descriptor gets to point to the skb. */
3128         tx_q->tx_skbuff[entry] = skb;
3129
3130         /* We've used all descriptors we need for this skb, however,
3131          * advance cur_tx so that it references a fresh descriptor.
3132          * ndo_start_xmit will fill this descriptor the next time it's
3133          * called and stmmac_tx_clean may clean up to this descriptor.
3134          */
3135         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3136         tx_q->cur_tx = entry;
3137
3138         if (netif_msg_pktdata(priv)) {
3139                 void *tx_head;
3140
3141                 netdev_dbg(priv->dev,
3142                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3143                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3144                            entry, first, nfrags);
3145
3146                 if (priv->extend_desc)
3147                         tx_head = (void *)tx_q->dma_etx;
3148                 else
3149                         tx_head = (void *)tx_q->dma_tx;
3150
3151                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3152
3153                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3154                 print_pkt(skb->data, skb->len);
3155         }
3156
3157         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3158                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3159                           __func__);
3160                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3161         }
3162
3163         dev->stats.tx_bytes += skb->len;
3164
3165         /* According to the coalesce parameter the IC bit for the latest
3166          * segment is reset and the timer re-started to clean the tx status.
3167          * This approach takes care of the fragments: desc is the first
3168          * element in case of no SG.
3169          */
3170         tx_q->tx_count_frames += nfrags + 1;
3171         if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3172                 stmmac_set_tx_ic(priv, desc);
3173                 priv->xstats.tx_set_ic_bit++;
3174                 tx_q->tx_count_frames = 0;
3175         } else {
3176                 stmmac_tx_timer_arm(priv, queue);
3177         }
3178
3179         skb_tx_timestamp(skb);
3180
3181         /* Ready to fill the first descriptor and set the OWN bit w/o any
3182          * problems because all the descriptors are actually ready to be
3183          * passed to the DMA engine.
3184          */
3185         if (likely(!is_jumbo)) {
3186                 bool last_segment = (nfrags == 0);
3187
3188                 des = dma_map_single(priv->device, skb->data,
3189                                      nopaged_len, DMA_TO_DEVICE);
3190                 if (dma_mapping_error(priv->device, des))
3191                         goto dma_map_err;
3192
3193                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3194
3195                 stmmac_set_desc_addr(priv, first, des);
3196
3197                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3198                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3199
3200                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3201                              priv->hwts_tx_en)) {
3202                         /* declare that device is doing timestamping */
3203                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3204                         stmmac_enable_tx_timestamp(priv, first);
3205                 }
3206
3207                 /* Prepare the first descriptor setting the OWN bit too */
3208                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3209                                 csum_insertion, priv->mode, 1, last_segment,
3210                                 skb->len);
3211
3212                 /* The own bit must be the latest setting done when preparing the
3213                  * descriptor, and then a barrier is needed to make sure that
3214                  * everything is coherent before handing control to the DMA engine.
3215                  */
3216                 wmb();
3217         }
3218
3219         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3220
3221         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3222
3223         tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3224         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3225
3226         return NETDEV_TX_OK;
3227
3228 dma_map_err:
3229         netdev_err(priv->dev, "Tx DMA map failed\n");
3230         dev_kfree_skb(skb);
3231         priv->dev->stats.tx_dropped++;
3232         return NETDEV_TX_OK;
3233 }
3234
3235 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3236 {
3237         struct vlan_ethhdr *veth;
3238         __be16 vlan_proto;
3239         u16 vlanid;
3240
3241         veth = (struct vlan_ethhdr *)skb->data;
3242         vlan_proto = veth->h_vlan_proto;
3243
3244         if ((vlan_proto == htons(ETH_P_8021Q) &&
3245              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3246             (vlan_proto == htons(ETH_P_8021AD) &&
3247              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3248                 /* pop the vlan tag */
3249                 vlanid = ntohs(veth->h_vlan_TCI);
3250                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3251                 skb_pull(skb, VLAN_HLEN);
3252                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3253         }
3254 }
3255
3256
3257 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3258 {
3259         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3260                 return 0;
3261
3262         return 1;
3263 }
3264
3265 /**
3266  * stmmac_rx_refill - refill used skb preallocated buffers
3267  * @priv: driver private structure
3268  * @queue: RX queue index
3269  * Description : this is to reallocate the skb for the reception process
3270  * that is based on zero-copy.
3271  */
3272 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3273 {
3274         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3275         int dirty = stmmac_rx_dirty(priv, queue);
3276         unsigned int entry = rx_q->dirty_rx;
3277
3278         int bfsize = priv->dma_buf_sz;
3279
3280         while (dirty-- > 0) {
3281                 struct dma_desc *p;
3282
3283                 if (priv->extend_desc)
3284                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3285                 else
3286                         p = rx_q->dma_rx + entry;
3287
3288                 if (likely(!rx_q->rx_skbuff[entry])) {
3289                         struct sk_buff *skb;
3290
3291                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3292                         if (unlikely(!skb)) {
3293                                 /* so for a while no zero-copy! */
3294                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3295                                 if (unlikely(net_ratelimit()))
3296                                         dev_err(priv->device,
3297                                                 "fail to alloc skb entry %d\n",
3298                                                 entry);
3299                                 break;
3300                         }
3301
3302                         rx_q->rx_skbuff[entry] = skb;
3303                         rx_q->rx_skbuff_dma[entry] =
3304                             dma_map_single(priv->device, skb->data, bfsize,
3305                                            DMA_FROM_DEVICE);
3306                         if (dma_mapping_error(priv->device,
3307                                               rx_q->rx_skbuff_dma[entry])) {
3308                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3309                                 dev_kfree_skb(skb);
3310                                 break;
3311                         }
3312
3313                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3314                         stmmac_refill_desc3(priv, rx_q, p);
3315
3316                         if (rx_q->rx_zeroc_thresh > 0)
3317                                 rx_q->rx_zeroc_thresh--;
3318
3319                         netif_dbg(priv, rx_status, priv->dev,
3320                                   "refill entry #%d\n", entry);
3321                 }
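		/* Make sure the (re)programmed buffer address is visible to the
		 * device before descriptor ownership is handed back to the DMA.
		 */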
3322                 dma_wmb();
3323
3324                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3325
3326                 dma_wmb();
3327
3328                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3329         }
3330         rx_q->dirty_rx = entry;
3331 }
3332
3333 /**
3334  * stmmac_rx - manage the receive process
3335  * @priv: driver private structure
3336  * @limit: napi budget
3337  * @queue: RX queue index.
3338  * Description : this is the function called by the napi poll method.
3339  * It gets all the frames inside the ring.
3340  */
3341 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3342 {
3343         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3344         struct stmmac_channel *ch = &priv->channel[queue];
3345         unsigned int entry = rx_q->cur_rx;
3346         int coe = priv->hw->rx_csum;
3347         unsigned int next_entry;
3348         unsigned int count = 0;
3349         bool xmac;
3350
3351         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3352
3353         if (netif_msg_rx_status(priv)) {
3354                 void *rx_head;
3355
3356                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3357                 if (priv->extend_desc)
3358                         rx_head = (void *)rx_q->dma_erx;
3359                 else
3360                         rx_head = (void *)rx_q->dma_rx;
3361
3362                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3363         }
3364         while (count < limit) {
3365                 int status;
3366                 struct dma_desc *p;
3367                 struct dma_desc *np;
3368
3369                 if (priv->extend_desc)
3370                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3371                 else
3372                         p = rx_q->dma_rx + entry;
3373
3374                 /* read the status of the incoming frame */
3375                 status = stmmac_rx_status(priv, &priv->dev->stats,
3376                                 &priv->xstats, p);
3377                 /* check if managed by the DMA otherwise go ahead */
3378                 if (unlikely(status & dma_own))
3379                         break;
3380
3381                 count++;
3382
3383                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3384                 next_entry = rx_q->cur_rx;
3385
3386                 if (priv->extend_desc)
3387                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3388                 else
3389                         np = rx_q->dma_rx + next_entry;
3390
3391                 prefetch(np);
3392
3393                 if (priv->extend_desc)
3394                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3395                                         &priv->xstats, rx_q->dma_erx + entry);
3396                 if (unlikely(status == discard_frame)) {
3397                         priv->dev->stats.rx_errors++;
3398                         if (priv->hwts_rx_en && !priv->extend_desc) {
3399                                 /* DESC2 & DESC3 will be overwritten by device
3400                                  * with timestamp value, hence reinitialize
3401                                  * them in stmmac_rx_refill() function so that
3402                                  * device can reuse it.
3403                                  */
3404                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3405                                 rx_q->rx_skbuff[entry] = NULL;
3406                                 dma_unmap_single(priv->device,
3407                                                  rx_q->rx_skbuff_dma[entry],
3408                                                  priv->dma_buf_sz,
3409                                                  DMA_FROM_DEVICE);
3410                         }
3411                 } else {
3412                         struct sk_buff *skb;
3413                         int frame_len;
3414                         unsigned int des;
3415
3416                         stmmac_get_desc_addr(priv, p, &des);
3417                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3418
3419                         /*  If frame length is greater than skb buffer size
3420                          *  (preallocated during init) then the packet is
3421                          *  ignored
3422                          */
3423                         if (frame_len > priv->dma_buf_sz) {
3424                                 netdev_err(priv->dev,
3425                                            "len %d larger than size (%d)\n",
3426                                            frame_len, priv->dma_buf_sz);
3427                                 priv->dev->stats.rx_length_errors++;
3428                                 break;
3429                         }
3430
3431                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3432                          * Type frames (LLC/LLC-SNAP)
3433                          *
3434                          * llc_snap is never checked in GMAC >= 4, so this ACS
3435                          * feature is always disabled and packets need to be
3436                          * stripped manually.
3437                          */
3438                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3439                             unlikely(status != llc_snap))
3440                                 frame_len -= ETH_FCS_LEN;
3441
3442                         if (netif_msg_rx_status(priv)) {
3443                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3444                                            p, entry, des);
3445                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3446                                            frame_len, status);
3447                         }
3448
3449                         /* The zero-copy is always used for all the sizes
3450                          * in case of GMAC4 because it needs
3451                          * to refill the used descriptors, always.
3452                          */
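			/* On the other cores, small frames (below rx_copybreak) or a
			 * zero-copy threshold hit are copied into a freshly allocated
			 * skb, so the original DMA buffer stays mapped and can be
			 * reused by the ring without a refill.
			 */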
3453                         if (unlikely(!xmac &&
3454                                      ((frame_len < priv->rx_copybreak) ||
3455                                      stmmac_rx_threshold_count(rx_q)))) {
3456                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3457                                                                 frame_len);
3458                                 if (unlikely(!skb)) {
3459                                         if (net_ratelimit())
3460                                                 dev_warn(priv->device,
3461                                                          "packet dropped\n");
3462                                         priv->dev->stats.rx_dropped++;
3463                                         break;
3464                                 }
3465
3466                                 dma_sync_single_for_cpu(priv->device,
3467                                                         rx_q->rx_skbuff_dma
3468                                                         [entry], frame_len,
3469                                                         DMA_FROM_DEVICE);
3470                                 skb_copy_to_linear_data(skb,
3471                                                         rx_q->
3472                                                         rx_skbuff[entry]->data,
3473                                                         frame_len);
3474
3475                                 skb_put(skb, frame_len);
3476                                 dma_sync_single_for_device(priv->device,
3477                                                            rx_q->rx_skbuff_dma
3478                                                            [entry], frame_len,
3479                                                            DMA_FROM_DEVICE);
3480                         } else {
3481                                 skb = rx_q->rx_skbuff[entry];
3482                                 if (unlikely(!skb)) {
3483                                         netdev_err(priv->dev,
3484                                                    "%s: Inconsistent Rx chain\n",
3485                                                    priv->dev->name);
3486                                         priv->dev->stats.rx_dropped++;
3487                                         break;
3488                                 }
3489                                 prefetch(skb->data - NET_IP_ALIGN);
3490                                 rx_q->rx_skbuff[entry] = NULL;
3491                                 rx_q->rx_zeroc_thresh++;
3492
3493                                 skb_put(skb, frame_len);
3494                                 dma_unmap_single(priv->device,
3495                                                  rx_q->rx_skbuff_dma[entry],
3496                                                  priv->dma_buf_sz,
3497                                                  DMA_FROM_DEVICE);
3498                         }
3499
3500                         if (netif_msg_pktdata(priv)) {
3501                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3502                                            frame_len);
3503                                 print_pkt(skb->data, frame_len);
3504                         }
3505
3506                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3507
3508                         stmmac_rx_vlan(priv->dev, skb);
3509
3510                         skb->protocol = eth_type_trans(skb, priv->dev);
3511
3512                         if (unlikely(!coe))
3513                                 skb_checksum_none_assert(skb);
3514                         else
3515                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3516
3517                         napi_gro_receive(&ch->napi, skb);
3518
3519                         priv->dev->stats.rx_packets++;
3520                         priv->dev->stats.rx_bytes += frame_len;
3521                 }
3522                 entry = next_entry;
3523         }
3524
3525         stmmac_rx_refill(priv, queue);
3526
3527         priv->xstats.rx_pkt_n += count;
3528
3529         return count;
3530 }
3531
3532 /**
3533  *  stmmac_poll - stmmac poll method (NAPI)
3534  *  @napi : pointer to the napi structure.
3535  *  @budget : maximum number of packets that the current CPU can receive from
3536  *            all interfaces.
3537  *  Description :
3538  *  To look at the incoming frames and clear the tx resources.
3539  */
3540 static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3541 {
3542         struct stmmac_channel *ch =
3543                 container_of(napi, struct stmmac_channel, napi);
3544         struct stmmac_priv *priv = ch->priv_data;
3545         int work_done, rx_done = 0, tx_done = 0;
3546         u32 chan = ch->index;
3547
3548         priv->xstats.napi_poll++;
3549
3550         if (ch->has_tx)
3551                 tx_done = stmmac_tx_clean(priv, budget, chan);
3552         if (ch->has_rx)
3553                 rx_done = stmmac_rx(priv, budget, chan);
3554
3555         work_done = max(rx_done, tx_done);
3556         work_done = min(work_done, budget);
3557
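	/* If the budget was not exhausted, complete NAPI and re-enable this
	 * channel's DMA interrupt. The DMA status is then re-read to catch
	 * any event that raced with the re-enable; if one is pending, NAPI is
	 * rescheduled and the interrupt masked again.
	 */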
3558         if (work_done < budget && napi_complete_done(napi, work_done)) {
3559                 int stat;
3560
3561                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3562                 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3563                                                    &priv->xstats, chan);
3564                 if (stat && napi_reschedule(napi))
3565                         stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3566         }
3567
3568         return work_done;
3569 }
3570
3571 /**
3572  *  stmmac_tx_timeout
3573  *  @dev : Pointer to net device structure
3574  *  Description: this function is called when a packet transmission fails to
3575  *   complete within a reasonable time. The driver will mark the error in the
3576  *   netdev structure and arrange for the device to be reset to a sane state
3577  *   in order to transmit a new packet.
3578  */
3579 static void stmmac_tx_timeout(struct net_device *dev)
3580 {
3581         struct stmmac_priv *priv = netdev_priv(dev);
3582
3583         stmmac_global_err(priv);
3584 }
3585
3586 /**
3587  *  stmmac_set_rx_mode - entry point for multicast addressing
3588  *  @dev : pointer to the device structure
3589  *  Description:
3590  *  This function is a driver entry point which gets called by the kernel
3591  *  whenever multicast addresses must be enabled/disabled.
3592  *  Return value:
3593  *  void.
3594  */
3595 static void stmmac_set_rx_mode(struct net_device *dev)
3596 {
3597         struct stmmac_priv *priv = netdev_priv(dev);
3598
3599         stmmac_set_filter(priv, priv->hw, dev);
3600 }
3601
3602 /**
3603  *  stmmac_change_mtu - entry point to change MTU size for the device.
3604  *  @dev : device pointer.
3605  *  @new_mtu : the new MTU size for the device.
3606  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
3607  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3608  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3609  *  Return value:
3610  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3611  *  file on failure.
3612  */
3613 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3614 {
3615         struct stmmac_priv *priv = netdev_priv(dev);
3616
3617         if (netif_running(dev)) {
3618                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3619                 return -EBUSY;
3620         }
3621
3622         dev->mtu = new_mtu;
3623
3624         netdev_update_features(dev);
3625
3626         return 0;
3627 }
3628
3629 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3630                                              netdev_features_t features)
3631 {
3632         struct stmmac_priv *priv = netdev_priv(dev);
3633
3634         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3635                 features &= ~NETIF_F_RXCSUM;
3636
3637         if (!priv->plat->tx_coe)
3638                 features &= ~NETIF_F_CSUM_MASK;
3639
3640         /* Some GMAC devices have a bugged Jumbo frame support that
3641          * needs to have the Tx COE disabled for oversized frames
3642          * (due to limited buffer sizes). In this case we disable
3643          * the TX csum insertion in the TDES and do not use SF (Store-and-Forward).
3644          */
3645         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3646                 features &= ~NETIF_F_CSUM_MASK;
3647
3648         /* Disable tso if asked by ethtool */
3649         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3650                 if (features & NETIF_F_TSO)
3651                         priv->tso = true;
3652                 else
3653                         priv->tso = false;
3654         }
3655
3656         return features;
3657 }
3658
3659 static int stmmac_set_features(struct net_device *netdev,
3660                                netdev_features_t features)
3661 {
3662         struct stmmac_priv *priv = netdev_priv(netdev);
3663
3664         /* Keep the COE Type in case checksum offload is supported */
3665         if (features & NETIF_F_RXCSUM)
3666                 priv->hw->rx_csum = priv->plat->rx_coe;
3667         else
3668                 priv->hw->rx_csum = 0;
3669         /* No check needed because rx_coe has been set before and it will be
3670          * fixed in case of issue.
3671          */
3672         stmmac_rx_ipc(priv, priv->hw);
3673
3674         return 0;
3675 }
3676
3677 /**
3678  *  stmmac_interrupt - main ISR
3679  *  @irq: interrupt number.
3680  *  @dev_id: to pass the net device pointer.
3681  *  Description: this is the main driver interrupt service routine.
3682  *  It can call:
3683  *  o DMA service routine (to manage incoming frame reception and transmission
3684  *    status)
3685  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3686  *    interrupts.
3687  */
3688 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3689 {
3690         struct net_device *dev = (struct net_device *)dev_id;
3691         struct stmmac_priv *priv = netdev_priv(dev);
3692         u32 rx_cnt = priv->plat->rx_queues_to_use;
3693         u32 tx_cnt = priv->plat->tx_queues_to_use;
3694         u32 queues_count;
3695         u32 queue;
3696         bool xmac;
3697
3698         xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3699         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3700
3701         if (priv->irq_wake)
3702                 pm_wakeup_event(priv->device, 0);
3703
3704         if (unlikely(!dev)) {
3705                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3706                 return IRQ_NONE;
3707         }
3708
3709         /* Check if adapter is up */
3710         if (test_bit(STMMAC_DOWN, &priv->state))
3711                 return IRQ_HANDLED;
3712         /* Check if a fatal error happened */
3713         if (stmmac_safety_feat_interrupt(priv))
3714                 return IRQ_HANDLED;
3715
3716         /* To handle GMAC own interrupts */
3717         if ((priv->plat->has_gmac) || xmac) {
3718                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3719                 int mtl_status;
3720
3721                 if (unlikely(status)) {
3722                         /* For LPI we need to save the tx status */
3723                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3724                                 priv->tx_path_in_lpi_mode = true;
3725                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3726                                 priv->tx_path_in_lpi_mode = false;
3727                 }
3728
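		/* Per-queue MTL interrupts: an RX FIFO overflow is handled by
		 * re-writing the queue's RX tail pointer, which kicks the DMA
		 * into fetching receive descriptors again.
		 */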
3729                 for (queue = 0; queue < queues_count; queue++) {
3730                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3731
3732                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3733                                                                 queue);
3734                         if (mtl_status != -EINVAL)
3735                                 status |= mtl_status;
3736
3737                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3738                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3739                                                        rx_q->rx_tail_addr,
3740                                                        queue);
3741                 }
3742
3743                 /* PCS link status */
3744                 if (priv->hw->pcs) {
3745                         if (priv->xstats.pcs_link)
3746                                 netif_carrier_on(dev);
3747                         else
3748                                 netif_carrier_off(dev);
3749                 }
3750         }
3751
3752         /* To handle DMA interrupts */
3753         stmmac_dma_interrupt(priv);
3754
3755         return IRQ_HANDLED;
3756 }
3757
3758 #ifdef CONFIG_NET_POLL_CONTROLLER
3759 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3760  * to allow network I/O with interrupts disabled.
3761  */
3762 static void stmmac_poll_controller(struct net_device *dev)
3763 {
3764         disable_irq(dev->irq);
3765         stmmac_interrupt(dev->irq, dev);
3766         enable_irq(dev->irq);
3767 }
3768 #endif
3769
3770 /**
3771  *  stmmac_ioctl - Entry point for the Ioctl
3772  *  @dev: Device pointer.
3773  *  @rq: An IOCTL specific structure, that can contain a pointer to
3774  *  a proprietary structure used to pass information to the driver.
3775  *  @cmd: IOCTL command
3776  *  Description:
3777  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3778  */
3779 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3780 {
3781         int ret = -EOPNOTSUPP;
3782
3783         if (!netif_running(dev))
3784                 return -EINVAL;
3785
3786         switch (cmd) {
3787         case SIOCGMIIPHY:
3788         case SIOCGMIIREG:
3789         case SIOCSMIIREG:
3790                 if (!dev->phydev)
3791                         return -EINVAL;
3792                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3793                 break;
3794         case SIOCSHWTSTAMP:
3795                 ret = stmmac_hwtstamp_set(dev, rq);
3796                 break;
3797         case SIOCGHWTSTAMP:
3798                 ret = stmmac_hwtstamp_get(dev, rq);
3799                 break;
3800         default:
3801                 break;
3802         }
3803
3804         return ret;
3805 }
3806
3807 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3808                                     void *cb_priv)
3809 {
3810         struct stmmac_priv *priv = cb_priv;
3811         int ret = -EOPNOTSUPP;
3812
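	/* Quiesce all queues while the hardware classifier is reprogrammed,
	 * and restart them once the change has been applied.
	 */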
3813         stmmac_disable_all_queues(priv);
3814
3815         switch (type) {
3816         case TC_SETUP_CLSU32:
3817                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3818                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3819                 break;
3820         default:
3821                 break;
3822         }
3823
3824         stmmac_enable_all_queues(priv);
3825         return ret;
3826 }
3827
3828 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3829                                  struct tc_block_offload *f)
3830 {
3831         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3832                 return -EOPNOTSUPP;
3833
3834         switch (f->command) {
3835         case TC_BLOCK_BIND:
3836                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3837                                 priv, priv, f->extack);
3838         case TC_BLOCK_UNBIND:
3839                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3840                 return 0;
3841         default:
3842                 return -EOPNOTSUPP;
3843         }
3844 }
3845
3846 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3847                            void *type_data)
3848 {
3849         struct stmmac_priv *priv = netdev_priv(ndev);
3850
3851         switch (type) {
3852         case TC_SETUP_BLOCK:
3853                 return stmmac_setup_tc_block(priv, type_data);
3854         case TC_SETUP_QDISC_CBS:
3855                 return stmmac_tc_setup_cbs(priv, priv, type_data);
3856         default:
3857                 return -EOPNOTSUPP;
3858         }
3859 }
3860
3861 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3862 {
3863         struct stmmac_priv *priv = netdev_priv(ndev);
3864         int ret = 0;
3865
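	/* eth_mac_addr() validates the new address and updates dev_addr;
	 * the address is then written to MAC address register 0 so that the
	 * hardware filter matches it.
	 */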
3866         ret = eth_mac_addr(ndev, addr);
3867         if (ret)
3868                 return ret;
3869
3870         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3871
3872         return ret;
3873 }
3874
3875 #ifdef CONFIG_DEBUG_FS
3876 static struct dentry *stmmac_fs_dir;
3877
3878 static void sysfs_display_ring(void *head, int size, int extend_desc,
3879                                struct seq_file *seq)
3880 {
3881         int i;
3882         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3883         struct dma_desc *p = (struct dma_desc *)head;
3884
3885         for (i = 0; i < size; i++) {
3886                 if (extend_desc) {
3887                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3888                                    i, (unsigned int)virt_to_phys(ep),
3889                                    le32_to_cpu(ep->basic.des0),
3890                                    le32_to_cpu(ep->basic.des1),
3891                                    le32_to_cpu(ep->basic.des2),
3892                                    le32_to_cpu(ep->basic.des3));
3893                         ep++;
3894                 } else {
3895                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3896                                    i, (unsigned int)virt_to_phys(p),
3897                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3898                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3899                         p++;
3900                 }
3901                 seq_printf(seq, "\n");
3902         }
3903 }
3904
3905 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
3906 {
3907         struct net_device *dev = seq->private;
3908         struct stmmac_priv *priv = netdev_priv(dev);
3909         u32 rx_count = priv->plat->rx_queues_to_use;
3910         u32 tx_count = priv->plat->tx_queues_to_use;
3911         u32 queue;
3912
3913         if ((dev->flags & IFF_UP) == 0)
3914                 return 0;
3915
3916         for (queue = 0; queue < rx_count; queue++) {
3917                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3918
3919                 seq_printf(seq, "RX Queue %d:\n", queue);
3920
3921                 if (priv->extend_desc) {
3922                         seq_printf(seq, "Extended descriptor ring:\n");
3923                         sysfs_display_ring((void *)rx_q->dma_erx,
3924                                            DMA_RX_SIZE, 1, seq);
3925                 } else {
3926                         seq_printf(seq, "Descriptor ring:\n");
3927                         sysfs_display_ring((void *)rx_q->dma_rx,
3928                                            DMA_RX_SIZE, 0, seq);
3929                 }
3930         }
3931
3932         for (queue = 0; queue < tx_count; queue++) {
3933                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3934
3935                 seq_printf(seq, "TX Queue %d:\n", queue);
3936
3937                 if (priv->extend_desc) {
3938                         seq_printf(seq, "Extended descriptor ring:\n");
3939                         sysfs_display_ring((void *)tx_q->dma_etx,
3940                                            DMA_TX_SIZE, 1, seq);
3941                 } else {
3942                         seq_printf(seq, "Descriptor ring:\n");
3943                         sysfs_display_ring((void *)tx_q->dma_tx,
3944                                            DMA_TX_SIZE, 0, seq);
3945                 }
3946         }
3947
3948         return 0;
3949 }
3950 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
3951
3952 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
3953 {
3954         struct net_device *dev = seq->private;
3955         struct stmmac_priv *priv = netdev_priv(dev);
3956
3957         if (!priv->hw_cap_support) {
3958                 seq_printf(seq, "DMA HW features not supported\n");
3959                 return 0;
3960         }
3961
3962         seq_printf(seq, "==============================\n");
3963         seq_printf(seq, "\tDMA HW features\n");
3964         seq_printf(seq, "==============================\n");
3965
3966         seq_printf(seq, "\t10/100 Mbps: %s\n",
3967                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3968         seq_printf(seq, "\t1000 Mbps: %s\n",
3969                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3970         seq_printf(seq, "\tHalf duplex: %s\n",
3971                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3972         seq_printf(seq, "\tHash Filter: %s\n",
3973                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3974         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3975                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3976         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3977                    (priv->dma_cap.pcs) ? "Y" : "N");
3978         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3979                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3980         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3981                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3982         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3983                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3984         seq_printf(seq, "\tRMON module: %s\n",
3985                    (priv->dma_cap.rmon) ? "Y" : "N");
3986         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3987                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3988         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3989                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3990         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3991                    (priv->dma_cap.eee) ? "Y" : "N");
3992         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3993         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3994                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3995         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3996                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3997                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3998         } else {
3999                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4000                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4001                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4002                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4003         }
4004         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4005                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4006         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4007                    priv->dma_cap.number_rx_channel);
4008         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4009                    priv->dma_cap.number_tx_channel);
4010         seq_printf(seq, "\tEnhanced descriptors: %s\n",
4011                    (priv->dma_cap.enh_desc) ? "Y" : "N");
4012
4013         return 0;
4014 }
4015 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
4016
4017 static int stmmac_init_fs(struct net_device *dev)
4018 {
4019         struct stmmac_priv *priv = netdev_priv(dev);
4020
4021         /* Create per netdev entries */
4022         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4023
4024         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4025                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4026
4027                 return -ENOMEM;
4028         }
4029
4030         /* Entry to report DMA RX/TX rings */
4031         priv->dbgfs_rings_status =
4032                 debugfs_create_file("descriptors_status", 0444,
4033                                     priv->dbgfs_dir, dev,
4034                                     &stmmac_rings_status_fops);
4035
4036         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4037                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4038                 debugfs_remove_recursive(priv->dbgfs_dir);
4039
4040                 return -ENOMEM;
4041         }
4042
4043         /* Entry to report the DMA HW features */
4044         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4045                                                   priv->dbgfs_dir,
4046                                                   dev, &stmmac_dma_cap_fops);
4047
4048         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4049                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4050                 debugfs_remove_recursive(priv->dbgfs_dir);
4051
4052                 return -ENOMEM;
4053         }
4054
4055         return 0;
4056 }
4057
4058 static void stmmac_exit_fs(struct net_device *dev)
4059 {
4060         struct stmmac_priv *priv = netdev_priv(dev);
4061
4062         debugfs_remove_recursive(priv->dbgfs_dir);
4063 }
4064 #endif /* CONFIG_DEBUG_FS */
4065
4066 static const struct net_device_ops stmmac_netdev_ops = {
4067         .ndo_open = stmmac_open,
4068         .ndo_start_xmit = stmmac_xmit,
4069         .ndo_stop = stmmac_release,
4070         .ndo_change_mtu = stmmac_change_mtu,
4071         .ndo_fix_features = stmmac_fix_features,
4072         .ndo_set_features = stmmac_set_features,
4073         .ndo_set_rx_mode = stmmac_set_rx_mode,
4074         .ndo_tx_timeout = stmmac_tx_timeout,
4075         .ndo_do_ioctl = stmmac_ioctl,
4076         .ndo_setup_tc = stmmac_setup_tc,
4077 #ifdef CONFIG_NET_POLL_CONTROLLER
4078         .ndo_poll_controller = stmmac_poll_controller,
4079 #endif
4080         .ndo_set_mac_address = stmmac_set_mac_address,
4081 };
4082
4083 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4084 {
4085         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4086                 return;
4087         if (test_bit(STMMAC_DOWN, &priv->state))
4088                 return;
4089
4090         netdev_err(priv->dev, "Reset adapter.\n");
4091
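	/* Recover by closing and re-opening the interface under rtnl; the
	 * STMMAC_DOWN / STMMAC_RESETING bits keep the interrupt handler and
	 * concurrent resets from touching the hardware in the meantime.
	 */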
4092         rtnl_lock();
4093         netif_trans_update(priv->dev);
4094         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4095                 usleep_range(1000, 2000);
4096
4097         set_bit(STMMAC_DOWN, &priv->state);
4098         dev_close(priv->dev);
4099         dev_open(priv->dev, NULL);
4100         clear_bit(STMMAC_DOWN, &priv->state);
4101         clear_bit(STMMAC_RESETING, &priv->state);
4102         rtnl_unlock();
4103 }
4104
4105 static void stmmac_service_task(struct work_struct *work)
4106 {
4107         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4108                         service_task);
4109
4110         stmmac_reset_subtask(priv);
4111         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4112 }
4113
4114 /**
4115  *  stmmac_hw_init - Init the MAC device
4116  *  @priv: driver private structure
4117  *  Description: this function is to configure the MAC device according to
4118  *  some platform parameters or the HW capability register. It prepares the
4119  *  driver to use either ring or chain modes and to setup either enhanced or
4120  *  normal descriptors.
4121  */
4122 static int stmmac_hw_init(struct stmmac_priv *priv)
4123 {
4124         int ret;
4125
4126         /* dwmac-sun8i only works in chain mode */
4127         if (priv->plat->has_sun8i)
4128                 chain_mode = 1;
4129         priv->chain_mode = chain_mode;
4130
4131         /* Initialize HW Interface */
4132         ret = stmmac_hwif_init(priv);
4133         if (ret)
4134                 return ret;
4135
4136         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4137         priv->hw_cap_support = stmmac_get_hw_features(priv);
4138         if (priv->hw_cap_support) {
4139                 dev_info(priv->device, "DMA HW capability register supported\n");
4140
4141                 /* We can override some gmac/dma configuration fields
4142                  * (e.g. enh_desc, tx_coe) that are passed through the
4143                  * platform data with the values from the HW capability
4144                  * register (if supported).
4145                  */
4146                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4147                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4148                 priv->hw->pmt = priv->plat->pmt;
4149
4150                 /* TXCOE doesn't work in thresh DMA mode */
4151                 if (priv->plat->force_thresh_dma_mode)
4152                         priv->plat->tx_coe = 0;
4153                 else
4154                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4155
4156                 /* In case of GMAC4, rx_coe comes from the HW cap register. */
4157                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4158
4159                 if (priv->dma_cap.rx_coe_type2)
4160                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4161                 else if (priv->dma_cap.rx_coe_type1)
4162                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4163
4164         } else {
4165                 dev_info(priv->device, "No HW DMA feature register supported\n");
4166         }
4167
4168         if (priv->plat->rx_coe) {
4169                 priv->hw->rx_csum = priv->plat->rx_coe;
4170                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4171                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4172                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4173         }
4174         if (priv->plat->tx_coe)
4175                 dev_info(priv->device, "TX Checksum insertion supported\n");
4176
4177         if (priv->plat->pmt) {
4178                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4179                 device_set_wakeup_capable(priv->device, 1);
4180         }
4181
4182         if (priv->dma_cap.tsoen)
4183                 dev_info(priv->device, "TSO supported\n");
4184
4185         /* Run HW quirks, if any */
4186         if (priv->hwif_quirks) {
4187                 ret = priv->hwif_quirks(priv);
4188                 if (ret)
4189                         return ret;
4190         }
4191
4192         /* Rx Watchdog is available in the COREs newer than the 3.40.
4193          * In some cases, for example on buggy HW, this feature
4194          * has to be disabled; this can be done by passing the
4195          * riwt_off field from the platform.
4196          */
4197         if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4198             (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4199                 priv->use_riwt = 1;
4200                 dev_info(priv->device,
4201                          "Enable RX Mitigation via HW Watchdog Timer\n");
4202         }
4203
4204         return 0;
4205 }
4206
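/* Illustrative sketch (not part of this driver file): a platform glue
 * driver such as dwmac-generic typically collects its resources and then
 * hands them to stmmac_dvr_probe(). The helper names below come from
 * stmmac_platform.h and may differ between kernel versions:
 *
 *	struct plat_stmmacenet_data *plat;
 *	struct stmmac_resources res;
 *	int ret;
 *
 *	memset(&res, 0, sizeof(res));
 *	ret = stmmac_get_platform_resources(pdev, &res);
 *	if (ret)
 *		return ret;
 *
 *	plat = stmmac_probe_config_dt(pdev, &res.mac);
 *	if (IS_ERR(plat))
 *		return PTR_ERR(plat);
 *
 *	return stmmac_dvr_probe(&pdev->dev, plat, &res);
 */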
4207 /**
4208  * stmmac_dvr_probe
4209  * @device: device pointer
4210  * @plat_dat: platform data pointer
4211  * @res: stmmac resource pointer
4212  * Description: this is the main probe function used to allocate the
4213  * net_device via alloc_etherdev and to set up the private structure.
4214  * Return:
4215  * 0 on success, a negative errno code on failure.
4216  */
4217 int stmmac_dvr_probe(struct device *device,
4218                      struct plat_stmmacenet_data *plat_dat,
4219                      struct stmmac_resources *res)
4220 {
4221         struct net_device *ndev = NULL;
4222         struct stmmac_priv *priv;
4223         u32 queue, maxq;
4224         int ret = 0;
4225
4226         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4227                                   MTL_MAX_TX_QUEUES,
4228                                   MTL_MAX_RX_QUEUES);
4229         if (!ndev)
4230                 return -ENOMEM;
4231
4232         SET_NETDEV_DEV(ndev, device);
4233
4234         priv = netdev_priv(ndev);
4235         priv->device = device;
4236         priv->dev = ndev;
4237
4238         stmmac_set_ethtool_ops(ndev);
4239         priv->pause = pause;
4240         priv->plat = plat_dat;
4241         priv->ioaddr = res->addr;
4242         priv->dev->base_addr = (unsigned long)res->addr;
4243
4244         priv->dev->irq = res->irq;
4245         priv->wol_irq = res->wol_irq;
4246         priv->lpi_irq = res->lpi_irq;
4247
4248         if (res->mac)
4249                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4250
4251         dev_set_drvdata(device, priv->dev);
4252
4253         /* Verify driver arguments */
4254         stmmac_verify_args();
4255
4256         /* Allocate workqueue */
4257         priv->wq = create_singlethread_workqueue("stmmac_wq");
4258         if (!priv->wq) {
4259                 dev_err(priv->device, "failed to create workqueue\n");
4260                 ret = -ENOMEM;
4261                 goto error_wq;
4262         }
4263
4264         INIT_WORK(&priv->service_task, stmmac_service_task);
4265
4266         /* Override with kernel parameters if supplied XXX CRS XXX
4267          * this needs to have multiple instances
4268          */
4269         if ((phyaddr >= 0) && (phyaddr <= 31))
4270                 priv->plat->phy_addr = phyaddr;
4271
4272         if (priv->plat->stmmac_rst) {
4273                 ret = reset_control_assert(priv->plat->stmmac_rst);
4274                 reset_control_deassert(priv->plat->stmmac_rst);
4275                 /* Some reset controllers provide only a reset callback
4276                  * instead of the assert + deassert callback pair.
4277                  */
4278                 if (ret == -ENOTSUPP)
4279                         reset_control_reset(priv->plat->stmmac_rst);
4280         }
4281
4282         /* Init MAC and get the capabilities */
4283         ret = stmmac_hw_init(priv);
4284         if (ret)
4285                 goto error_hw_init;
4286
4287         /* Configure real RX and TX queues */
4288         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4289         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4290
4291         ndev->netdev_ops = &stmmac_netdev_ops;
4292
4293         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4294                             NETIF_F_RXCSUM;
4295
4296         ret = stmmac_tc_init(priv, priv);
4297         if (!ret) {
4298                 ndev->hw_features |= NETIF_F_HW_TC;
4299         }
4300
4301         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4302                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4303                 priv->tso = true;
4304                 dev_info(priv->device, "TSO feature enabled\n");
4305         }
4306         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4307         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4308 #ifdef STMMAC_VLAN_TAG_USED
4309         /* Both mac100 and gmac support receive VLAN tag detection */
4310         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4311 #endif
4312         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4313
4314         /* MTU range: 46 - hw-specific max */
4315         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4316         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4317                 ndev->max_mtu = JUMBO_LEN;
4318         else if (priv->plat->has_xgmac)
4319                 ndev->max_mtu = XGMAC_JUMBO_LEN;
4320         else
4321                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4322         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4323          * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4324          */
4325         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4326             (priv->plat->maxmtu >= ndev->min_mtu))
4327                 ndev->max_mtu = priv->plat->maxmtu;
4328         else if (priv->plat->maxmtu < ndev->min_mtu)
4329                 dev_warn(priv->device,
4330                          "%s: warning: maxmtu having invalid value (%d)\n",
4331                          __func__, priv->plat->maxmtu);
4332
4333         if (flow_ctrl)
4334                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4335
4336         /* Setup channels NAPI */
4337         maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4338
4339         for (queue = 0; queue < maxq; queue++) {
4340                 struct stmmac_channel *ch = &priv->channel[queue];
4341
4342                 ch->priv_data = priv;
4343                 ch->index = queue;
4344
4345                 if (queue < priv->plat->rx_queues_to_use)
4346                         ch->has_rx = true;
4347                 if (queue < priv->plat->tx_queues_to_use)
4348                         ch->has_tx = true;
4349
4350                 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4351                                NAPI_POLL_WEIGHT);
4352         }
4353
4354         mutex_init(&priv->lock);
4355
4356         /* If a specific clk_csr value is passed from the platform
4357          * this means that the CSR Clock Range selection cannot be
4358          * changed at run-time and it is fixed. Otherwise the driver
4359          * will try to set the MDC clock dynamically according to the
4360          * actual csr clock input.
4361          */
4362         if (!priv->plat->clk_csr)
4363                 stmmac_clk_csr_set(priv);
4364         else
4365                 priv->clk_csr = priv->plat->clk_csr;
4366
4367         stmmac_check_pcs_mode(priv);
4368
4369         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4370             priv->hw->pcs != STMMAC_PCS_TBI &&
4371             priv->hw->pcs != STMMAC_PCS_RTBI) {
4372                 /* MDIO bus Registration */
4373                 ret = stmmac_mdio_register(ndev);
4374                 if (ret < 0) {
4375                         dev_err(priv->device,
4376                                 "%s: MDIO bus (id: %d) registration failed\n",
4377                                 __func__, priv->plat->bus_id);
4378                         goto error_mdio_register;
4379                 }
4380         }
4381
4382         ret = register_netdev(ndev);
4383         if (ret) {
4384                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4385                         __func__, ret);
4386                 goto error_netdev_register;
4387         }
4388
4389 #ifdef CONFIG_DEBUG_FS
4390         ret = stmmac_init_fs(ndev);
4391         if (ret < 0)
4392                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4393                             __func__);
4394 #endif
4395
4396         return ret;
4397
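        /* Error unwind: undo the successful steps above in reverse order */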
4398 error_netdev_register:
4399         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4400             priv->hw->pcs != STMMAC_PCS_TBI &&
4401             priv->hw->pcs != STMMAC_PCS_RTBI)
4402                 stmmac_mdio_unregister(ndev);
4403 error_mdio_register:
4404         for (queue = 0; queue < maxq; queue++) {
4405                 struct stmmac_channel *ch = &priv->channel[queue];
4406
4407                 netif_napi_del(&ch->napi);
4408         }
4409 error_hw_init:
4410         destroy_workqueue(priv->wq);
4411 error_wq:
4412         free_netdev(ndev);
4413
4414         return ret;
4415 }
4416 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4417
4418 /**
4419  * stmmac_dvr_remove
4420  * @dev: device pointer
4421  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4422  * changes the link status and releases the DMA descriptor rings.
4423  */
4424 int stmmac_dvr_remove(struct device *dev)
4425 {
4426         struct net_device *ndev = dev_get_drvdata(dev);
4427         struct stmmac_priv *priv = netdev_priv(ndev);
4428
4429         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4430
4431 #ifdef CONFIG_DEBUG_FS
4432         stmmac_exit_fs(ndev);
4433 #endif
4434         stmmac_stop_all_dma(priv);
4435
4436         stmmac_mac_set(priv, priv->ioaddr, false);
4437         netif_carrier_off(ndev);
4438         unregister_netdev(ndev);
4439         if (priv->plat->stmmac_rst)
4440                 reset_control_assert(priv->plat->stmmac_rst);
4441         clk_disable_unprepare(priv->plat->pclk);
4442         clk_disable_unprepare(priv->plat->stmmac_clk);
4443         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4444             priv->hw->pcs != STMMAC_PCS_TBI &&
4445             priv->hw->pcs != STMMAC_PCS_RTBI)
4446                 stmmac_mdio_unregister(ndev);
4447         destroy_workqueue(priv->wq);
4448         mutex_destroy(&priv->lock);
4449         free_netdev(ndev);
4450
4451         return 0;
4452 }
4453 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4454
4455 /**
4456  * stmmac_suspend - suspend callback
4457  * @dev: device pointer
4458  * Description: this is the function to suspend the device; it is called
4459  * by the platform driver to stop the network queues, program the PMT
4460  * register (for WoL) and clean up and release the driver resources.
4461  */
4462 int stmmac_suspend(struct device *dev)
4463 {
4464         struct net_device *ndev = dev_get_drvdata(dev);
4465         struct stmmac_priv *priv = netdev_priv(ndev);
4466
4467         if (!ndev || !netif_running(ndev))
4468                 return 0;
4469
4470         if (ndev->phydev)
4471                 phy_stop(ndev->phydev);
4472
4473         mutex_lock(&priv->lock);
4474
4475         netif_device_detach(ndev);
4476         stmmac_stop_all_queues(priv);
4477
4478         stmmac_disable_all_queues(priv);
4479
4480         /* Stop TX/RX DMA */
4481         stmmac_stop_all_dma(priv);
4482
4483         /* Enable Power down mode by programming the PMT regs */
4484         if (device_may_wakeup(priv->device)) {
4485                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4486                 priv->irq_wake = 1;
4487         } else {
4488                 stmmac_mac_set(priv, priv->ioaddr, false);
4489                 pinctrl_pm_select_sleep_state(priv->device);
4490                 /* Disable the clocks since PMT wake-up is off */
4491                 clk_disable(priv->plat->pclk);
4492                 clk_disable(priv->plat->stmmac_clk);
4493         }
4494         mutex_unlock(&priv->lock);
4495
4496         priv->oldlink = false;
4497         priv->speed = SPEED_UNKNOWN;
4498         priv->oldduplex = DUPLEX_UNKNOWN;
4499         return 0;
4500 }
4501 EXPORT_SYMBOL_GPL(stmmac_suspend);
4502
4503 /**
4504  * stmmac_reset_queues_param - reset queue parameters
4505  * @priv: driver private structure
4506  */
4507 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4508 {
4509         u32 rx_cnt = priv->plat->rx_queues_to_use;
4510         u32 tx_cnt = priv->plat->tx_queues_to_use;
4511         u32 queue;
4512
4513         for (queue = 0; queue < rx_cnt; queue++) {
4514                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4515
4516                 rx_q->cur_rx = 0;
4517                 rx_q->dirty_rx = 0;
4518         }
4519
4520         for (queue = 0; queue < tx_cnt; queue++) {
4521                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4522
4523                 tx_q->cur_tx = 0;
4524                 tx_q->dirty_tx = 0;
4525                 tx_q->mss = 0;
4526         }
4527 }
4528
4529 /**
4530  * stmmac_resume - resume callback
4531  * @dev: device pointer
4532  * Description: when resuming, this function is invoked to set up the DMA
4533  * and CORE in a usable state.
4534  */
4535 int stmmac_resume(struct device *dev)
4536 {
4537         struct net_device *ndev = dev_get_drvdata(dev);
4538         struct stmmac_priv *priv = netdev_priv(ndev);
4539
4540         if (!netif_running(ndev))
4541                 return 0;
4542
4543         /* The Power Down bit in the PMT register is cleared
4544          * automatically as soon as a magic packet or a Wake-up frame
4545          * is received. Anyway, it's better to manually clear
4546          * this bit because it can generate problems while resuming
4547          * from other devices (e.g. serial console).
4548          */
4549         if (device_may_wakeup(priv->device)) {
4550                 mutex_lock(&priv->lock);
4551                 stmmac_pmt(priv, priv->hw, 0);
4552                 mutex_unlock(&priv->lock);
4553                 priv->irq_wake = 0;
4554         } else {
4555                 pinctrl_pm_select_default_state(priv->device);
4556                 /* enable the clocks previously disabled */
4557                 clk_enable(priv->plat->stmmac_clk);
4558                 clk_enable(priv->plat->pclk);
4559                 /* reset the phy so that it's ready */
4560                 if (priv->mii)
4561                         stmmac_mdio_reset(priv->mii);
4562         }
4563
4564         netif_device_attach(ndev);
4565
4566         mutex_lock(&priv->lock);
4567
4568         stmmac_reset_queues_param(priv);
4569
4570         stmmac_clear_descriptors(priv);
4571
4572         stmmac_hw_setup(ndev, false);
4573         stmmac_init_tx_coalesce(priv);
4574         stmmac_set_rx_mode(ndev);
4575
4576         stmmac_enable_all_queues(priv);
4577
4578         stmmac_start_all_queues(priv);
4579
4580         mutex_unlock(&priv->lock);
4581
4582         if (ndev->phydev)
4583                 phy_start(ndev->phydev);
4584
4585         return 0;
4586 }
4587 EXPORT_SYMBOL_GPL(stmmac_resume);
4588
4589 #ifndef MODULE
4590 static int __init stmmac_cmdline_opt(char *str)
4591 {
4592         char *opt;
4593
4594         if (!str || !*str)
4595                 return -EINVAL;
4596         while ((opt = strsep(&str, ",")) != NULL) {
4597                 if (!strncmp(opt, "debug:", 6)) {
4598                         if (kstrtoint(opt + 6, 0, &debug))
4599                                 goto err;
4600                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4601                         if (kstrtoint(opt + 8, 0, &phyaddr))
4602                                 goto err;
4603                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4604                         if (kstrtoint(opt + 7, 0, &buf_sz))
4605                                 goto err;
4606                 } else if (!strncmp(opt, "tc:", 3)) {
4607                         if (kstrtoint(opt + 3, 0, &tc))
4608                                 goto err;
4609                 } else if (!strncmp(opt, "watchdog:", 9)) {
4610                         if (kstrtoint(opt + 9, 0, &watchdog))
4611                                 goto err;
4612                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4613                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4614                                 goto err;
4615                 } else if (!strncmp(opt, "pause:", 6)) {
4616                         if (kstrtoint(opt + 6, 0, &pause))
4617                                 goto err;
4618                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4619                         if (kstrtoint(opt + 10, 0, &eee_timer))
4620                                 goto err;
4621                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4622                         if (kstrtoint(opt + 11, 0, &chain_mode))
4623                                 goto err;
4624                 }
4625         }
4626         return 0;
4627
4628 err:
4629         pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4630         return -EINVAL;
4631 }
4632
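/* Example (illustrative): when the driver is built in, options are passed
 * on the kernel command line as a comma-separated list, e.g.:
 *
 *	stmmaceth=watchdog:4000,debug:16,phyaddr:1
 */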
4633 __setup("stmmaceth=", stmmac_cmdline_opt);
4634 #endif /* MODULE */
4635
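/* Module init/exit only manage the shared debugfs root directory; the
 * actual devices are registered by the platform/PCI glue drivers, which
 * call stmmac_dvr_probe() and stmmac_dvr_remove().
 */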
4636 static int __init stmmac_init(void)
4637 {
4638 #ifdef CONFIG_DEBUG_FS
4639         /* Create debugfs main directory if it doesn't exist yet */
4640         if (!stmmac_fs_dir) {
4641                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4642
4643                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4644                         pr_err("ERROR %s, debugfs create directory failed\n",
4645                                STMMAC_RESOURCE_NAME);
4646
4647                         return -ENOMEM;
4648                 }
4649         }
4650 #endif
4651
4652         return 0;
4653 }
4654
4655 static void __exit stmmac_exit(void)
4656 {
4657 #ifdef CONFIG_DEBUG_FS
4658         debugfs_remove_recursive(stmmac_fs_dir);
4659 #endif
4660 }
4661
4662 module_init(stmmac_init)
4663 module_exit(stmmac_exit)
4664
4665 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4666 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4667 MODULE_LICENSE("GPL");