1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   The full GNU General Public License is included in this distribution in
17   the file called "COPYING".
18
19   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21   Documentation available at:
22         http://www.stlinux.com
23   Support available at:
24         https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include <net/pkt_cls.h>
49 #include "stmmac_ptp.h"
50 #include "stmmac.h"
51 #include <linux/reset.h>
52 #include <linux/of_mdio.h>
53 #include "dwmac1000.h"
54 #include "hwif.h"
55
56 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
57 #define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
58
59 /* Module parameters */
60 #define TX_TIMEO        5000
61 static int watchdog = TX_TIMEO;
62 module_param(watchdog, int, 0644);
63 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
64
65 static int debug = -1;
66 module_param(debug, int, 0644);
67 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
68
69 static int phyaddr = -1;
70 module_param(phyaddr, int, 0444);
71 MODULE_PARM_DESC(phyaddr, "Physical device address");
72
73 #define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
74 #define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
75
76 static int flow_ctrl = FLOW_OFF;
77 module_param(flow_ctrl, int, 0644);
78 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
79
80 static int pause = PAUSE_TIME;
81 module_param(pause, int, 0644);
82 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
83
84 #define TC_DEFAULT 64
85 static int tc = TC_DEFAULT;
86 module_param(tc, int, 0644);
87 MODULE_PARM_DESC(tc, "DMA threshold control value");
88
89 #define DEFAULT_BUFSIZE 1536
90 static int buf_sz = DEFAULT_BUFSIZE;
91 module_param(buf_sz, int, 0644);
92 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
93
94 #define STMMAC_RX_COPYBREAK     256
95
96 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
97                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
98                                       NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
99
100 #define STMMAC_DEFAULT_LPI_TIMER        1000
101 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
102 module_param(eee_timer, int, 0644);
103 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
104 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
105
106 /* By default the driver will use the ring mode to manage tx and rx descriptors,
107  * but allow the user to force chain mode instead of ring mode
108  */
109 static unsigned int chain_mode;
110 module_param(chain_mode, int, 0444);
111 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
112
113 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
114
115 #ifdef CONFIG_DEBUG_FS
116 static int stmmac_init_fs(struct net_device *dev);
117 static void stmmac_exit_fs(struct net_device *dev);
118 #endif
119
120 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
121
122 /**
123  * stmmac_verify_args - verify the driver parameters.
124  * Description: it checks the driver parameters and sets a default in case of
125  * errors.
126  */
127 static void stmmac_verify_args(void)
128 {
129         if (unlikely(watchdog < 0))
130                 watchdog = TX_TIMEO;
131         if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
132                 buf_sz = DEFAULT_BUFSIZE;
133         if (unlikely(flow_ctrl > 1))
134                 flow_ctrl = FLOW_AUTO;
135         else if (likely(flow_ctrl < 0))
136                 flow_ctrl = FLOW_OFF;
137         if (unlikely((pause < 0) || (pause > 0xffff)))
138                 pause = PAUSE_TIME;
139         if (eee_timer < 0)
140                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
141 }
142
143 /**
144  * stmmac_disable_all_queues - Disable all queues
145  * @priv: driver private structure
146  */
147 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
148 {
149         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
150         u32 queue;
151
152         for (queue = 0; queue < rx_queues_cnt; queue++) {
153                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
154
155                 napi_disable(&rx_q->napi);
156         }
157 }
158
159 /**
160  * stmmac_enable_all_queues - Enable all queues
161  * @priv: driver private structure
162  */
163 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
164 {
165         u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
166         u32 queue;
167
168         for (queue = 0; queue < rx_queues_cnt; queue++) {
169                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
170
171                 napi_enable(&rx_q->napi);
172         }
173 }
174
175 /**
176  * stmmac_stop_all_queues - Stop all queues
177  * @priv: driver private structure
178  */
179 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
180 {
181         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
182         u32 queue;
183
184         for (queue = 0; queue < tx_queues_cnt; queue++)
185                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
186 }
187
188 /**
189  * stmmac_start_all_queues - Start all queues
190  * @priv: driver private structure
191  */
192 static void stmmac_start_all_queues(struct stmmac_priv *priv)
193 {
194         u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
195         u32 queue;
196
197         for (queue = 0; queue < tx_queues_cnt; queue++)
198                 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
199 }
200
201 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
202 {
203         if (!test_bit(STMMAC_DOWN, &priv->state) &&
204             !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
205                 queue_work(priv->wq, &priv->service_task);
206 }
207
208 static void stmmac_global_err(struct stmmac_priv *priv)
209 {
210         netif_carrier_off(priv->dev);
211         set_bit(STMMAC_RESET_REQUESTED, &priv->state);
212         stmmac_service_event_schedule(priv);
213 }
214
215 /**
216  * stmmac_clk_csr_set - dynamically set the MDC clock
217  * @priv: driver private structure
218  * Description: this is to dynamically set the MDC clock according to the csr
219  * clock input.
220  * Note:
221  *      If a specific clk_csr value is passed from the platform
222  *      this means that the CSR Clock Range selection cannot be
223  *      changed at run-time and it is fixed (as reported in the driver
224  *      documentation). Otherwise the driver will try to set the MDC
225  *      clock dynamically according to the actual clock input.
226  */
227 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
228 {
229         u32 clk_rate;
230
231         clk_rate = clk_get_rate(priv->plat->stmmac_clk);
232
233         /* Platform provided default clk_csr would be assumed valid
234          * for all other cases except for the below mentioned ones.
235          * For values higher than the IEEE 802.3 specified frequency
236          * we cannot estimate the proper divider because the frequency
237          * of clk_csr_i is not known. So we do not change the default
238          * divider.
239          */
240         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
241                 if (clk_rate < CSR_F_35M)
242                         priv->clk_csr = STMMAC_CSR_20_35M;
243                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
244                         priv->clk_csr = STMMAC_CSR_35_60M;
245                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
246                         priv->clk_csr = STMMAC_CSR_60_100M;
247                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
248                         priv->clk_csr = STMMAC_CSR_100_150M;
249                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
250                         priv->clk_csr = STMMAC_CSR_150_250M;
251                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
252                         priv->clk_csr = STMMAC_CSR_250_300M;
253         }
254
255         if (priv->plat->has_sun8i) {
256                 if (clk_rate > 160000000)
257                         priv->clk_csr = 0x03;
258                 else if (clk_rate > 80000000)
259                         priv->clk_csr = 0x02;
260                 else if (clk_rate > 40000000)
261                         priv->clk_csr = 0x01;
262                 else
263                         priv->clk_csr = 0;
264         }
265 }
266
267 static void print_pkt(unsigned char *buf, int len)
268 {
269         pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
270         print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
271 }
272
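/* A worked example of the free-descriptor arithmetic below (hypothetical
 * numbers, not taken from a real run): with DMA_TX_SIZE = 512, cur_tx = 10 and
 * dirty_tx = 5, descriptors 5..9 are still in flight (not yet cleaned), so
 * avail = 512 - 10 + 5 - 1 = 506. The trailing "- 1" keeps one slot unused so
 * that a completely full ring is never confused with an empty one.
 */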
273 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
274 {
275         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
276         u32 avail;
277
278         if (tx_q->dirty_tx > tx_q->cur_tx)
279                 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
280         else
281                 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
282
283         return avail;
284 }
285
286 /**
287  * stmmac_rx_dirty - Get RX queue dirty
288  * @priv: driver private structure
289  * @queue: RX queue index
290  */
291 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
292 {
293         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
294         u32 dirty;
295
296         if (rx_q->dirty_rx <= rx_q->cur_rx)
297                 dirty = rx_q->cur_rx - rx_q->dirty_rx;
298         else
299                 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
300
301         return dirty;
302 }
303
304 /**
305  * stmmac_hw_fix_mac_speed - callback for speed selection
306  * @priv: driver private structure
307  * Description: on some platforms (e.g. ST), some HW system configuration
308  * registers have to be set according to the link speed negotiated.
309  */
310 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
311 {
312         struct net_device *ndev = priv->dev;
313         struct phy_device *phydev = ndev->phydev;
314
315         if (likely(priv->plat->fix_mac_speed))
316                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
317 }
318
319 /**
320  * stmmac_enable_eee_mode - check and enter in LPI mode
321  * @priv: driver private structure
322  * Description: this function checks whether all TX queues are idle and, if
323  * so, enters LPI mode when EEE is enabled.
324  */
325 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
326 {
327         u32 tx_cnt = priv->plat->tx_queues_to_use;
328         u32 queue;
329
330         /* check if all TX queues have the work finished */
331         for (queue = 0; queue < tx_cnt; queue++) {
332                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
333
334                 if (tx_q->dirty_tx != tx_q->cur_tx)
335                         return; /* still unfinished work */
336         }
337
338         /* Check and enter in LPI mode */
339         if (!priv->tx_path_in_lpi_mode)
340                 stmmac_set_eee_mode(priv, priv->hw,
341                                 priv->plat->en_tx_lpi_clockgating);
342 }
343
344 /**
345  * stmmac_disable_eee_mode - disable and exit from LPI mode
346  * @priv: driver private structure
347  * Description: this function exits LPI mode and disables EEE when the LPI
348  * state is active. It is called from the xmit path.
349  */
350 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
351 {
352         stmmac_reset_eee_mode(priv, priv->hw);
353         del_timer_sync(&priv->eee_ctrl_timer);
354         priv->tx_path_in_lpi_mode = false;
355 }
356
357 /**
358  * stmmac_eee_ctrl_timer - EEE TX SW timer.
359  * @t: timer_list hook
360  * Description:
361  *  if there is no data transfer and if we are not in LPI state,
362  *  then the MAC transmitter can be moved to the LPI state.
363  */
364 static void stmmac_eee_ctrl_timer(struct timer_list *t)
365 {
366         struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
367
368         stmmac_enable_eee_mode(priv);
369         mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
370 }
371
372 /**
373  * stmmac_eee_init - init EEE
374  * @priv: driver private structure
375  * Description:
376  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
377  *  can also manage EEE, this function enables the LPI state and starts the
378  *  related timer.
379  */
380 bool stmmac_eee_init(struct stmmac_priv *priv)
381 {
382         struct net_device *ndev = priv->dev;
383         int interface = priv->plat->interface;
384         bool ret = false;
385
386         if ((interface != PHY_INTERFACE_MODE_MII) &&
387             (interface != PHY_INTERFACE_MODE_GMII) &&
388             !phy_interface_mode_is_rgmii(interface))
389                 goto out;
390
391         /* When using PCS we cannot deal with the PHY registers at this stage,
392          * so we do not support extra features like EEE.
393          */
394         if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
395             (priv->hw->pcs == STMMAC_PCS_TBI) ||
396             (priv->hw->pcs == STMMAC_PCS_RTBI))
397                 goto out;
398
399         /* MAC core supports the EEE feature. */
400         if (priv->dma_cap.eee) {
401                 int tx_lpi_timer = priv->tx_lpi_timer;
402
403                 /* Check if the PHY supports EEE */
404                 if (phy_init_eee(ndev->phydev, 1)) {
405                         /* Handle the case where EEE cannot be supported
406                          * anymore at run time (for example because the link
407                          * partner capabilities have changed).
408                          * In that case the driver disables its own timers.
409                          */
410                         mutex_lock(&priv->lock);
411                         if (priv->eee_active) {
412                                 netdev_dbg(priv->dev, "disable EEE\n");
413                                 del_timer_sync(&priv->eee_ctrl_timer);
414                                 stmmac_set_eee_timer(priv, priv->hw, 0,
415                                                 tx_lpi_timer);
416                         }
417                         priv->eee_active = 0;
418                         mutex_unlock(&priv->lock);
419                         goto out;
420                 }
421                 /* Activate the EEE and start timers */
422                 mutex_lock(&priv->lock);
423                 if (!priv->eee_active) {
424                         priv->eee_active = 1;
425                         timer_setup(&priv->eee_ctrl_timer,
426                                     stmmac_eee_ctrl_timer, 0);
427                         mod_timer(&priv->eee_ctrl_timer,
428                                   STMMAC_LPI_T(eee_timer));
429
430                         stmmac_set_eee_timer(priv, priv->hw,
431                                         STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
432                 }
433                 /* Set HW EEE according to the speed */
434                 stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
435
436                 ret = true;
437                 mutex_unlock(&priv->lock);
438
439                 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
440         }
441 out:
442         return ret;
443 }
444
445 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
446  * @priv: driver private structure
447  * @p : descriptor pointer
448  * @skb : the socket buffer
449  * Description :
450  * This function reads the timestamp from the descriptor, performs some sanity
451  * checks and passes it to the stack.
452  */
453 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
454                                    struct dma_desc *p, struct sk_buff *skb)
455 {
456         struct skb_shared_hwtstamps shhwtstamp;
457         u64 ns;
458
459         if (!priv->hwts_tx_en)
460                 return;
461
462         /* exit if skb doesn't support hw tstamp */
463         if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
464                 return;
465
466         /* check tx tstamp status */
467         if (stmmac_get_tx_timestamp_status(priv, p)) {
468                 /* get the valid tstamp */
469                 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
470
471                 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
472                 shhwtstamp.hwtstamp = ns_to_ktime(ns);
473
474                 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
475                 /* pass tstamp to stack */
476                 skb_tstamp_tx(skb, &shhwtstamp);
477         }
478
479         return;
480 }
481
482 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
483  * @priv: driver private structure
484  * @p : descriptor pointer
485  * @np : next descriptor pointer
486  * @skb : the socket buffer
487  * Description :
488  * This function will read received packet's timestamp from the descriptor
489  * and pass it to the stack. It also performs some sanity checks.
490  */
491 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
492                                    struct dma_desc *np, struct sk_buff *skb)
493 {
494         struct skb_shared_hwtstamps *shhwtstamp = NULL;
495         struct dma_desc *desc = p;
496         u64 ns;
497
498         if (!priv->hwts_rx_en)
499                 return;
500         /* For GMAC4, the valid timestamp is from CTX next desc. */
501         if (priv->plat->has_gmac4)
502                 desc = np;
503
504         /* Check if timestamp is available */
505         if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
506                 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
507                 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
508                 shhwtstamp = skb_hwtstamps(skb);
509                 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
510                 shhwtstamp->hwtstamp = ns_to_ktime(ns);
511         } else  {
512                 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
513         }
514 }
515
516 /**
517  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
518  *  @dev: device pointer.
519  *  @ifr: An IOCTL specific structure, that can contain a pointer to
520  *  a proprietary structure used to pass information to the driver.
521  *  Description:
522  *  This function configures the MAC to enable/disable both outgoing(TX)
523  *  and incoming(RX) packets time stamping based on user input.
524  *  Return Value:
525  *  0 on success and an appropriate negative error code on failure.
526  */
527 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
528 {
529         struct stmmac_priv *priv = netdev_priv(dev);
530         struct hwtstamp_config config;
531         struct timespec64 now;
532         u64 temp = 0;
533         u32 ptp_v2 = 0;
534         u32 tstamp_all = 0;
535         u32 ptp_over_ipv4_udp = 0;
536         u32 ptp_over_ipv6_udp = 0;
537         u32 ptp_over_ethernet = 0;
538         u32 snap_type_sel = 0;
539         u32 ts_master_en = 0;
540         u32 ts_event_en = 0;
541         u32 value = 0;
542         u32 sec_inc;
543
544         if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
545                 netdev_alert(priv->dev, "No support for HW time stamping\n");
546                 priv->hwts_tx_en = 0;
547                 priv->hwts_rx_en = 0;
548
549                 return -EOPNOTSUPP;
550         }
551
552         if (copy_from_user(&config, ifr->ifr_data,
553                            sizeof(struct hwtstamp_config)))
554                 return -EFAULT;
555
556         netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
557                    __func__, config.flags, config.tx_type, config.rx_filter);
558
559         /* reserved for future extensions */
560         if (config.flags)
561                 return -EINVAL;
562
563         if (config.tx_type != HWTSTAMP_TX_OFF &&
564             config.tx_type != HWTSTAMP_TX_ON)
565                 return -ERANGE;
566
567         if (priv->adv_ts) {
568                 switch (config.rx_filter) {
569                 case HWTSTAMP_FILTER_NONE:
570                         /* do not time stamp any incoming packet */
571                         config.rx_filter = HWTSTAMP_FILTER_NONE;
572                         break;
573
574                 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
575                         /* PTP v1, UDP, any kind of event packet */
576                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577                         /* take time stamp for all event messages */
578                         if (priv->plat->has_gmac4)
579                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
580                         else
581                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
582
583                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585                         break;
586
587                 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
588                         /* PTP v1, UDP, Sync packet */
589                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
590                         /* take time stamp for SYNC messages only */
591                         ts_event_en = PTP_TCR_TSEVNTENA;
592
593                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
594                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
595                         break;
596
597                 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
598                         /* PTP v1, UDP, Delay_req packet */
599                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
600                         /* take time stamp for Delay_Req messages only */
601                         ts_master_en = PTP_TCR_TSMSTRENA;
602                         ts_event_en = PTP_TCR_TSEVNTENA;
603
604                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606                         break;
607
608                 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
609                         /* PTP v2, UDP, any kind of event packet */
610                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
611                         ptp_v2 = PTP_TCR_TSVER2ENA;
612                         /* take time stamp for all event messages */
613                         if (priv->plat->has_gmac4)
614                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
615                         else
616                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
617
618                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
619                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
620                         break;
621
622                 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
623                         /* PTP v2, UDP, Sync packet */
624                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
625                         ptp_v2 = PTP_TCR_TSVER2ENA;
626                         /* take time stamp for SYNC messages only */
627                         ts_event_en = PTP_TCR_TSEVNTENA;
628
629                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
630                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
631                         break;
632
633                 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
634                         /* PTP v2, UDP, Delay_req packet */
635                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
636                         ptp_v2 = PTP_TCR_TSVER2ENA;
637                         /* take time stamp for Delay_Req messages only */
638                         ts_master_en = PTP_TCR_TSMSTRENA;
639                         ts_event_en = PTP_TCR_TSEVNTENA;
640
641                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643                         break;
644
645                 case HWTSTAMP_FILTER_PTP_V2_EVENT:
646                         /* PTP v2/802.1AS, any layer, any kind of event packet */
647                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
648                         ptp_v2 = PTP_TCR_TSVER2ENA;
649                         /* take time stamp for all event messages */
650                         if (priv->plat->has_gmac4)
651                                 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
652                         else
653                                 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
654
655                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
656                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
657                         ptp_over_ethernet = PTP_TCR_TSIPENA;
658                         break;
659
660                 case HWTSTAMP_FILTER_PTP_V2_SYNC:
661                         /* PTP v2/802.1AS, any layer, Sync packet */
662                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
663                         ptp_v2 = PTP_TCR_TSVER2ENA;
664                         /* take time stamp for SYNC messages only */
665                         ts_event_en = PTP_TCR_TSEVNTENA;
666
667                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669                         ptp_over_ethernet = PTP_TCR_TSIPENA;
670                         break;
671
672                 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
673                         /* PTP v2/802.1AS, any layer, Delay_req packet */
674                         config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
675                         ptp_v2 = PTP_TCR_TSVER2ENA;
676                         /* take time stamp for Delay_Req messages only */
677                         ts_master_en = PTP_TCR_TSMSTRENA;
678                         ts_event_en = PTP_TCR_TSEVNTENA;
679
680                         ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
681                         ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
682                         ptp_over_ethernet = PTP_TCR_TSIPENA;
683                         break;
684
685                 case HWTSTAMP_FILTER_NTP_ALL:
686                 case HWTSTAMP_FILTER_ALL:
687                         /* time stamp any incoming packet */
688                         config.rx_filter = HWTSTAMP_FILTER_ALL;
689                         tstamp_all = PTP_TCR_TSENALL;
690                         break;
691
692                 default:
693                         return -ERANGE;
694                 }
695         } else {
696                 switch (config.rx_filter) {
697                 case HWTSTAMP_FILTER_NONE:
698                         config.rx_filter = HWTSTAMP_FILTER_NONE;
699                         break;
700                 default:
701                         /* PTP v1, UDP, any kind of event packet */
702                         config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
703                         break;
704                 }
705         }
706         priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
707         priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
708
709         if (!priv->hwts_tx_en && !priv->hwts_rx_en)
710                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
711         else {
712                 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
713                          tstamp_all | ptp_v2 | ptp_over_ethernet |
714                          ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
715                          ts_master_en | snap_type_sel);
716                 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
717
718                 /* program Sub Second Increment reg */
719                 stmmac_config_sub_second_increment(priv,
720                                 priv->ptpaddr, priv->plat->clk_ptp_rate,
721                                 priv->plat->has_gmac4, &sec_inc);
722                 temp = div_u64(1000000000ULL, sec_inc);
723
724                 /* Store sub second increment and flags for later use */
725                 priv->sub_second_inc = sec_inc;
726                 priv->systime_flags = value;
727
728                 /* calculate the default addend value:
729                  * formula is:
730                  * addend = (2^32)/freq_div_ratio;
731                  * where, freq_div_ratio = clk_ptp_rate/(1e9/sec_inc)
732                  */
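                /* Worked example with hypothetical numbers (neither value is
                 * mandated by this driver): if sec_inc = 20 ns and
                 * clk_ptp_rate = 62.5 MHz, then temp = 1e9 / 20 = 50,000,000
                 * and addend = (50,000,000 << 32) / 62,500,000 ~= 0xCCCCCCCC,
                 * i.e. the accumulator advances by about 0.8 * 2^32 per PTP
                 * clock cycle.
                 */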
733                 temp = (u64)(temp << 32);
734                 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
735                 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
736
737                 /* initialize system time */
738                 ktime_get_real_ts64(&now);
739
740                 /* lower 32 bits of tv_sec are safe until y2106 */
741                 stmmac_init_systime(priv, priv->ptpaddr,
742                                 (u32)now.tv_sec, now.tv_nsec);
743         }
744
745         return copy_to_user(ifr->ifr_data, &config,
746                             sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
747 }
748
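/* Usage sketch (not part of this driver): userspace would typically enable the
 * hardware timestamping configured above through the SIOCSHWTSTAMP ioctl,
 * roughly as follows. Error handling is omitted and the interface name "eth0"
 * is an assumed example:
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *
 *	struct hwtstamp_config cfg;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter may have been adjusted by the driver to the filter
 * actually applied (see the switch statement above).
 */
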
749 /**
750  * stmmac_init_ptp - init PTP
751  * @priv: driver private structure
752  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
753  * This is done by looking at the HW cap. register.
754  * This function also registers the ptp driver.
755  */
756 static int stmmac_init_ptp(struct stmmac_priv *priv)
757 {
758         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
759                 return -EOPNOTSUPP;
760
761         priv->adv_ts = 0;
762         /* Check if adv_ts can be enabled for dwmac 4.x core */
763         if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
764                 priv->adv_ts = 1;
765         /* Dwmac 3.x core with extend_desc can support adv_ts */
766         else if (priv->extend_desc && priv->dma_cap.atime_stamp)
767                 priv->adv_ts = 1;
768
769         if (priv->dma_cap.time_stamp)
770                 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
771
772         if (priv->adv_ts)
773                 netdev_info(priv->dev,
774                             "IEEE 1588-2008 Advanced Timestamp supported\n");
775
776         priv->hwts_tx_en = 0;
777         priv->hwts_rx_en = 0;
778
779         stmmac_ptp_register(priv);
780
781         return 0;
782 }
783
784 static void stmmac_release_ptp(struct stmmac_priv *priv)
785 {
786         if (priv->plat->clk_ptp_ref)
787                 clk_disable_unprepare(priv->plat->clk_ptp_ref);
788         stmmac_ptp_unregister(priv);
789 }
790
791 /**
792  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
793  *  @priv: driver private structure
794  *  Description: It is used for configuring the flow control in all queues
795  */
796 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
797 {
798         u32 tx_cnt = priv->plat->tx_queues_to_use;
799
800         stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
801                         priv->pause, tx_cnt);
802 }
803
804 /**
805  * stmmac_adjust_link - adjusts the link parameters
806  * @dev: net device structure
807  * Description: this is the helper called by the physical abstraction layer
808  * drivers to communicate the phy link status. According to the speed and
809  * duplex this driver can invoke the registered glue-logic as well.
810  * It also invokes the EEE initialization because switching between different
811  * networks (that are EEE capable) can happen.
812  */
813 static void stmmac_adjust_link(struct net_device *dev)
814 {
815         struct stmmac_priv *priv = netdev_priv(dev);
816         struct phy_device *phydev = dev->phydev;
817         bool new_state = false;
818
819         if (!phydev)
820                 return;
821
822         mutex_lock(&priv->lock);
823
824         if (phydev->link) {
825                 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
826
827                 /* Now we make sure that we can be in full duplex mode.
828                  * If not, we operate in half-duplex mode. */
829                 if (phydev->duplex != priv->oldduplex) {
830                         new_state = true;
831                         if (!phydev->duplex)
832                                 ctrl &= ~priv->hw->link.duplex;
833                         else
834                                 ctrl |= priv->hw->link.duplex;
835                         priv->oldduplex = phydev->duplex;
836                 }
837                 /* Flow Control operation */
838                 if (phydev->pause)
839                         stmmac_mac_flow_ctrl(priv, phydev->duplex);
840
841                 if (phydev->speed != priv->speed) {
842                         new_state = true;
843                         ctrl &= ~priv->hw->link.speed_mask;
844                         switch (phydev->speed) {
845                         case SPEED_1000:
846                                 ctrl |= priv->hw->link.speed1000;
847                                 break;
848                         case SPEED_100:
849                                 ctrl |= priv->hw->link.speed100;
850                                 break;
851                         case SPEED_10:
852                                 ctrl |= priv->hw->link.speed10;
853                                 break;
854                         default:
855                                 netif_warn(priv, link, priv->dev,
856                                            "broken speed: %d\n", phydev->speed);
857                                 phydev->speed = SPEED_UNKNOWN;
858                                 break;
859                         }
860                         if (phydev->speed != SPEED_UNKNOWN)
861                                 stmmac_hw_fix_mac_speed(priv);
862                         priv->speed = phydev->speed;
863                 }
864
865                 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
866
867                 if (!priv->oldlink) {
868                         new_state = true;
869                         priv->oldlink = true;
870                 }
871         } else if (priv->oldlink) {
872                 new_state = true;
873                 priv->oldlink = false;
874                 priv->speed = SPEED_UNKNOWN;
875                 priv->oldduplex = DUPLEX_UNKNOWN;
876         }
877
878         if (new_state && netif_msg_link(priv))
879                 phy_print_status(phydev);
880
881         mutex_unlock(&priv->lock);
882
883         if (phydev->is_pseudo_fixed_link)
884                 /* Stop the PHY layer from calling the adjust_link hook in
885                  * case a switch is attached to the stmmac driver.
886                  */
887                 phydev->irq = PHY_IGNORE_INTERRUPT;
888         else
889                 /* At this stage, init the EEE if supported.
890                  * Never called in case of fixed_link.
891                  */
892                 priv->eee_enabled = stmmac_eee_init(priv);
893 }
894
895 /**
896  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
897  * @priv: driver private structure
898  * Description: this is to verify if the HW supports the PCS.
899  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
900  * configured for the TBI, RTBI, or SGMII PHY interface.
901  */
902 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
903 {
904         int interface = priv->plat->interface;
905
906         if (priv->dma_cap.pcs) {
907                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
908                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
909                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
910                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
911                         netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
912                         priv->hw->pcs = STMMAC_PCS_RGMII;
913                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
914                         netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
915                         priv->hw->pcs = STMMAC_PCS_SGMII;
916                 }
917         }
918 }
919
920 /**
921  * stmmac_init_phy - PHY initialization
922  * @dev: net device structure
923  * Description: it initializes the driver's PHY state, and attaches the PHY
924  * to the mac driver.
925  *  Return value:
926  *  0 on success
927  */
928 static int stmmac_init_phy(struct net_device *dev)
929 {
930         struct stmmac_priv *priv = netdev_priv(dev);
931         struct phy_device *phydev;
932         char phy_id_fmt[MII_BUS_ID_SIZE + 3];
933         char bus_id[MII_BUS_ID_SIZE];
934         int interface = priv->plat->interface;
935         int max_speed = priv->plat->max_speed;
936         priv->oldlink = false;
937         priv->speed = SPEED_UNKNOWN;
938         priv->oldduplex = DUPLEX_UNKNOWN;
939
940         if (priv->plat->phy_node) {
941                 phydev = of_phy_connect(dev, priv->plat->phy_node,
942                                         &stmmac_adjust_link, 0, interface);
943         } else {
944                 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
945                          priv->plat->bus_id);
946
947                 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
948                          priv->plat->phy_addr);
949                 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
950                            phy_id_fmt);
951
952                 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
953                                      interface);
954         }
955
956         if (IS_ERR_OR_NULL(phydev)) {
957                 netdev_err(priv->dev, "Could not attach to PHY\n");
958                 if (!phydev)
959                         return -ENODEV;
960
961                 return PTR_ERR(phydev);
962         }
963
964         /* Stop Advertising 1000BASE Capability if interface is not GMII */
965         if ((interface == PHY_INTERFACE_MODE_MII) ||
966             (interface == PHY_INTERFACE_MODE_RMII) ||
967             (max_speed < 1000 && max_speed > 0))
968                 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
969                                          SUPPORTED_1000baseT_Full);
970
971         /*
972          * Broken HW is sometimes missing the pull-up resistor on the
973          * MDIO line, which results in reads to non-existent devices returning
974          * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
975          * device as well.
976          * Note: phydev->phy_id is the result of reading the UID PHY registers.
977          */
978         if (!priv->plat->phy_node && phydev->phy_id == 0) {
979                 phy_disconnect(phydev);
980                 return -ENODEV;
981         }
982
983         /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
984          * subsequent PHY polling, make sure we force a link transition if
985          * we have a UP/DOWN/UP transition
986          */
987         if (phydev->is_pseudo_fixed_link)
988                 phydev->irq = PHY_POLL;
989
990         phy_attached_info(phydev);
991         return 0;
992 }
993
994 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
995 {
996         u32 rx_cnt = priv->plat->rx_queues_to_use;
997         void *head_rx;
998         u32 queue;
999
1000         /* Display RX rings */
1001         for (queue = 0; queue < rx_cnt; queue++) {
1002                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1003
1004                 pr_info("\tRX Queue %u rings\n", queue);
1005
1006                 if (priv->extend_desc)
1007                         head_rx = (void *)rx_q->dma_erx;
1008                 else
1009                         head_rx = (void *)rx_q->dma_rx;
1010
1011                 /* Display RX ring */
1012                 stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1013         }
1014 }
1015
1016 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1017 {
1018         u32 tx_cnt = priv->plat->tx_queues_to_use;
1019         void *head_tx;
1020         u32 queue;
1021
1022         /* Display TX rings */
1023         for (queue = 0; queue < tx_cnt; queue++) {
1024                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1025
1026                 pr_info("\tTX Queue %d rings\n", queue);
1027
1028                 if (priv->extend_desc)
1029                         head_tx = (void *)tx_q->dma_etx;
1030                 else
1031                         head_tx = (void *)tx_q->dma_tx;
1032
1033                 stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1034         }
1035 }
1036
1037 static void stmmac_display_rings(struct stmmac_priv *priv)
1038 {
1039         /* Display RX ring */
1040         stmmac_display_rx_rings(priv);
1041
1042         /* Display TX ring */
1043         stmmac_display_tx_rings(priv);
1044 }
1045
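/* stmmac_set_bfsize below maps the MTU to a DMA buffer size. A worked example
 * with a hypothetical MTU of 3000: it is below BUF_SIZE_4KiB but at least
 * BUF_SIZE_2KiB, so BUF_SIZE_4KiB is returned; an MTU at or below
 * DEFAULT_BUFSIZE keeps DEFAULT_BUFSIZE.
 */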
1046 static int stmmac_set_bfsize(int mtu, int bufsize)
1047 {
1048         int ret = bufsize;
1049
1050         if (mtu >= BUF_SIZE_4KiB)
1051                 ret = BUF_SIZE_8KiB;
1052         else if (mtu >= BUF_SIZE_2KiB)
1053                 ret = BUF_SIZE_4KiB;
1054         else if (mtu > DEFAULT_BUFSIZE)
1055                 ret = BUF_SIZE_2KiB;
1056         else
1057                 ret = DEFAULT_BUFSIZE;
1058
1059         return ret;
1060 }
1061
1062 /**
1063  * stmmac_clear_rx_descriptors - clear RX descriptors
1064  * @priv: driver private structure
1065  * @queue: RX queue index
1066  * Description: this function is called to clear the RX descriptors
1067  * whether basic or extended descriptors are used.
1068  */
1069 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1070 {
1071         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1072         int i;
1073
1074         /* Clear the RX descriptors */
1075         for (i = 0; i < DMA_RX_SIZE; i++)
1076                 if (priv->extend_desc)
1077                         stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1078                                         priv->use_riwt, priv->mode,
1079                                         (i == DMA_RX_SIZE - 1));
1080                 else
1081                         stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1082                                         priv->use_riwt, priv->mode,
1083                                         (i == DMA_RX_SIZE - 1));
1084 }
1085
1086 /**
1087  * stmmac_clear_tx_descriptors - clear tx descriptors
1088  * @priv: driver private structure
1089  * @queue: TX queue index.
1090  * Description: this function is called to clear the TX descriptors
1091  * whether basic or extended descriptors are used.
1092  */
1093 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1094 {
1095         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1096         int i;
1097
1098         /* Clear the TX descriptors */
1099         for (i = 0; i < DMA_TX_SIZE; i++)
1100                 if (priv->extend_desc)
1101                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1102                                         priv->mode, (i == DMA_TX_SIZE - 1));
1103                 else
1104                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1105                                         priv->mode, (i == DMA_TX_SIZE - 1));
1106 }
1107
1108 /**
1109  * stmmac_clear_descriptors - clear descriptors
1110  * @priv: driver private structure
1111  * Description: this function is called to clear the TX and RX descriptors
1112  * whether basic or extended descriptors are used.
1113  */
1114 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1115 {
1116         u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1117         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1118         u32 queue;
1119
1120         /* Clear the RX descriptors */
1121         for (queue = 0; queue < rx_queue_cnt; queue++)
1122                 stmmac_clear_rx_descriptors(priv, queue);
1123
1124         /* Clear the TX descriptors */
1125         for (queue = 0; queue < tx_queue_cnt; queue++)
1126                 stmmac_clear_tx_descriptors(priv, queue);
1127 }
1128
1129 /**
1130  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1131  * @priv: driver private structure
1132  * @p: descriptor pointer
1133  * @i: descriptor index
1134  * @flags: gfp flag
1135  * @queue: RX queue index
1136  * Description: this function is called to allocate a receive buffer, perform
1137  * the DMA mapping and init the descriptor.
1138  */
1139 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1140                                   int i, gfp_t flags, u32 queue)
1141 {
1142         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1143         struct sk_buff *skb;
1144
1145         skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1146         if (!skb) {
1147                 netdev_err(priv->dev,
1148                            "%s: Rx init fails; skb is NULL\n", __func__);
1149                 return -ENOMEM;
1150         }
1151         rx_q->rx_skbuff[i] = skb;
1152         rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1153                                                 priv->dma_buf_sz,
1154                                                 DMA_FROM_DEVICE);
1155         if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1156                 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1157                 dev_kfree_skb_any(skb);
1158                 return -EINVAL;
1159         }
1160
1161         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1162
1163         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1164                 stmmac_init_desc3(priv, p);
1165
1166         return 0;
1167 }
1168
1169 /**
1170  * stmmac_free_rx_buffer - free RX dma buffer
1171  * @priv: private structure
1172  * @queue: RX queue index
1173  * @i: buffer index.
1174  */
1175 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1176 {
1177         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1178
1179         if (rx_q->rx_skbuff[i]) {
1180                 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1181                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
1182                 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1183         }
1184         rx_q->rx_skbuff[i] = NULL;
1185 }
1186
1187 /**
1188  * stmmac_free_tx_buffer - free TX dma buffer
1189  * @priv: private structure
1190  * @queue: TX queue index
1191  * @i: buffer index.
1192  */
1193 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1194 {
1195         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1196
1197         if (tx_q->tx_skbuff_dma[i].buf) {
1198                 if (tx_q->tx_skbuff_dma[i].map_as_page)
1199                         dma_unmap_page(priv->device,
1200                                        tx_q->tx_skbuff_dma[i].buf,
1201                                        tx_q->tx_skbuff_dma[i].len,
1202                                        DMA_TO_DEVICE);
1203                 else
1204                         dma_unmap_single(priv->device,
1205                                          tx_q->tx_skbuff_dma[i].buf,
1206                                          tx_q->tx_skbuff_dma[i].len,
1207                                          DMA_TO_DEVICE);
1208         }
1209
1210         if (tx_q->tx_skbuff[i]) {
1211                 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1212                 tx_q->tx_skbuff[i] = NULL;
1213                 tx_q->tx_skbuff_dma[i].buf = 0;
1214                 tx_q->tx_skbuff_dma[i].map_as_page = false;
1215         }
1216 }
1217
1218 /**
1219  * init_dma_rx_desc_rings - init the RX descriptor rings
1220  * @dev: net device structure
1221  * @flags: gfp flag.
1222  * Description: this function initializes the DMA RX descriptors
1223  * and allocates the socket buffers. It supports the chained and ring
1224  * modes.
1225  */
1226 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1227 {
1228         struct stmmac_priv *priv = netdev_priv(dev);
1229         u32 rx_count = priv->plat->rx_queues_to_use;
1230         int ret = -ENOMEM;
1231         int bfsize = 0;
1232         int queue;
1233         int i;
1234
1235         bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1236         if (bfsize < 0)
1237                 bfsize = 0;
1238
1239         if (bfsize < BUF_SIZE_16KiB)
1240                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1241
1242         priv->dma_buf_sz = bfsize;
1243
1244         /* RX INITIALIZATION */
1245         netif_dbg(priv, probe, priv->dev,
1246                   "SKB addresses:\nskb\t\tskb data\tdma data\n");
1247
1248         for (queue = 0; queue < rx_count; queue++) {
1249                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1250
1251                 netif_dbg(priv, probe, priv->dev,
1252                           "(%s) dma_rx_phy=0x%08x\n", __func__,
1253                           (u32)rx_q->dma_rx_phy);
1254
1255                 for (i = 0; i < DMA_RX_SIZE; i++) {
1256                         struct dma_desc *p;
1257
1258                         if (priv->extend_desc)
1259                                 p = &((rx_q->dma_erx + i)->basic);
1260                         else
1261                                 p = rx_q->dma_rx + i;
1262
1263                         ret = stmmac_init_rx_buffers(priv, p, i, flags,
1264                                                      queue);
1265                         if (ret)
1266                                 goto err_init_rx_buffers;
1267
1268                         netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1269                                   rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1270                                   (unsigned int)rx_q->rx_skbuff_dma[i]);
1271                 }
1272
1273                 rx_q->cur_rx = 0;
1274                 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1275
1276                 stmmac_clear_rx_descriptors(priv, queue);
1277
1278                 /* Setup the chained descriptor addresses */
1279                 if (priv->mode == STMMAC_CHAIN_MODE) {
1280                         if (priv->extend_desc)
1281                                 stmmac_mode_init(priv, rx_q->dma_erx,
1282                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1283                         else
1284                                 stmmac_mode_init(priv, rx_q->dma_rx,
1285                                                 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1286                 }
1287         }
1288
1289         buf_sz = bfsize;
1290
1291         return 0;
1292
1293 err_init_rx_buffers:
1294         while (queue >= 0) {
1295                 while (--i >= 0)
1296                         stmmac_free_rx_buffer(priv, queue, i);
1297
1298                 if (queue == 0)
1299                         break;
1300
1301                 i = DMA_RX_SIZE;
1302                 queue--;
1303         }
1304
1305         return ret;
1306 }
1307
1308 /**
1309  * init_dma_tx_desc_rings - init the TX descriptor rings
1310  * @dev: net device structure.
1311  * Description: this function initializes the DMA TX descriptors
1312  * and allocates the socket buffers. It supports the chained and ring
1313  * modes.
1314  */
1315 static int init_dma_tx_desc_rings(struct net_device *dev)
1316 {
1317         struct stmmac_priv *priv = netdev_priv(dev);
1318         u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1319         u32 queue;
1320         int i;
1321
1322         for (queue = 0; queue < tx_queue_cnt; queue++) {
1323                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1324
1325                 netif_dbg(priv, probe, priv->dev,
1326                           "(%s) dma_tx_phy=0x%08x\n", __func__,
1327                          (u32)tx_q->dma_tx_phy);
1328
1329                 /* Setup the chained descriptor addresses */
1330                 if (priv->mode == STMMAC_CHAIN_MODE) {
1331                         if (priv->extend_desc)
1332                                 stmmac_mode_init(priv, tx_q->dma_etx,
1333                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1334                         else
1335                                 stmmac_mode_init(priv, tx_q->dma_tx,
1336                                                 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1337                 }
1338
1339                 for (i = 0; i < DMA_TX_SIZE; i++) {
1340                         struct dma_desc *p;
1341                         if (priv->extend_desc)
1342                                 p = &((tx_q->dma_etx + i)->basic);
1343                         else
1344                                 p = tx_q->dma_tx + i;
1345
1346                         stmmac_clear_desc(priv, p);
1347
1348                         tx_q->tx_skbuff_dma[i].buf = 0;
1349                         tx_q->tx_skbuff_dma[i].map_as_page = false;
1350                         tx_q->tx_skbuff_dma[i].len = 0;
1351                         tx_q->tx_skbuff_dma[i].last_segment = false;
1352                         tx_q->tx_skbuff[i] = NULL;
1353                 }
1354
1355                 tx_q->dirty_tx = 0;
1356                 tx_q->cur_tx = 0;
1357                 tx_q->mss = 0;
1358
1359                 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1360         }
1361
1362         return 0;
1363 }
1364
1365 /**
1366  * init_dma_desc_rings - init the RX/TX descriptor rings
1367  * @dev: net device structure
1368  * @flags: gfp flag.
1369  * Description: this function initializes the DMA RX/TX descriptors
1370  * and allocates the socket buffers. It supports the chained and ring
1371  * modes.
1372  */
1373 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1374 {
1375         struct stmmac_priv *priv = netdev_priv(dev);
1376         int ret;
1377
1378         ret = init_dma_rx_desc_rings(dev, flags);
1379         if (ret)
1380                 return ret;
1381
1382         ret = init_dma_tx_desc_rings(dev);
1383
1384         stmmac_clear_descriptors(priv);
1385
1386         if (netif_msg_hw(priv))
1387                 stmmac_display_rings(priv);
1388
1389         return ret;
1390 }
1391
1392 /**
1393  * dma_free_rx_skbufs - free RX dma buffers
1394  * @priv: private structure
1395  * @queue: RX queue index
1396  */
1397 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1398 {
1399         int i;
1400
1401         for (i = 0; i < DMA_RX_SIZE; i++)
1402                 stmmac_free_rx_buffer(priv, queue, i);
1403 }
1404
1405 /**
1406  * dma_free_tx_skbufs - free TX dma buffers
1407  * @priv: private structure
1408  * @queue: TX queue index
1409  */
1410 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1411 {
1412         int i;
1413
1414         for (i = 0; i < DMA_TX_SIZE; i++)
1415                 stmmac_free_tx_buffer(priv, queue, i);
1416 }
1417
1418 /**
1419  * free_dma_rx_desc_resources - free RX dma desc resources
1420  * @priv: private structure
1421  */
1422 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1423 {
1424         u32 rx_count = priv->plat->rx_queues_to_use;
1425         u32 queue;
1426
1427         /* Free RX queue resources */
1428         for (queue = 0; queue < rx_count; queue++) {
1429                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1430
1431                 /* Release the DMA RX socket buffers */
1432                 dma_free_rx_skbufs(priv, queue);
1433
1434                 /* Free DMA regions of consistent memory previously allocated */
1435                 if (!priv->extend_desc)
1436                         dma_free_coherent(priv->device,
1437                                           DMA_RX_SIZE * sizeof(struct dma_desc),
1438                                           rx_q->dma_rx, rx_q->dma_rx_phy);
1439                 else
1440                         dma_free_coherent(priv->device, DMA_RX_SIZE *
1441                                           sizeof(struct dma_extended_desc),
1442                                           rx_q->dma_erx, rx_q->dma_rx_phy);
1443
1444                 kfree(rx_q->rx_skbuff_dma);
1445                 kfree(rx_q->rx_skbuff);
1446         }
1447 }
1448
1449 /**
1450  * free_dma_tx_desc_resources - free TX dma desc resources
1451  * @priv: private structure
1452  */
1453 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1454 {
1455         u32 tx_count = priv->plat->tx_queues_to_use;
1456         u32 queue;
1457
1458         /* Free TX queue resources */
1459         for (queue = 0; queue < tx_count; queue++) {
1460                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1461
1462                 /* Release the DMA TX socket buffers */
1463                 dma_free_tx_skbufs(priv, queue);
1464
1465                 /* Free DMA regions of consistent memory previously allocated */
1466                 if (!priv->extend_desc)
1467                         dma_free_coherent(priv->device,
1468                                           DMA_TX_SIZE * sizeof(struct dma_desc),
1469                                           tx_q->dma_tx, tx_q->dma_tx_phy);
1470                 else
1471                         dma_free_coherent(priv->device, DMA_TX_SIZE *
1472                                           sizeof(struct dma_extended_desc),
1473                                           tx_q->dma_etx, tx_q->dma_tx_phy);
1474
1475                 kfree(tx_q->tx_skbuff_dma);
1476                 kfree(tx_q->tx_skbuff);
1477         }
1478 }
1479
1480 /**
1481  * alloc_dma_rx_desc_resources - alloc RX resources.
1482  * @priv: private structure
1483  * Description: according to which descriptor can be used (extended or basic)
1484  * this function allocates the resources for the RX path: the descriptor
1485  * rings and the rx_skbuff/rx_skbuff_dma arrays that later allow a zero-copy
1486  * reception mechanism.
1487  */
1488 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1489 {
1490         u32 rx_count = priv->plat->rx_queues_to_use;
1491         int ret = -ENOMEM;
1492         u32 queue;
1493
1494         /* RX queues buffers and DMA */
1495         for (queue = 0; queue < rx_count; queue++) {
1496                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1497
1498                 rx_q->queue_index = queue;
1499                 rx_q->priv_data = priv;
1500
1501                 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1502                                                     sizeof(dma_addr_t),
1503                                                     GFP_KERNEL);
1504                 if (!rx_q->rx_skbuff_dma)
1505                         goto err_dma;
1506
1507                 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1508                                                 sizeof(struct sk_buff *),
1509                                                 GFP_KERNEL);
1510                 if (!rx_q->rx_skbuff)
1511                         goto err_dma;
1512
1513                 if (priv->extend_desc) {
1514                         rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1515                                                             DMA_RX_SIZE *
1516                                                             sizeof(struct
1517                                                             dma_extended_desc),
1518                                                             &rx_q->dma_rx_phy,
1519                                                             GFP_KERNEL);
1520                         if (!rx_q->dma_erx)
1521                                 goto err_dma;
1522
1523                 } else {
1524                         rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1525                                                            DMA_RX_SIZE *
1526                                                            sizeof(struct
1527                                                            dma_desc),
1528                                                            &rx_q->dma_rx_phy,
1529                                                            GFP_KERNEL);
1530                         if (!rx_q->dma_rx)
1531                                 goto err_dma;
1532                 }
1533         }
1534
1535         return 0;
1536
1537 err_dma:
1538         free_dma_rx_desc_resources(priv);
1539
1540         return ret;
1541 }
1542
1543 /**
1544  * alloc_dma_tx_desc_resources - alloc TX resources.
1545  * @priv: private structure
1546  * Description: according to which descriptor can be used (extended or basic)
1547  * this function allocates the resources for the TX path: the descriptor
1548  * rings and the tx_skbuff/tx_skbuff_dma arrays used to track the buffers
1549  * mapped at transmit time.
1550  */
1551 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1552 {
1553         u32 tx_count = priv->plat->tx_queues_to_use;
1554         int ret = -ENOMEM;
1555         u32 queue;
1556
1557         /* TX queues buffers and DMA */
1558         for (queue = 0; queue < tx_count; queue++) {
1559                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1560
1561                 tx_q->queue_index = queue;
1562                 tx_q->priv_data = priv;
1563
1564                 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1565                                                     sizeof(*tx_q->tx_skbuff_dma),
1566                                                     GFP_KERNEL);
1567                 if (!tx_q->tx_skbuff_dma)
1568                         goto err_dma;
1569
1570                 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1571                                                 sizeof(struct sk_buff *),
1572                                                 GFP_KERNEL);
1573                 if (!tx_q->tx_skbuff)
1574                         goto err_dma;
1575
1576                 if (priv->extend_desc) {
1577                         tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1578                                                             DMA_TX_SIZE *
1579                                                             sizeof(struct
1580                                                             dma_extended_desc),
1581                                                             &tx_q->dma_tx_phy,
1582                                                             GFP_KERNEL);
1583                         if (!tx_q->dma_etx)
1584                                 goto err_dma;
1585                 } else {
1586                         tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1587                                                            DMA_TX_SIZE *
1588                                                            sizeof(struct
1589                                                                   dma_desc),
1590                                                            &tx_q->dma_tx_phy,
1591                                                            GFP_KERNEL);
1592                         if (!tx_q->dma_tx)
1593                                 goto err_dma;
1594                 }
1595         }
1596
1597         return 0;
1598
1599 err_dma:
1600         free_dma_tx_desc_resources(priv);
1601
1602         return ret;
1603 }
1604
1605 /**
1606  * alloc_dma_desc_resources - alloc TX/RX resources.
1607  * @priv: private structure
1608  * Description: according to which descriptor can be used (extend or basic)
1609  * this function allocates the resources for TX and RX paths. In case of
1610  * reception, for example, it pre-allocates the RX socket buffers in order to
1611  * allow a zero-copy mechanism.
1612  */
1613 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1614 {
1615         /* RX Allocation */
1616         int ret = alloc_dma_rx_desc_resources(priv);
1617
1618         if (ret)
1619                 return ret;
1620
1621         ret = alloc_dma_tx_desc_resources(priv);
1622
1623         return ret;
1624 }
1625
1626 /**
1627  * free_dma_desc_resources - free dma desc resources
1628  * @priv: private structure
1629  */
1630 static void free_dma_desc_resources(struct stmmac_priv *priv)
1631 {
1632         /* Release the DMA RX socket buffers */
1633         free_dma_rx_desc_resources(priv);
1634
1635         /* Release the DMA TX socket buffers */
1636         free_dma_tx_desc_resources(priv);
1637 }
1638
1639 /**
1640  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1641  *  @priv: driver private structure
1642  *  Description: It is used for enabling the rx queues in the MAC
1643  */
1644 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1645 {
1646         u32 rx_queues_count = priv->plat->rx_queues_to_use;
1647         int queue;
1648         u8 mode;
1649
1650         for (queue = 0; queue < rx_queues_count; queue++) {
1651                 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1652                 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1653         }
1654 }
1655
1656 /**
1657  * stmmac_start_rx_dma - start RX DMA channel
1658  * @priv: driver private structure
1659  * @chan: RX channel index
1660  * Description:
1661  * This starts an RX DMA channel
1662  */
1663 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1664 {
1665         netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1666         stmmac_start_rx(priv, priv->ioaddr, chan);
1667 }
1668
1669 /**
1670  * stmmac_start_tx_dma - start TX DMA channel
1671  * @priv: driver private structure
1672  * @chan: TX channel index
1673  * Description:
1674  * This starts a TX DMA channel
1675  */
1676 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1677 {
1678         netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1679         stmmac_start_tx(priv, priv->ioaddr, chan);
1680 }
1681
1682 /**
1683  * stmmac_stop_rx_dma - stop RX DMA channel
1684  * @priv: driver private structure
1685  * @chan: RX channel index
1686  * Description:
1687  * This stops an RX DMA channel
1688  */
1689 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1690 {
1691         netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1692         stmmac_stop_rx(priv, priv->ioaddr, chan);
1693 }
1694
1695 /**
1696  * stmmac_stop_tx_dma - stop TX DMA channel
1697  * @priv: driver private structure
1698  * @chan: TX channel index
1699  * Description:
1700  * This stops a TX DMA channel
1701  */
1702 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1703 {
1704         netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1705         stmmac_stop_tx(priv, priv->ioaddr, chan);
1706 }
1707
1708 /**
1709  * stmmac_start_all_dma - start all RX and TX DMA channels
1710  * @priv: driver private structure
1711  * Description:
1712  * This starts all the RX and TX DMA channels
1713  */
1714 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1715 {
1716         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1717         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1718         u32 chan = 0;
1719
1720         for (chan = 0; chan < rx_channels_count; chan++)
1721                 stmmac_start_rx_dma(priv, chan);
1722
1723         for (chan = 0; chan < tx_channels_count; chan++)
1724                 stmmac_start_tx_dma(priv, chan);
1725 }
1726
1727 /**
1728  * stmmac_stop_all_dma - stop all RX and TX DMA channels
1729  * @priv: driver private structure
1730  * Description:
1731  * This stops the RX and TX DMA channels
1732  */
1733 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1734 {
1735         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1736         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1737         u32 chan = 0;
1738
1739         for (chan = 0; chan < rx_channels_count; chan++)
1740                 stmmac_stop_rx_dma(priv, chan);
1741
1742         for (chan = 0; chan < tx_channels_count; chan++)
1743                 stmmac_stop_tx_dma(priv, chan);
1744 }
1745
1746 /**
1747  *  stmmac_dma_operation_mode - HW DMA operation mode
1748  *  @priv: driver private structure
1749  *  Description: it is used for configuring the DMA operation mode register in
1750  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1751  */
1752 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1753 {
1754         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1755         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1756         int rxfifosz = priv->plat->rx_fifo_size;
1757         int txfifosz = priv->plat->tx_fifo_size;
1758         u32 txmode = 0;
1759         u32 rxmode = 0;
1760         u32 chan = 0;
1761         u8 qmode = 0;
1762
1763         if (rxfifosz == 0)
1764                 rxfifosz = priv->dma_cap.rx_fifo_size;
1765         if (txfifosz == 0)
1766                 txfifosz = priv->dma_cap.tx_fifo_size;
1767
1768         /* Adjust for real per queue fifo size */
1769         rxfifosz /= rx_channels_count;
1770         txfifosz /= tx_channels_count;
1771
1772         if (priv->plat->force_thresh_dma_mode) {
1773                 txmode = tc;
1774                 rxmode = tc;
1775         } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1776                 /*
1777                  * In case of GMAC, SF mode can be enabled
1778                  * to perform the TX COE in HW. This depends on:
1779                  * 1) TX COE being actually supported
1780                  * 2) there being no buggy Jumbo frame support
1781                  *    that requires not inserting the csum in the TDES.
1782                  */
1783                 txmode = SF_DMA_MODE;
1784                 rxmode = SF_DMA_MODE;
1785                 priv->xstats.threshold = SF_DMA_MODE;
1786         } else {
1787                 txmode = tc;
1788                 rxmode = SF_DMA_MODE;
1789         }
1790
1791         /* configure all channels */
1792         for (chan = 0; chan < rx_channels_count; chan++) {
1793                 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1794
1795                 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1796                                 rxfifosz, qmode);
1797         }
1798
1799         for (chan = 0; chan < tx_channels_count; chan++) {
1800                 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1801
1802                 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1803                                 txfifosz, qmode);
1804         }
1805 }
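
/*
 * Worked example for the per-queue FIFO split above: with an 8192-byte RX
 * FIFO (from rx_fifo_size or dma_cap.rx_fifo_size) shared by 4 RX channels,
 * rxfifosz /= rx_channels_count leaves 2048 bytes per queue, and that value
 * is what gets handed to stmmac_dma_rx_mode() for every channel.
 */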
1806
1807 /**
1808  * stmmac_tx_clean - to manage the transmission completion
1809  * @priv: driver private structure
1810  * @queue: TX queue index
1811  * Description: it reclaims the transmit resources after transmission completes.
1812  */
1813 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1814 {
1815         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1816         unsigned int bytes_compl = 0, pkts_compl = 0;
1817         unsigned int entry;
1818
1819         netif_tx_lock(priv->dev);
1820
1821         priv->xstats.tx_clean++;
1822
1823         entry = tx_q->dirty_tx;
1824         while (entry != tx_q->cur_tx) {
1825                 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1826                 struct dma_desc *p;
1827                 int status;
1828
1829                 if (priv->extend_desc)
1830                         p = (struct dma_desc *)(tx_q->dma_etx + entry);
1831                 else
1832                         p = tx_q->dma_tx + entry;
1833
1834                 status = stmmac_tx_status(priv, &priv->dev->stats,
1835                                 &priv->xstats, p, priv->ioaddr);
1836                 /* Check if the descriptor is owned by the DMA */
1837                 if (unlikely(status & tx_dma_own))
1838                         break;
1839
1840                 /* Make sure descriptor fields are read after reading
1841                  * the own bit.
1842                  */
1843                 dma_rmb();
1844
1845                 /* Just consider the last segment and ...*/
1846                 if (likely(!(status & tx_not_ls))) {
1847                         /* ... verify the status error condition */
1848                         if (unlikely(status & tx_err)) {
1849                                 priv->dev->stats.tx_errors++;
1850                         } else {
1851                                 priv->dev->stats.tx_packets++;
1852                                 priv->xstats.tx_pkt_n++;
1853                         }
1854                         stmmac_get_tx_hwtstamp(priv, p, skb);
1855                 }
1856
1857                 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1858                         if (tx_q->tx_skbuff_dma[entry].map_as_page)
1859                                 dma_unmap_page(priv->device,
1860                                                tx_q->tx_skbuff_dma[entry].buf,
1861                                                tx_q->tx_skbuff_dma[entry].len,
1862                                                DMA_TO_DEVICE);
1863                         else
1864                                 dma_unmap_single(priv->device,
1865                                                  tx_q->tx_skbuff_dma[entry].buf,
1866                                                  tx_q->tx_skbuff_dma[entry].len,
1867                                                  DMA_TO_DEVICE);
1868                         tx_q->tx_skbuff_dma[entry].buf = 0;
1869                         tx_q->tx_skbuff_dma[entry].len = 0;
1870                         tx_q->tx_skbuff_dma[entry].map_as_page = false;
1871                 }
1872
1873                 stmmac_clean_desc3(priv, tx_q, p);
1874
1875                 tx_q->tx_skbuff_dma[entry].last_segment = false;
1876                 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1877
1878                 if (likely(skb != NULL)) {
1879                         pkts_compl++;
1880                         bytes_compl += skb->len;
1881                         dev_consume_skb_any(skb);
1882                         tx_q->tx_skbuff[entry] = NULL;
1883                 }
1884
1885                 stmmac_release_tx_desc(priv, p, priv->mode);
1886
1887                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1888         }
1889         tx_q->dirty_tx = entry;
1890
1891         netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1892                                   pkts_compl, bytes_compl);
1893
1894         if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1895                                                                 queue))) &&
1896             stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1897
1898                 netif_dbg(priv, tx_done, priv->dev,
1899                           "%s: restart transmit\n", __func__);
1900                 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1901         }
1902
1903         if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1904                 stmmac_enable_eee_mode(priv);
1905                 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1906         }
1907         netif_tx_unlock(priv->dev);
1908 }
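
/*
 * The cleanup loop above walks entries from dirty_tx towards cur_tx with
 * STMMAC_GET_ENTRY(), assumed here to advance the index modulo the ring
 * size (roughly (entry + 1) & (DMA_TX_SIZE - 1) for a power-of-two ring),
 * and the queue is only woken once more than STMMAC_TX_THRESH descriptors
 * are free again.
 */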
1909
1910 /**
1911  * stmmac_tx_err - to manage the tx error
1912  * @priv: driver private structure
1913  * @chan: channel index
1914  * Description: it cleans the descriptors and restarts the transmission
1915  * in case of transmission errors.
1916  */
1917 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1918 {
1919         struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1920         int i;
1921
1922         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1923
1924         stmmac_stop_tx_dma(priv, chan);
1925         dma_free_tx_skbufs(priv, chan);
1926         for (i = 0; i < DMA_TX_SIZE; i++)
1927                 if (priv->extend_desc)
1928                         stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1929                                         priv->mode, (i == DMA_TX_SIZE - 1));
1930                 else
1931                         stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1932                                         priv->mode, (i == DMA_TX_SIZE - 1));
1933         tx_q->dirty_tx = 0;
1934         tx_q->cur_tx = 0;
1935         tx_q->mss = 0;
1936         netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1937         stmmac_start_tx_dma(priv, chan);
1938
1939         priv->dev->stats.tx_errors++;
1940         netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1941 }
1942
1943 /**
1944  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1945  *  @priv: driver private structure
1946  *  @txmode: TX operating mode
1947  *  @rxmode: RX operating mode
1948  *  @chan: channel index
1949  *  Description: it is used for configuring the DMA operation mode at
1950  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1951  *  mode.
1952  */
1953 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1954                                           u32 rxmode, u32 chan)
1955 {
1956         u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1957         u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1958         u32 rx_channels_count = priv->plat->rx_queues_to_use;
1959         u32 tx_channels_count = priv->plat->tx_queues_to_use;
1960         int rxfifosz = priv->plat->rx_fifo_size;
1961         int txfifosz = priv->plat->tx_fifo_size;
1962
1963         if (rxfifosz == 0)
1964                 rxfifosz = priv->dma_cap.rx_fifo_size;
1965         if (txfifosz == 0)
1966                 txfifosz = priv->dma_cap.tx_fifo_size;
1967
1968         /* Adjust for real per queue fifo size */
1969         rxfifosz /= rx_channels_count;
1970         txfifosz /= tx_channels_count;
1971
1972         stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1973         stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1974 }
1975
1976 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1977 {
1978         int ret;
1979
1980         ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1981                         priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1982         if (ret && (ret != -EINVAL)) {
1983                 stmmac_global_err(priv);
1984                 return true;
1985         }
1986
1987         return false;
1988 }
1989
1990 /**
1991  * stmmac_dma_interrupt - DMA ISR
1992  * @priv: driver private structure
1993  * Description: this is the DMA ISR. It is called by the main ISR.
1994  * It calls the dwmac dma routine and schedules the poll method when there
1995  * is work to be done.
1996  */
1997 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1998 {
1999         u32 tx_channel_count = priv->plat->tx_queues_to_use;
2000         u32 rx_channel_count = priv->plat->rx_queues_to_use;
2001         u32 channels_to_check = tx_channel_count > rx_channel_count ?
2002                                 tx_channel_count : rx_channel_count;
2003         u32 chan;
2004         bool poll_scheduled = false;
2005         int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2006
2007         /* Make sure we never check beyond our status buffer. */
2008         if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2009                 channels_to_check = ARRAY_SIZE(status);
2010
2011         /* Each DMA channel can be used for rx and tx simultaneously, yet
2012          * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2013          * stmmac_channel struct.
2014          * Because of this, stmmac_poll currently checks (and possibly wakes)
2015          * all tx queues rather than just a single tx queue.
2016          */
2017         for (chan = 0; chan < channels_to_check; chan++)
2018                 status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2019                                 &priv->xstats, chan);
2020
2021         for (chan = 0; chan < rx_channel_count; chan++) {
2022                 if (likely(status[chan] & handle_rx)) {
2023                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2024
2025                         if (likely(napi_schedule_prep(&rx_q->napi))) {
2026                                 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2027                                 __napi_schedule(&rx_q->napi);
2028                                 poll_scheduled = true;
2029                         }
2030                 }
2031         }
2032
2033         /* If we scheduled poll, we already know that tx queues will be checked.
2034          * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2035          * completed transmission, if so, call stmmac_poll (once).
2036          */
2037         if (!poll_scheduled) {
2038                 for (chan = 0; chan < tx_channel_count; chan++) {
2039                         if (status[chan] & handle_tx) {
2040                                 /* It doesn't matter what rx queue we choose
2041                                  * here. We use 0 since it always exists.
2042                                  */
2043                                 struct stmmac_rx_queue *rx_q =
2044                                         &priv->rx_queue[0];
2045
2046                                 if (likely(napi_schedule_prep(&rx_q->napi))) {
2047                                         stmmac_disable_dma_irq(priv,
2048                                                         priv->ioaddr, chan);
2049                                         __napi_schedule(&rx_q->napi);
2050                                 }
2051                                 break;
2052                         }
2053                 }
2054         }
2055
2056         for (chan = 0; chan < tx_channel_count; chan++) {
2057                 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2058                         /* Try to bump up the dma threshold on this failure */
2059                         if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2060                             (tc <= 256)) {
2061                                 tc += 64;
2062                                 if (priv->plat->force_thresh_dma_mode)
2063                                         stmmac_set_dma_operation_mode(priv,
2064                                                                       tc,
2065                                                                       tc,
2066                                                                       chan);
2067                                 else
2068                                         stmmac_set_dma_operation_mode(priv,
2069                                                                     tc,
2070                                                                     SF_DMA_MODE,
2071                                                                     chan);
2072                                 priv->xstats.threshold = tc;
2073                         }
2074                 } else if (unlikely(status[chan] == tx_hard_error)) {
2075                         stmmac_tx_err(priv, chan);
2076                 }
2077         }
2078 }
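
/*
 * On a tx_hard_error_bump_tc status the DMA threshold 'tc' is raised in
 * steps of 64 for as long as its current value is <= 256 and Store-And-
 * Forward mode is not already in use; the new value is programmed per
 * channel via stmmac_set_dma_operation_mode() and mirrored into
 * priv->xstats.threshold.
 */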
2079
2080 /**
2081  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2082  * @priv: driver private structure
2083  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2084  */
2085 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2086 {
2087         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2088                             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2089
2090         dwmac_mmc_intr_all_mask(priv->mmcaddr);
2091
2092         if (priv->dma_cap.rmon) {
2093                 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2094                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2095         } else
2096                 netdev_info(priv->dev, "No MAC Management Counters available\n");
2097 }
2098
2099 /**
2100  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2101  * @priv: driver private structure
2102  * Description:
2103  *  new GMAC chip generations have a dedicated register to indicate the
2104  *  presence of the optional features/functions.
2105  *  This can also be used to override the values passed through the
2106  *  platform code, which are needed for old MAC10/100 and GMAC chips.
2107  */
2108 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2109 {
2110         return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2111 }
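
/*
 * stmmac_get_hw_feature() fills priv->dma_cap and returns 0 on success, so
 * the wrapper above evaluates to 1 when the HW capability register exists
 * and 0 when it does not (older cores, where the platform values are used
 * instead).
 */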
2112
2113 /**
2114  * stmmac_check_ether_addr - check if the MAC addr is valid
2115  * @priv: driver private structure
2116  * Description:
2117  * it verifies that the MAC address is valid; in case of failure it
2118  * generates a random MAC address
2119  */
2120 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2121 {
2122         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2123                 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2124                 if (!is_valid_ether_addr(priv->dev->dev_addr))
2125                         eth_hw_addr_random(priv->dev);
2126                 netdev_info(priv->dev, "device MAC address %pM\n",
2127                             priv->dev->dev_addr);
2128         }
2129 }
2130
2131 /**
2132  * stmmac_init_dma_engine - DMA init.
2133  * @priv: driver private structure
2134  * Description:
2135  * It initializes the DMA by invoking the specific MAC/GMAC callback.
2136  * Some DMA parameters can be passed from the platform;
2137  * if they are not passed, a default is used for the MAC or GMAC.
2138  */
2139 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2140 {
2141         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2142         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2143         u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2144         struct stmmac_rx_queue *rx_q;
2145         struct stmmac_tx_queue *tx_q;
2146         u32 chan = 0;
2147         int atds = 0;
2148         int ret = 0;
2149
2150         if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2151                 dev_err(priv->device, "Invalid DMA configuration\n");
2152                 return -EINVAL;
2153         }
2154
2155         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2156                 atds = 1;
2157
2158         ret = stmmac_reset(priv, priv->ioaddr);
2159         if (ret) {
2160                 dev_err(priv->device, "Failed to reset the dma\n");
2161                 return ret;
2162         }
2163
2164         /* DMA RX Channel Configuration */
2165         for (chan = 0; chan < rx_channels_count; chan++) {
2166                 rx_q = &priv->rx_queue[chan];
2167
2168                 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2169                                     rx_q->dma_rx_phy, chan);
2170
2171                 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2172                             (DMA_RX_SIZE * sizeof(struct dma_desc));
2173                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2174                                        rx_q->rx_tail_addr, chan);
2175         }
2176
2177         /* DMA TX Channel Configuration */
2178         for (chan = 0; chan < tx_channels_count; chan++) {
2179                 tx_q = &priv->tx_queue[chan];
2180
2181                 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2182                                     tx_q->dma_tx_phy, chan);
2183
2184                 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2185                             (DMA_TX_SIZE * sizeof(struct dma_desc));
2186                 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2187                                        tx_q->tx_tail_addr, chan);
2188         }
2189
2190         /* DMA CSR Channel configuration */
2191         for (chan = 0; chan < dma_csr_ch; chan++)
2192                 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2193
2194         /* DMA Configuration */
2195         stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2196
2197         if (priv->plat->axi)
2198                 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2199
2200         return ret;
2201 }
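
/*
 * Note on the tail pointers programmed above: rx_tail_addr/tx_tail_addr are
 * set to the physical ring base plus DMA_{RX,TX}_SIZE * sizeof(struct
 * dma_desc), i.e. one descriptor past the end of each ring, presumably so
 * the DMA initially considers every descriptor in the ring usable.
 */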
2202
2203 /**
2204  * stmmac_tx_timer - mitigation sw timer for tx.
2205  * @t: pointer to the txtimer embedded in the driver private structure
2206  * Description:
2207  * This is the timer handler used to invoke stmmac_tx_clean directly.
2208  */
2209 static void stmmac_tx_timer(struct timer_list *t)
2210 {
2211         struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2212         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2213         u32 queue;
2214
2215         /* let's scan all the tx queues */
2216         for (queue = 0; queue < tx_queues_count; queue++)
2217                 stmmac_tx_clean(priv, queue);
2218 }
2219
2220 /**
2221  * stmmac_init_tx_coalesce - init tx mitigation options.
2222  * @priv: driver private structure
2223  * Description:
2224  * This inits the transmit coalesce parameters: i.e. timer rate,
2225  * timer handler and default threshold used for enabling the
2226  * interrupt on completion bit.
2227  */
2228 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2229 {
2230         priv->tx_coal_frames = STMMAC_TX_FRAMES;
2231         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2232         timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2233         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2234         add_timer(&priv->txtimer);
2235 }
2236
2237 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2238 {
2239         u32 rx_channels_count = priv->plat->rx_queues_to_use;
2240         u32 tx_channels_count = priv->plat->tx_queues_to_use;
2241         u32 chan;
2242
2243         /* set TX ring length */
2244         for (chan = 0; chan < tx_channels_count; chan++)
2245                 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2246                                 (DMA_TX_SIZE - 1), chan);
2247
2248         /* set RX ring length */
2249         for (chan = 0; chan < rx_channels_count; chan++)
2250                 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2251                                 (DMA_RX_SIZE - 1), chan);
2252 }
2253
2254 /**
2255  *  stmmac_set_tx_queue_weight - Set TX queue weight
2256  *  @priv: driver private structure
2257  *  Description: It is used for setting TX queues weight
2258  */
2259 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2260 {
2261         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2262         u32 weight;
2263         u32 queue;
2264
2265         for (queue = 0; queue < tx_queues_count; queue++) {
2266                 weight = priv->plat->tx_queues_cfg[queue].weight;
2267                 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2268         }
2269 }
2270
2271 /**
2272  *  stmmac_configure_cbs - Configure CBS in TX queue
2273  *  @priv: driver private structure
2274  *  Description: It is used for configuring CBS in AVB TX queues
2275  */
2276 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2277 {
2278         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2279         u32 mode_to_use;
2280         u32 queue;
2281
2282         /* queue 0 is reserved for legacy traffic */
2283         for (queue = 1; queue < tx_queues_count; queue++) {
2284                 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2285                 if (mode_to_use == MTL_QUEUE_DCB)
2286                         continue;
2287
2288                 stmmac_config_cbs(priv, priv->hw,
2289                                 priv->plat->tx_queues_cfg[queue].send_slope,
2290                                 priv->plat->tx_queues_cfg[queue].idle_slope,
2291                                 priv->plat->tx_queues_cfg[queue].high_credit,
2292                                 priv->plat->tx_queues_cfg[queue].low_credit,
2293                                 queue);
2294         }
2295 }
2296
2297 /**
2298  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2299  *  @priv: driver private structure
2300  *  Description: It is used for mapping RX queues to RX dma channels
2301  */
2302 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2303 {
2304         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2305         u32 queue;
2306         u32 chan;
2307
2308         for (queue = 0; queue < rx_queues_count; queue++) {
2309                 chan = priv->plat->rx_queues_cfg[queue].chan;
2310                 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2311         }
2312 }
2313
2314 /**
2315  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2316  *  @priv: driver private structure
2317  *  Description: It is used for configuring the RX Queue Priority
2318  */
2319 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2320 {
2321         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2322         u32 queue;
2323         u32 prio;
2324
2325         for (queue = 0; queue < rx_queues_count; queue++) {
2326                 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2327                         continue;
2328
2329                 prio = priv->plat->rx_queues_cfg[queue].prio;
2330                 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2331         }
2332 }
2333
2334 /**
2335  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2336  *  @priv: driver private structure
2337  *  Description: It is used for configuring the TX Queue Priority
2338  */
2339 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2340 {
2341         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2342         u32 queue;
2343         u32 prio;
2344
2345         for (queue = 0; queue < tx_queues_count; queue++) {
2346                 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2347                         continue;
2348
2349                 prio = priv->plat->tx_queues_cfg[queue].prio;
2350                 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2351         }
2352 }
2353
2354 /**
2355  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2356  *  @priv: driver private structure
2357  *  Description: It is used for configuring the RX queue routing
2358  */
2359 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2360 {
2361         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2362         u32 queue;
2363         u8 packet;
2364
2365         for (queue = 0; queue < rx_queues_count; queue++) {
2366                 /* no specific packet type routing specified for the queue */
2367                 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2368                         continue;
2369
2370                 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2371                 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2372         }
2373 }
2374
2375 /**
2376  *  stmmac_mtl_configuration - Configure MTL
2377  *  @priv: driver private structure
2378  *  Description: It is used for configuring MTL
2379  */
2380 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2381 {
2382         u32 rx_queues_count = priv->plat->rx_queues_to_use;
2383         u32 tx_queues_count = priv->plat->tx_queues_to_use;
2384
2385         if (tx_queues_count > 1)
2386                 stmmac_set_tx_queue_weight(priv);
2387
2388         /* Configure MTL RX algorithms */
2389         if (rx_queues_count > 1)
2390                 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2391                                 priv->plat->rx_sched_algorithm);
2392
2393         /* Configure MTL TX algorithms */
2394         if (tx_queues_count > 1)
2395                 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2396                                 priv->plat->tx_sched_algorithm);
2397
2398         /* Configure CBS in AVB TX queues */
2399         if (tx_queues_count > 1)
2400                 stmmac_configure_cbs(priv);
2401
2402         /* Map RX MTL to DMA channels */
2403         stmmac_rx_queue_dma_chan_map(priv);
2404
2405         /* Enable MAC RX Queues */
2406         stmmac_mac_enable_rx_queues(priv);
2407
2408         /* Set RX priorities */
2409         if (rx_queues_count > 1)
2410                 stmmac_mac_config_rx_queues_prio(priv);
2411
2412         /* Set TX priorities */
2413         if (tx_queues_count > 1)
2414                 stmmac_mac_config_tx_queues_prio(priv);
2415
2416         /* Set RX routing */
2417         if (rx_queues_count > 1)
2418                 stmmac_mac_config_rx_queues_routing(priv);
2419 }
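
/*
 * With a single RX and a single TX queue most of the branches above are
 * skipped: only the MTL-to-DMA channel mapping and the MAC RX queue enable
 * run unconditionally, which matches the default single-queue setup.
 */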
2420
2421 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2422 {
2423         if (priv->dma_cap.asp) {
2424                 netdev_info(priv->dev, "Enabling Safety Features\n");
2425                 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2426         } else {
2427                 netdev_info(priv->dev, "No Safety Features support found\n");
2428         }
2429 }
2430
2431 /**
2432  * stmmac_hw_setup - setup mac in a usable state.
2433  *  @dev : pointer to the device structure.
2434  *  @init_ptp: initialize PTP if set
2435  *  Description: this is the main function to set up the HW in a usable
2436  *  state: the DMA engine is reset, the core registers are configured
2437  *  (e.g. AXI, checksum features, timers) and the DMA is ready to start
2438  *  receiving and transmitting.
2439  *  Return value:
2440  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2441  *  file on failure.
2442  */
2443 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2444 {
2445         struct stmmac_priv *priv = netdev_priv(dev);
2446         u32 rx_cnt = priv->plat->rx_queues_to_use;
2447         u32 tx_cnt = priv->plat->tx_queues_to_use;
2448         u32 chan;
2449         int ret;
2450
2451         /* DMA initialization and SW reset */
2452         ret = stmmac_init_dma_engine(priv);
2453         if (ret < 0) {
2454                 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2455                            __func__);
2456                 return ret;
2457         }
2458
2459         /* Copy the MAC addr into the HW  */
2460         stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2461
2462         /* PS and related bits will be programmed according to the speed */
2463         if (priv->hw->pcs) {
2464                 int speed = priv->plat->mac_port_sel_speed;
2465
2466                 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2467                     (speed == SPEED_1000)) {
2468                         priv->hw->ps = speed;
2469                 } else {
2470                         dev_warn(priv->device, "invalid port speed\n");
2471                         priv->hw->ps = 0;
2472                 }
2473         }
2474
2475         /* Initialize the MAC Core */
2476         stmmac_core_init(priv, priv->hw, dev);
2477
2478         /* Initialize MTL*/
2479         stmmac_mtl_configuration(priv);
2480
2481         /* Initialize Safety Features */
2482         stmmac_safety_feat_configuration(priv);
2483
2484         ret = stmmac_rx_ipc(priv, priv->hw);
2485         if (!ret) {
2486                 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2487                 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2488                 priv->hw->rx_csum = 0;
2489         }
2490
2491         /* Enable the MAC Rx/Tx */
2492         stmmac_mac_set(priv, priv->ioaddr, true);
2493
2494         /* Set the HW DMA mode and the COE */
2495         stmmac_dma_operation_mode(priv);
2496
2497         stmmac_mmc_setup(priv);
2498
2499         if (init_ptp) {
2500                 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2501                 if (ret < 0)
2502                         netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2503
2504                 ret = stmmac_init_ptp(priv);
2505                 if (ret == -EOPNOTSUPP)
2506                         netdev_warn(priv->dev, "PTP not supported by HW\n");
2507                 else if (ret)
2508                         netdev_warn(priv->dev, "PTP init failed\n");
2509         }
2510
2511 #ifdef CONFIG_DEBUG_FS
2512         ret = stmmac_init_fs(dev);
2513         if (ret < 0)
2514                 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2515                             __func__);
2516 #endif
2517         /* Start the ball rolling... */
2518         stmmac_start_all_dma(priv);
2519
2520         priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2521
2522         if (priv->use_riwt) {
2523                 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2524                 if (!ret)
2525                         priv->rx_riwt = MAX_DMA_RIWT;
2526         }
2527
2528         if (priv->hw->pcs)
2529                 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2530
2531         /* set TX and RX rings length */
2532         stmmac_set_rings_length(priv);
2533
2534         /* Enable TSO */
2535         if (priv->tso) {
2536                 for (chan = 0; chan < tx_cnt; chan++)
2537                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2538         }
2539
2540         return 0;
2541 }
2542
2543 static void stmmac_hw_teardown(struct net_device *dev)
2544 {
2545         struct stmmac_priv *priv = netdev_priv(dev);
2546
2547         clk_disable_unprepare(priv->plat->clk_ptp_ref);
2548 }
2549
2550 /**
2551  *  stmmac_open - open entry point of the driver
2552  *  @dev : pointer to the device structure.
2553  *  Description:
2554  *  This function is the open entry point of the driver.
2555  *  Return value:
2556  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2557  *  file on failure.
2558  */
2559 static int stmmac_open(struct net_device *dev)
2560 {
2561         struct stmmac_priv *priv = netdev_priv(dev);
2562         int ret;
2563
2564         stmmac_check_ether_addr(priv);
2565
2566         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2567             priv->hw->pcs != STMMAC_PCS_TBI &&
2568             priv->hw->pcs != STMMAC_PCS_RTBI) {
2569                 ret = stmmac_init_phy(dev);
2570                 if (ret) {
2571                         netdev_err(priv->dev,
2572                                    "%s: Cannot attach to PHY (error: %d)\n",
2573                                    __func__, ret);
2574                         return ret;
2575                 }
2576         }
2577
2578         /* Extra statistics */
2579         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2580         priv->xstats.threshold = tc;
2581
2582         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2583         priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2584
2585         ret = alloc_dma_desc_resources(priv);
2586         if (ret < 0) {
2587                 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2588                            __func__);
2589                 goto dma_desc_error;
2590         }
2591
2592         ret = init_dma_desc_rings(dev, GFP_KERNEL);
2593         if (ret < 0) {
2594                 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2595                            __func__);
2596                 goto init_error;
2597         }
2598
2599         ret = stmmac_hw_setup(dev, true);
2600         if (ret < 0) {
2601                 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2602                 goto init_error;
2603         }
2604
2605         stmmac_init_tx_coalesce(priv);
2606
2607         if (dev->phydev)
2608                 phy_start(dev->phydev);
2609
2610         /* Request the IRQ lines */
2611         ret = request_irq(dev->irq, stmmac_interrupt,
2612                           IRQF_SHARED, dev->name, dev);
2613         if (unlikely(ret < 0)) {
2614                 netdev_err(priv->dev,
2615                            "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2616                            __func__, dev->irq, ret);
2617                 goto irq_error;
2618         }
2619
2620         /* Request the Wake IRQ in case another line is used for WoL */
2621         if (priv->wol_irq != dev->irq) {
2622                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2623                                   IRQF_SHARED, dev->name, dev);
2624                 if (unlikely(ret < 0)) {
2625                         netdev_err(priv->dev,
2626                                    "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2627                                    __func__, priv->wol_irq, ret);
2628                         goto wolirq_error;
2629                 }
2630         }
2631
2632         /* Request the LPI IRQ in case a dedicated line is used */
2633         if (priv->lpi_irq > 0) {
2634                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2635                                   dev->name, dev);
2636                 if (unlikely(ret < 0)) {
2637                         netdev_err(priv->dev,
2638                                    "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2639                                    __func__, priv->lpi_irq, ret);
2640                         goto lpiirq_error;
2641                 }
2642         }
2643
2644         stmmac_enable_all_queues(priv);
2645         stmmac_start_all_queues(priv);
2646
2647         return 0;
2648
2649 lpiirq_error:
2650         if (priv->wol_irq != dev->irq)
2651                 free_irq(priv->wol_irq, dev);
2652 wolirq_error:
2653         free_irq(dev->irq, dev);
2654 irq_error:
2655         if (dev->phydev)
2656                 phy_stop(dev->phydev);
2657
2658         del_timer_sync(&priv->txtimer);
2659         stmmac_hw_teardown(dev);
2660 init_error:
2661         free_dma_desc_resources(priv);
2662 dma_desc_error:
2663         if (dev->phydev)
2664                 phy_disconnect(dev->phydev);
2665
2666         return ret;
2667 }
2668
2669 /**
2670  *  stmmac_release - close entry point of the driver
2671  *  @dev : device pointer.
2672  *  Description:
2673  *  This is the stop entry point of the driver.
2674  */
2675 static int stmmac_release(struct net_device *dev)
2676 {
2677         struct stmmac_priv *priv = netdev_priv(dev);
2678
2679         if (priv->eee_enabled)
2680                 del_timer_sync(&priv->eee_ctrl_timer);
2681
2682         /* Stop and disconnect the PHY */
2683         if (dev->phydev) {
2684                 phy_stop(dev->phydev);
2685                 phy_disconnect(dev->phydev);
2686         }
2687
2688         stmmac_stop_all_queues(priv);
2689
2690         stmmac_disable_all_queues(priv);
2691
2692         del_timer_sync(&priv->txtimer);
2693
2694         /* Free the IRQ lines */
2695         free_irq(dev->irq, dev);
2696         if (priv->wol_irq != dev->irq)
2697                 free_irq(priv->wol_irq, dev);
2698         if (priv->lpi_irq > 0)
2699                 free_irq(priv->lpi_irq, dev);
2700
2701         /* Stop TX/RX DMA and clear the descriptors */
2702         stmmac_stop_all_dma(priv);
2703
2704         /* Release and free the Rx/Tx resources */
2705         free_dma_desc_resources(priv);
2706
2707         /* Disable the MAC Rx/Tx */
2708         stmmac_mac_set(priv, priv->ioaddr, false);
2709
2710         netif_carrier_off(dev);
2711
2712 #ifdef CONFIG_DEBUG_FS
2713         stmmac_exit_fs(dev);
2714 #endif
2715
2716         stmmac_release_ptp(priv);
2717
2718         return 0;
2719 }
2720
2721 /**
2722  *  stmmac_tso_allocator - fill TSO descriptors for a buffer
2723  *  @priv: driver private structure
2724  *  @des: buffer start address
2725  *  @total_len: total length to fill in descriptors
2726  *  @last_segment: true if this is the last descriptor of the frame
2727  *  @queue: TX queue index
2728  *  Description:
2729  *  This function fills descriptors and requests new ones according to
2730  *  the buffer length to fill.
2731  */
2732 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2733                                  int total_len, bool last_segment, u32 queue)
2734 {
2735         struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2736         struct dma_desc *desc;
2737         u32 buff_size;
2738         int tmp_len;
2739
2740         tmp_len = total_len;
2741
2742         while (tmp_len > 0) {
2743                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2744                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2745                 desc = tx_q->dma_tx + tx_q->cur_tx;
2746
2747                 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2748                 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2749                             TSO_MAX_BUFF_SIZE : tmp_len;
2750
2751                 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2752                                 0, 1,
2753                                 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2754                                 0, 0);
2755
2756                 tmp_len -= TSO_MAX_BUFF_SIZE;
2757         }
2758 }
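
/*
 * Worked example for the split above, assuming TSO_MAX_BUFF_SIZE is 16383
 * bytes: a remaining payload of 40000 bytes is spread over three descriptors
 * of 16383, 16383 and 7234 bytes, and only the last one gets the
 * last-segment bit when last_segment is true.
 */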
2759
2760 /**
2761  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2762  *  @skb : the socket buffer
2763  *  @dev : device pointer
2764  *  Description: this is the transmit function that is called on TSO frames
2765  *  (support available on GMAC4 and newer chips).
2766  *  Diagram below shows the ring programming in case of TSO frames:
2767  *
2768  *  First Descriptor
2769  *   --------
2770  *   | DES0 |---> buffer1 = L2/L3/L4 header
2771  *   | DES1 |---> TCP Payload (can continue on next descr...)
2772  *   | DES2 |---> buffer 1 and 2 len
2773  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2774  *   --------
2775  *      |
2776  *     ...
2777  *      |
2778  *   --------
2779  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2780  *   | DES1 | --|
2781  *   | DES2 | --> buffer 1 and 2 len
2782  *   | DES3 |
2783  *   --------
2784  *
2785  * mss is fixed while TSO is enabled, so the TDES3 ctx field is only programmed when the mss changes.
2786  */
2787 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2788 {
2789         struct dma_desc *desc, *first, *mss_desc = NULL;
2790         struct stmmac_priv *priv = netdev_priv(dev);
2791         int nfrags = skb_shinfo(skb)->nr_frags;
2792         u32 queue = skb_get_queue_mapping(skb);
2793         unsigned int first_entry, des;
2794         struct stmmac_tx_queue *tx_q;
2795         int tmp_pay_len = 0;
2796         u32 pay_len, mss;
2797         u8 proto_hdr_len;
2798         int i;
2799
2800         tx_q = &priv->tx_queue[queue];
2801
2802         /* Compute header lengths */
2803         proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2804
2805         /* Desc availability based on threshold should be safe enough */
2806         if (unlikely(stmmac_tx_avail(priv, queue) <
2807                 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2808                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2809                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2810                                                                 queue));
2811                         /* This is a hard error, log it. */
2812                         netdev_err(priv->dev,
2813                                    "%s: Tx Ring full when queue awake\n",
2814                                    __func__);
2815                 }
2816                 return NETDEV_TX_BUSY;
2817         }
2818
2819         pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2820
2821         mss = skb_shinfo(skb)->gso_size;
2822
2823         /* set new MSS value if needed */
2824         if (mss != tx_q->mss) {
2825                 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2826                 stmmac_set_mss(priv, mss_desc, mss);
2827                 tx_q->mss = mss;
2828                 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2829                 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2830         }
2831
2832         if (netif_msg_tx_queued(priv)) {
2833                 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2834                         __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2835                 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2836                         skb->data_len);
2837         }
2838
2839         first_entry = tx_q->cur_tx;
2840         WARN_ON(tx_q->tx_skbuff[first_entry]);
2841
2842         desc = tx_q->dma_tx + first_entry;
2843         first = desc;
2844
2845         /* first descriptor: fill Headers on Buf1 */
2846         des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2847                              DMA_TO_DEVICE);
2848         if (dma_mapping_error(priv->device, des))
2849                 goto dma_map_err;
2850
2851         tx_q->tx_skbuff_dma[first_entry].buf = des;
2852         tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2853
2854         first->des0 = cpu_to_le32(des);
2855
2856         /* Fill start of payload in buff2 of first descriptor */
2857         if (pay_len)
2858                 first->des1 = cpu_to_le32(des + proto_hdr_len);
2859
2860         /* If needed take extra descriptors to fill the remaining payload */
2861         tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2862
2863         stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2864
2865         /* Prepare fragments */
2866         for (i = 0; i < nfrags; i++) {
2867                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2868
2869                 des = skb_frag_dma_map(priv->device, frag, 0,
2870                                        skb_frag_size(frag),
2871                                        DMA_TO_DEVICE);
2872                 if (dma_mapping_error(priv->device, des))
2873                         goto dma_map_err;
2874
2875                 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2876                                      (i == nfrags - 1), queue);
2877
2878                 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2879                 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2880                 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2881         }
2882
2883         tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2884
2885         /* Only the last descriptor gets to point to the skb. */
2886         tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2887
2888         /* We've used all descriptors we need for this skb, however,
2889          * advance cur_tx so that it references a fresh descriptor.
2890          * ndo_start_xmit will fill this descriptor the next time it's
2891          * called and stmmac_tx_clean may clean up to this descriptor.
2892          */
2893         tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2894
2895         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2896                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2897                           __func__);
2898                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2899         }
2900
2901         dev->stats.tx_bytes += skb->len;
2902         priv->xstats.tx_tso_frames++;
2903         priv->xstats.tx_tso_nfrags += nfrags;
2904
2905         /* Manage tx mitigation */
2906         priv->tx_count_frames += nfrags + 1;
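        /* Below the frame-coalescing threshold, rely on the timer to raise
         * the Tx completion interrupt; once the threshold is reached, request
         * an interrupt for this descriptor and reset the frame counter.
         */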
2907         if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2908                 mod_timer(&priv->txtimer,
2909                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
2910         } else {
2911                 priv->tx_count_frames = 0;
2912                 stmmac_set_tx_ic(priv, desc);
2913                 priv->xstats.tx_set_ic_bit++;
2914         }
2915
2916         skb_tx_timestamp(skb);
2917
2918         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2919                      priv->hwts_tx_en)) {
2920                 /* declare that device is doing timestamping */
2921                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2922                 stmmac_enable_tx_timestamp(priv, first);
2923         }
2924
2925         /* Complete the first descriptor before granting the DMA */
2926         stmmac_prepare_tso_tx_desc(priv, first, 1,
2927                         proto_hdr_len,
2928                         pay_len,
2929                         1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2930                         tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2931
2932         /* If context desc is used to change MSS */
2933         if (mss_desc) {
2934                 /* Make sure that the first descriptor has been completely
2935                  * written, including its OWN bit. This is because the MSS
2936                  * descriptor actually precedes the first descriptor, so its
2937                  * OWN bit must be the last thing written.
2938                  */
2939                 dma_wmb();
2940                 stmmac_set_tx_owner(priv, mss_desc);
2941         }
2942
2943         /* The OWN bit must be the last thing set when preparing the
2944          * descriptor, and a barrier is then needed to make sure that
2945          * everything is coherent before granting control to the DMA engine.
2946          */
2947         wmb();
2948
2949         if (netif_msg_pktdata(priv)) {
2950                 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2951                         __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2952                         tx_q->cur_tx, first, nfrags);
2953
2954                 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2955
2956                 pr_info(">>> frame to be transmitted: ");
2957                 print_pkt(skb->data, skb_headlen(skb));
2958         }
2959
2960         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2961
2962         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2963
2964         return NETDEV_TX_OK;
2965
2966 dma_map_err:
2967         dev_err(priv->device, "Tx dma map failed\n");
2968         dev_kfree_skb(skb);
2969         priv->dev->stats.tx_dropped++;
2970         return NETDEV_TX_OK;
2971 }
2972
2973 /**
2974  *  stmmac_xmit - Tx entry point of the driver
2975  *  @skb : the socket buffer
2976  *  @dev : device pointer
2977  *  Description : this is the tx entry point of the driver.
2978  *  It programs the chain or the ring and supports oversized frames
2979  *  and SG feature.
2980  */
2981 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2982 {
2983         struct stmmac_priv *priv = netdev_priv(dev);
2984         unsigned int nopaged_len = skb_headlen(skb);
2985         int i, csum_insertion = 0, is_jumbo = 0;
2986         u32 queue = skb_get_queue_mapping(skb);
2987         int nfrags = skb_shinfo(skb)->nr_frags;
2988         int entry;
2989         unsigned int first_entry;
2990         struct dma_desc *desc, *first;
2991         struct stmmac_tx_queue *tx_q;
2992         unsigned int enh_desc;
2993         unsigned int des;
2994
2995         tx_q = &priv->tx_queue[queue];
2996
2997         /* Manage oversized TCP frames for GMAC4 device */
2998         if (skb_is_gso(skb) && priv->tso) {
2999                 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3000                         return stmmac_tso_xmit(skb, dev);
3001         }
3002
3003         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3004                 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3005                         netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3006                                                                 queue));
3007                         /* This is a hard error, log it. */
3008                         netdev_err(priv->dev,
3009                                    "%s: Tx Ring full when queue awake\n",
3010                                    __func__);
3011                 }
3012                 return NETDEV_TX_BUSY;
3013         }
3014
3015         if (priv->tx_path_in_lpi_mode)
3016                 stmmac_disable_eee_mode(priv);
3017
3018         entry = tx_q->cur_tx;
3019         first_entry = entry;
3020         WARN_ON(tx_q->tx_skbuff[first_entry]);
3021
3022         csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3023
3024         if (likely(priv->extend_desc))
3025                 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3026         else
3027                 desc = tx_q->dma_tx + entry;
3028
3029         first = desc;
3030
3031         enh_desc = priv->plat->enh_desc;
3032         /* To program the descriptors according to the size of the frame */
3033         if (enh_desc)
3034                 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3035
3036         if (unlikely(is_jumbo)) {
3037                 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3038                 if (unlikely(entry < 0) && (entry != -EINVAL))
3039                         goto dma_map_err;
3040         }
3041
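        /* Map each page fragment and program one descriptor per fragment;
         * their OWN bits are set here, while the first descriptor is
         * completed last, after all fragments are in place.
         */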
3042         for (i = 0; i < nfrags; i++) {
3043                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3044                 int len = skb_frag_size(frag);
3045                 bool last_segment = (i == (nfrags - 1));
3046
3047                 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3048                 WARN_ON(tx_q->tx_skbuff[entry]);
3049
3050                 if (likely(priv->extend_desc))
3051                         desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3052                 else
3053                         desc = tx_q->dma_tx + entry;
3054
3055                 des = skb_frag_dma_map(priv->device, frag, 0, len,
3056                                        DMA_TO_DEVICE);
3057                 if (dma_mapping_error(priv->device, des))
3058                         goto dma_map_err; /* should reuse desc w/o issues */
3059
3060                 tx_q->tx_skbuff_dma[entry].buf = des;
3061
3062                 stmmac_set_desc_addr(priv, desc, des);
3063
3064                 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3065                 tx_q->tx_skbuff_dma[entry].len = len;
3066                 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3067
3068                 /* Prepare the descriptor and set the own bit too */
3069                 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3070                                 priv->mode, 1, last_segment, skb->len);
3071         }
3072
3073         /* Only the last descriptor gets to point to the skb. */
3074         tx_q->tx_skbuff[entry] = skb;
3075
3076         /* We've used all descriptors we need for this skb, however,
3077          * advance cur_tx so that it references a fresh descriptor.
3078          * ndo_start_xmit will fill this descriptor the next time it's
3079          * called and stmmac_tx_clean may clean up to this descriptor.
3080          */
3081         entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3082         tx_q->cur_tx = entry;
3083
3084         if (netif_msg_pktdata(priv)) {
3085                 void *tx_head;
3086
3087                 netdev_dbg(priv->dev,
3088                            "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3089                            __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3090                            entry, first, nfrags);
3091
3092                 if (priv->extend_desc)
3093                         tx_head = (void *)tx_q->dma_etx;
3094                 else
3095                         tx_head = (void *)tx_q->dma_tx;
3096
3097                 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3098
3099                 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3100                 print_pkt(skb->data, skb->len);
3101         }
3102
3103         if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3104                 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3105                           __func__);
3106                 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3107         }
3108
3109         dev->stats.tx_bytes += skb->len;
3110
3111         /* According to the coalesce parameter, the IC bit for the latest
3112          * segment is cleared and the timer re-started to clean the tx status.
3113          * This approach takes the fragments into account: desc is the first
3114          * element when there is no SG.
3115          */
3116         priv->tx_count_frames += nfrags + 1;
3117         if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3118             !priv->tx_timer_armed) {
3119                 mod_timer(&priv->txtimer,
3120                           STMMAC_COAL_TIMER(priv->tx_coal_timer));
3121                 priv->tx_timer_armed = true;
3122         } else {
3123                 priv->tx_count_frames = 0;
3124                 stmmac_set_tx_ic(priv, desc);
3125                 priv->xstats.tx_set_ic_bit++;
3126                 priv->tx_timer_armed = false;
3127         }
3128
3129         skb_tx_timestamp(skb);
3130
3131         /* It is now safe to fill the first descriptor and set the OWN bit,
3132          * because all the other descriptors are ready to be
3133          * passed to the DMA engine.
3134          */
3135         if (likely(!is_jumbo)) {
3136                 bool last_segment = (nfrags == 0);
3137
3138                 des = dma_map_single(priv->device, skb->data,
3139                                      nopaged_len, DMA_TO_DEVICE);
3140                 if (dma_mapping_error(priv->device, des))
3141                         goto dma_map_err;
3142
3143                 tx_q->tx_skbuff_dma[first_entry].buf = des;
3144
3145                 stmmac_set_desc_addr(priv, first, des);
3146
3147                 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3148                 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3149
3150                 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3151                              priv->hwts_tx_en)) {
3152                         /* declare that device is doing timestamping */
3153                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3154                         stmmac_enable_tx_timestamp(priv, first);
3155                 }
3156
3157                 /* Prepare the first descriptor setting the OWN bit too */
3158                 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3159                                 csum_insertion, priv->mode, 1, last_segment,
3160                                 skb->len);
3161
3162                 /* The OWN bit must be the last thing set when preparing the
3163                  * descriptor, and a barrier is then needed to make sure that
3164                  * all is coherent before granting control to the DMA engine.
3165                  */
3166                 wmb();
3167         }
3168
3169         netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3170
3171         stmmac_enable_dma_transmission(priv, priv->ioaddr);
3172         stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3173
3174         return NETDEV_TX_OK;
3175
3176 dma_map_err:
3177         netdev_err(priv->dev, "Tx DMA map failed\n");
3178         dev_kfree_skb(skb);
3179         priv->dev->stats.tx_dropped++;
3180         return NETDEV_TX_OK;
3181 }
3182
3183 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3184 {
3185         struct vlan_ethhdr *veth;
3186         __be16 vlan_proto;
3187         u16 vlanid;
3188
3189         veth = (struct vlan_ethhdr *)skb->data;
3190         vlan_proto = veth->h_vlan_proto;
3191
3192         if ((vlan_proto == htons(ETH_P_8021Q) &&
3193              dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3194             (vlan_proto == htons(ETH_P_8021AD) &&
3195              dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3196                 /* pop the vlan tag */
3197                 vlanid = ntohs(veth->h_vlan_TCI);
3198                 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3199                 skb_pull(skb, VLAN_HLEN);
3200                 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3201         }
3202 }
3203
3204
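/* Returns true when rx_zeroc_thresh has reached STMMAC_RX_THRESH (e.g. after
 * an skb allocation failure), signalling that the copy path should be used
 * instead of zero-copy for a while.
 */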
3205 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3206 {
3207         if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3208                 return 0;
3209
3210         return 1;
3211 }
3212
3213 /**
3214  * stmmac_rx_refill - refill used skb preallocated buffers
3215  * @priv: driver private structure
3216  * @queue: RX queue index
3217  * Description : this reallocates the skb buffers used by the zero-copy
3218  * reception process.
3219  */
3220 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3221 {
3222         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3223         int dirty = stmmac_rx_dirty(priv, queue);
3224         unsigned int entry = rx_q->dirty_rx;
3225
3226         int bfsize = priv->dma_buf_sz;
3227
3228         while (dirty-- > 0) {
3229                 struct dma_desc *p;
3230
3231                 if (priv->extend_desc)
3232                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3233                 else
3234                         p = rx_q->dma_rx + entry;
3235
3236                 if (likely(!rx_q->rx_skbuff[entry])) {
3237                         struct sk_buff *skb;
3238
3239                         skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3240                         if (unlikely(!skb)) {
3241                                 /* so for a while no zero-copy! */
3242                                 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3243                                 if (unlikely(net_ratelimit()))
3244                                         dev_err(priv->device,
3245                                                 "fail to alloc skb entry %d\n",
3246                                                 entry);
3247                                 break;
3248                         }
3249
3250                         rx_q->rx_skbuff[entry] = skb;
3251                         rx_q->rx_skbuff_dma[entry] =
3252                             dma_map_single(priv->device, skb->data, bfsize,
3253                                            DMA_FROM_DEVICE);
3254                         if (dma_mapping_error(priv->device,
3255                                               rx_q->rx_skbuff_dma[entry])) {
3256                                 netdev_err(priv->dev, "Rx DMA map failed\n");
3257                                 dev_kfree_skb(skb);
3258                                 break;
3259                         }
3260
3261                         stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3262                         stmmac_refill_desc3(priv, rx_q, p);
3263
3264                         if (rx_q->rx_zeroc_thresh > 0)
3265                                 rx_q->rx_zeroc_thresh--;
3266
3267                         netif_dbg(priv, rx_status, priv->dev,
3268                                   "refill entry #%d\n", entry);
3269                 }
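                /* Make sure the new buffer address is visible to the device
                 * before the descriptor is handed back via the OWN bit.
                 */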
3270                 dma_wmb();
3271
3272                 stmmac_set_rx_owner(priv, p, priv->use_riwt);
3273
3274                 dma_wmb();
3275
3276                 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3277         }
3278         rx_q->dirty_rx = entry;
3279 }
3280
3281 /**
3282  * stmmac_rx - manage the receive process
3283  * @priv: driver private structure
3284  * @limit: napi budget
3285  * @queue: RX queue index.
3286  * Description :  this is the function called by the napi poll method.
3287  * It gets all the frames inside the ring.
3288  */
3289 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3290 {
3291         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3292         unsigned int entry = rx_q->cur_rx;
3293         int coe = priv->hw->rx_csum;
3294         unsigned int next_entry;
3295         unsigned int count = 0;
3296
3297         if (netif_msg_rx_status(priv)) {
3298                 void *rx_head;
3299
3300                 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3301                 if (priv->extend_desc)
3302                         rx_head = (void *)rx_q->dma_erx;
3303                 else
3304                         rx_head = (void *)rx_q->dma_rx;
3305
3306                 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3307         }
3308         while (count < limit) {
3309                 int status;
3310                 struct dma_desc *p;
3311                 struct dma_desc *np;
3312
3313                 if (priv->extend_desc)
3314                         p = (struct dma_desc *)(rx_q->dma_erx + entry);
3315                 else
3316                         p = rx_q->dma_rx + entry;
3317
3318                 /* read the status of the incoming frame */
3319                 status = stmmac_rx_status(priv, &priv->dev->stats,
3320                                 &priv->xstats, p);
3321                 /* check if managed by the DMA otherwise go ahead */
3322                 if (unlikely(status & dma_own))
3323                         break;
3324
3325                 count++;
3326
3327                 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3328                 next_entry = rx_q->cur_rx;
3329
3330                 if (priv->extend_desc)
3331                         np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3332                 else
3333                         np = rx_q->dma_rx + next_entry;
3334
3335                 prefetch(np);
3336
3337                 if (priv->extend_desc)
3338                         stmmac_rx_extended_status(priv, &priv->dev->stats,
3339                                         &priv->xstats, rx_q->dma_erx + entry);
3340                 if (unlikely(status == discard_frame)) {
3341                         priv->dev->stats.rx_errors++;
3342                         if (priv->hwts_rx_en && !priv->extend_desc) {
3343                                 /* DESC2 & DESC3 will be overwritten by device
3344                                  * with timestamp value, hence reinitialize
3345                                  * them in stmmac_rx_refill() function so that
3346                                  * device can reuse it.
3347                                  */
3348                                 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3349                                 rx_q->rx_skbuff[entry] = NULL;
3350                                 dma_unmap_single(priv->device,
3351                                                  rx_q->rx_skbuff_dma[entry],
3352                                                  priv->dma_buf_sz,
3353                                                  DMA_FROM_DEVICE);
3354                         }
3355                 } else {
3356                         struct sk_buff *skb;
3357                         int frame_len;
3358                         unsigned int des;
3359
3360                         stmmac_get_desc_addr(priv, p, &des);
3361                         frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3362
3363                         /*  If the frame length is greater than the skb
3364                          *  buffer size (preallocated during init), the
3365                          *  packet is dropped
3366                          */
3367                         if (frame_len > priv->dma_buf_sz) {
3368                                 netdev_err(priv->dev,
3369                                            "len %d larger than size (%d)\n",
3370                                            frame_len, priv->dma_buf_sz);
3371                                 priv->dev->stats.rx_length_errors++;
3372                                 break;
3373                         }
3374
3375                         /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3376                          * Type frames (LLC/LLC-SNAP)
3377                          *
3378                          * llc_snap is never checked in GMAC >= 4, so this ACS
3379                          * feature is always disabled and packets need to be
3380                          * stripped manually.
3381                          */
3382                         if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3383                             unlikely(status != llc_snap))
3384                                 frame_len -= ETH_FCS_LEN;
3385
3386                         if (netif_msg_rx_status(priv)) {
3387                                 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3388                                            p, entry, des);
3389                                 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3390                                            frame_len, status);
3391                         }
3392
3393                         /* Zero-copy is always used, for all frame sizes,
3394                          * in case of GMAC4 because the used descriptors
3395                          * always need to be refilled.
3396                          */
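                        /* For small frames, or when skb allocations have been
                         * failing, copy the payload into a fresh skb so the
                         * original DMA buffer stays mapped and can be reused.
                         */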
3397                         if (unlikely(!priv->plat->has_gmac4 &&
3398                                      ((frame_len < priv->rx_copybreak) ||
3399                                      stmmac_rx_threshold_count(rx_q)))) {
3400                                 skb = netdev_alloc_skb_ip_align(priv->dev,
3401                                                                 frame_len);
3402                                 if (unlikely(!skb)) {
3403                                         if (net_ratelimit())
3404                                                 dev_warn(priv->device,
3405                                                          "packet dropped\n");
3406                                         priv->dev->stats.rx_dropped++;
3407                                         break;
3408                                 }
3409
3410                                 dma_sync_single_for_cpu(priv->device,
3411                                                         rx_q->rx_skbuff_dma
3412                                                         [entry], frame_len,
3413                                                         DMA_FROM_DEVICE);
3414                                 skb_copy_to_linear_data(skb,
3415                                                         rx_q->
3416                                                         rx_skbuff[entry]->data,
3417                                                         frame_len);
3418
3419                                 skb_put(skb, frame_len);
3420                                 dma_sync_single_for_device(priv->device,
3421                                                            rx_q->rx_skbuff_dma
3422                                                            [entry], frame_len,
3423                                                            DMA_FROM_DEVICE);
3424                         } else {
3425                                 skb = rx_q->rx_skbuff[entry];
3426                                 if (unlikely(!skb)) {
3427                                         netdev_err(priv->dev,
3428                                                    "%s: Inconsistent Rx chain\n",
3429                                                    priv->dev->name);
3430                                         priv->dev->stats.rx_dropped++;
3431                                         break;
3432                                 }
3433                                 prefetch(skb->data - NET_IP_ALIGN);
3434                                 rx_q->rx_skbuff[entry] = NULL;
3435                                 rx_q->rx_zeroc_thresh++;
3436
3437                                 skb_put(skb, frame_len);
3438                                 dma_unmap_single(priv->device,
3439                                                  rx_q->rx_skbuff_dma[entry],
3440                                                  priv->dma_buf_sz,
3441                                                  DMA_FROM_DEVICE);
3442                         }
3443
3444                         if (netif_msg_pktdata(priv)) {
3445                                 netdev_dbg(priv->dev, "frame received (%dbytes)",
3446                                            frame_len);
3447                                 print_pkt(skb->data, frame_len);
3448                         }
3449
3450                         stmmac_get_rx_hwtstamp(priv, p, np, skb);
3451
3452                         stmmac_rx_vlan(priv->dev, skb);
3453
3454                         skb->protocol = eth_type_trans(skb, priv->dev);
3455
3456                         if (unlikely(!coe))
3457                                 skb_checksum_none_assert(skb);
3458                         else
3459                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3460
3461                         napi_gro_receive(&rx_q->napi, skb);
3462
3463                         priv->dev->stats.rx_packets++;
3464                         priv->dev->stats.rx_bytes += frame_len;
3465                 }
3466                 entry = next_entry;
3467         }
3468
3469         stmmac_rx_refill(priv, queue);
3470
3471         priv->xstats.rx_pkt_n += count;
3472
3473         return count;
3474 }
3475
3476 /**
3477  *  stmmac_poll - stmmac poll method (NAPI)
3478  *  @napi : pointer to the napi structure.
3479  *  @budget : maximum number of packets that the current CPU can receive from
3480  *            all interfaces.
3481  *  Description :
3482  *  Process the incoming frames and clean up the completed Tx resources.
3483  */
3484 static int stmmac_poll(struct napi_struct *napi, int budget)
3485 {
3486         struct stmmac_rx_queue *rx_q =
3487                 container_of(napi, struct stmmac_rx_queue, napi);
3488         struct stmmac_priv *priv = rx_q->priv_data;
3489         u32 tx_count = priv->plat->tx_queues_to_use;
3490         u32 chan = rx_q->queue_index;
3491         int work_done = 0;
3492         u32 queue;
3493
3494         priv->xstats.napi_poll++;
3495
3496         /* check all the queues */
3497         for (queue = 0; queue < tx_count; queue++)
3498                 stmmac_tx_clean(priv, queue);
3499
3500         work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3501         if (work_done < budget) {
3502                 napi_complete_done(napi, work_done);
3503                 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3504         }
3505         return work_done;
3506 }
3507
3508 /**
3509  *  stmmac_tx_timeout
3510  *  @dev : Pointer to net device structure
3511  *  Description: this function is called when a packet transmission fails to
3512  *   complete within a reasonable time. The driver will mark the error in the
3513  *   netdev structure and arrange for the device to be reset to a sane state
3514  *   in order to transmit a new packet.
3515  */
3516 static void stmmac_tx_timeout(struct net_device *dev)
3517 {
3518         struct stmmac_priv *priv = netdev_priv(dev);
3519
3520         stmmac_global_err(priv);
3521 }
3522
3523 /**
3524  *  stmmac_set_rx_mode - entry point for multicast addressing
3525  *  @dev : pointer to the device structure
3526  *  Description:
3527  *  This function is a driver entry point which gets called by the kernel
3528  *  whenever multicast addresses must be enabled/disabled.
3529  *  Return value:
3530  *  void.
3531  */
3532 static void stmmac_set_rx_mode(struct net_device *dev)
3533 {
3534         struct stmmac_priv *priv = netdev_priv(dev);
3535
3536         stmmac_set_filter(priv, priv->hw, dev);
3537 }
3538
3539 /**
3540  *  stmmac_change_mtu - entry point to change MTU size for the device.
3541  *  @dev : device pointer.
3542  *  @new_mtu : the new MTU size for the device.
3543  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3544  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3545  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3546  *  Return value:
3547  *  0 on success and an appropriate negative error code, as defined in
3548  *  errno.h, on failure.
3549  */
3550 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3551 {
3552         struct stmmac_priv *priv = netdev_priv(dev);
3553
3554         if (netif_running(dev)) {
3555                 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3556                 return -EBUSY;
3557         }
3558
3559         dev->mtu = new_mtu;
3560
3561         netdev_update_features(dev);
3562
3563         return 0;
3564 }
3565
3566 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3567                                              netdev_features_t features)
3568 {
3569         struct stmmac_priv *priv = netdev_priv(dev);
3570
3571         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3572                 features &= ~NETIF_F_RXCSUM;
3573
3574         if (!priv->plat->tx_coe)
3575                 features &= ~NETIF_F_CSUM_MASK;
3576
3577         /* Some GMAC devices have buggy Jumbo frame support that
3578          * needs the Tx COE to be disabled for oversized frames
3579          * (due to limited buffer sizes). In this case we disable
3580          * the TX csum insertion in the TDES and do not use SF.
3581          */
3582         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3583                 features &= ~NETIF_F_CSUM_MASK;
3584
3585         /* Enable/disable TSO as requested by ethtool */
3586         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3587                 if (features & NETIF_F_TSO)
3588                         priv->tso = true;
3589                 else
3590                         priv->tso = false;
3591         }
3592
3593         return features;
3594 }
3595
3596 static int stmmac_set_features(struct net_device *netdev,
3597                                netdev_features_t features)
3598 {
3599         struct stmmac_priv *priv = netdev_priv(netdev);
3600
3601         /* Keep the COE type when checksum offload is supported */
3602         if (features & NETIF_F_RXCSUM)
3603                 priv->hw->rx_csum = priv->plat->rx_coe;
3604         else
3605                 priv->hw->rx_csum = 0;
3606         /* No check needed because rx_coe has already been set and will be
3607          * fixed up if there is an issue.
3608          */
3609         stmmac_rx_ipc(priv, priv->hw);
3610
3611         return 0;
3612 }
3613
3614 /**
3615  *  stmmac_interrupt - main ISR
3616  *  @irq: interrupt number.
3617  *  @dev_id: to pass the net device pointer.
3618  *  Description: this is the main driver interrupt service routine.
3619  *  It can call:
3620  *  o DMA service routine (to manage incoming frame reception and transmission
3621  *    status)
3622  *  o Core interrupts to manage: remote wake-up, management counter, LPI
3623  *    interrupts.
3624  */
3625 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3626 {
3627         struct net_device *dev = (struct net_device *)dev_id;
3628         struct stmmac_priv *priv = netdev_priv(dev);
3629         u32 rx_cnt = priv->plat->rx_queues_to_use;
3630         u32 tx_cnt = priv->plat->tx_queues_to_use;
3631         u32 queues_count;
3632         u32 queue;
3633
3634         queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3635
3636         if (priv->irq_wake)
3637                 pm_wakeup_event(priv->device, 0);
3638
3639         if (unlikely(!dev)) {
3640                 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3641                 return IRQ_NONE;
3642         }
3643
3644         /* Check if adapter is up */
3645         if (test_bit(STMMAC_DOWN, &priv->state))
3646                 return IRQ_HANDLED;
3647         /* Check if a fatal error happened */
3648         if (stmmac_safety_feat_interrupt(priv))
3649                 return IRQ_HANDLED;
3650
3651         /* Handle the GMAC core's own interrupts */
3652         if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3653                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3654                 int mtl_status;
3655
3656                 if (unlikely(status)) {
3657                         /* For LPI we need to save the tx status */
3658                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3659                                 priv->tx_path_in_lpi_mode = true;
3660                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3661                                 priv->tx_path_in_lpi_mode = false;
3662                 }
3663
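                /* Check the per-queue MTL interrupt status; on an RX FIFO
                 * overflow, rewrite the tail pointer so the DMA resumes
                 * fetching RX descriptors.
                 */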
3664                 for (queue = 0; queue < queues_count; queue++) {
3665                         struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3666
3667                         mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3668                                                                 queue);
3669                         if (mtl_status != -EINVAL)
3670                                 status |= mtl_status;
3671
3672                         if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3673                                 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3674                                                        rx_q->rx_tail_addr,
3675                                                        queue);
3676                 }
3677
3678                 /* PCS link status */
3679                 if (priv->hw->pcs) {
3680                         if (priv->xstats.pcs_link)
3681                                 netif_carrier_on(dev);
3682                         else
3683                                 netif_carrier_off(dev);
3684                 }
3685         }
3686
3687         /* To handle DMA interrupts */
3688         stmmac_dma_interrupt(priv);
3689
3690         return IRQ_HANDLED;
3691 }
3692
3693 #ifdef CONFIG_NET_POLL_CONTROLLER
3694 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3695  * to allow network I/O with interrupts disabled.
3696  */
3697 static void stmmac_poll_controller(struct net_device *dev)
3698 {
3699         disable_irq(dev->irq);
3700         stmmac_interrupt(dev->irq, dev);
3701         enable_irq(dev->irq);
3702 }
3703 #endif
3704
3705 /**
3706  *  stmmac_ioctl - Entry point for the Ioctl
3707  *  @dev: Device pointer.
3708  *  @rq: An IOCTL-specific structure that can contain a pointer to
3709  *  a proprietary structure used to pass information to the driver.
3710  *  @cmd: IOCTL command
3711  *  Description:
3712  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3713  */
3714 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3715 {
3716         int ret = -EOPNOTSUPP;
3717
3718         if (!netif_running(dev))
3719                 return -EINVAL;
3720
3721         switch (cmd) {
3722         case SIOCGMIIPHY:
3723         case SIOCGMIIREG:
3724         case SIOCSMIIREG:
3725                 if (!dev->phydev)
3726                         return -EINVAL;
3727                 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3728                 break;
3729         case SIOCSHWTSTAMP:
3730                 ret = stmmac_hwtstamp_ioctl(dev, rq);
3731                 break;
3732         default:
3733                 break;
3734         }
3735
3736         return ret;
3737 }
3738
3739 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3740                                     void *cb_priv)
3741 {
3742         struct stmmac_priv *priv = cb_priv;
3743         int ret = -EOPNOTSUPP;
3744
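        /* Quiesce all queues while the classifier offload is programmed. */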
3745         stmmac_disable_all_queues(priv);
3746
3747         switch (type) {
3748         case TC_SETUP_CLSU32:
3749                 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3750                         ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3751                 break;
3752         default:
3753                 break;
3754         }
3755
3756         stmmac_enable_all_queues(priv);
3757         return ret;
3758 }
3759
3760 static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3761                                  struct tc_block_offload *f)
3762 {
3763         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3764                 return -EOPNOTSUPP;
3765
3766         switch (f->command) {
3767         case TC_BLOCK_BIND:
3768                 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3769                                 priv, priv);
3770         case TC_BLOCK_UNBIND:
3771                 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3772                 return 0;
3773         default:
3774                 return -EOPNOTSUPP;
3775         }
3776 }
3777
3778 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3779                            void *type_data)
3780 {
3781         struct stmmac_priv *priv = netdev_priv(ndev);
3782
3783         switch (type) {
3784         case TC_SETUP_BLOCK:
3785                 return stmmac_setup_tc_block(priv, type_data);
3786         default:
3787                 return -EOPNOTSUPP;
3788         }
3789 }
3790
3791 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3792 {
3793         struct stmmac_priv *priv = netdev_priv(ndev);
3794         int ret = 0;
3795
3796         ret = eth_mac_addr(ndev, addr);
3797         if (ret)
3798                 return ret;
3799
3800         stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3801
3802         return ret;
3803 }
3804
3805 #ifdef CONFIG_DEBUG_FS
3806 static struct dentry *stmmac_fs_dir;
3807
3808 static void sysfs_display_ring(void *head, int size, int extend_desc,
3809                                struct seq_file *seq)
3810 {
3811         int i;
3812         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3813         struct dma_desc *p = (struct dma_desc *)head;
3814
3815         for (i = 0; i < size; i++) {
3816                 if (extend_desc) {
3817                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3818                                    i, (unsigned int)virt_to_phys(ep),
3819                                    le32_to_cpu(ep->basic.des0),
3820                                    le32_to_cpu(ep->basic.des1),
3821                                    le32_to_cpu(ep->basic.des2),
3822                                    le32_to_cpu(ep->basic.des3));
3823                         ep++;
3824                 } else {
3825                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3826                                    i, (unsigned int)virt_to_phys(p),
3827                                    le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3828                                    le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3829                         p++;
3830                 }
3831                 seq_printf(seq, "\n");
3832         }
3833 }
3834
3835 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3836 {
3837         struct net_device *dev = seq->private;
3838         struct stmmac_priv *priv = netdev_priv(dev);
3839         u32 rx_count = priv->plat->rx_queues_to_use;
3840         u32 tx_count = priv->plat->tx_queues_to_use;
3841         u32 queue;
3842
3843         for (queue = 0; queue < rx_count; queue++) {
3844                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3845
3846                 seq_printf(seq, "RX Queue %d:\n", queue);
3847
3848                 if (priv->extend_desc) {
3849                         seq_printf(seq, "Extended descriptor ring:\n");
3850                         sysfs_display_ring((void *)rx_q->dma_erx,
3851                                            DMA_RX_SIZE, 1, seq);
3852                 } else {
3853                         seq_printf(seq, "Descriptor ring:\n");
3854                         sysfs_display_ring((void *)rx_q->dma_rx,
3855                                            DMA_RX_SIZE, 0, seq);
3856                 }
3857         }
3858
3859         for (queue = 0; queue < tx_count; queue++) {
3860                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3861
3862                 seq_printf(seq, "TX Queue %d:\n", queue);
3863
3864                 if (priv->extend_desc) {
3865                         seq_printf(seq, "Extended descriptor ring:\n");
3866                         sysfs_display_ring((void *)tx_q->dma_etx,
3867                                            DMA_TX_SIZE, 1, seq);
3868                 } else {
3869                         seq_printf(seq, "Descriptor ring:\n");
3870                         sysfs_display_ring((void *)tx_q->dma_tx,
3871                                            DMA_TX_SIZE, 0, seq);
3872                 }
3873         }
3874
3875         return 0;
3876 }
3877
3878 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3879 {
3880         return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3881 }
3882
3883 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3884
3885 static const struct file_operations stmmac_rings_status_fops = {
3886         .owner = THIS_MODULE,
3887         .open = stmmac_sysfs_ring_open,
3888         .read = seq_read,
3889         .llseek = seq_lseek,
3890         .release = single_release,
3891 };
3892
3893 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3894 {
3895         struct net_device *dev = seq->private;
3896         struct stmmac_priv *priv = netdev_priv(dev);
3897
3898         if (!priv->hw_cap_support) {
3899                 seq_printf(seq, "DMA HW features not supported\n");
3900                 return 0;
3901         }
3902
3903         seq_printf(seq, "==============================\n");
3904         seq_printf(seq, "\tDMA HW features\n");
3905         seq_printf(seq, "==============================\n");
3906
3907         seq_printf(seq, "\t10/100 Mbps: %s\n",
3908                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3909         seq_printf(seq, "\t1000 Mbps: %s\n",
3910                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
3911         seq_printf(seq, "\tHalf duplex: %s\n",
3912                    (priv->dma_cap.half_duplex) ? "Y" : "N");
3913         seq_printf(seq, "\tHash Filter: %s\n",
3914                    (priv->dma_cap.hash_filter) ? "Y" : "N");
3915         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3916                    (priv->dma_cap.multi_addr) ? "Y" : "N");
3917         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3918                    (priv->dma_cap.pcs) ? "Y" : "N");
3919         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3920                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
3921         seq_printf(seq, "\tPMT Remote wake up: %s\n",
3922                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3923         seq_printf(seq, "\tPMT Magic Frame: %s\n",
3924                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3925         seq_printf(seq, "\tRMON module: %s\n",
3926                    (priv->dma_cap.rmon) ? "Y" : "N");
3927         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3928                    (priv->dma_cap.time_stamp) ? "Y" : "N");
3929         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3930                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
3931         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3932                    (priv->dma_cap.eee) ? "Y" : "N");
3933         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3934         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3935                    (priv->dma_cap.tx_coe) ? "Y" : "N");
3936         if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3937                 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3938                            (priv->dma_cap.rx_coe) ? "Y" : "N");
3939         } else {
3940                 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3941                            (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3942                 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3943                            (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3944         }
3945         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3946                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3947         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3948                    priv->dma_cap.number_rx_channel);
3949         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3950                    priv->dma_cap.number_tx_channel);
3951         seq_printf(seq, "\tEnhanced descriptors: %s\n",
3952                    (priv->dma_cap.enh_desc) ? "Y" : "N");
3953
3954         return 0;
3955 }
3956
3957 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3958 {
3959         return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3960 }
3961
3962 static const struct file_operations stmmac_dma_cap_fops = {
3963         .owner = THIS_MODULE,
3964         .open = stmmac_sysfs_dma_cap_open,
3965         .read = seq_read,
3966         .llseek = seq_lseek,
3967         .release = single_release,
3968 };
3969
3970 static int stmmac_init_fs(struct net_device *dev)
3971 {
3972         struct stmmac_priv *priv = netdev_priv(dev);
3973
3974         /* Create per netdev entries */
3975         priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3976
3977         if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3978                 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3979
3980                 return -ENOMEM;
3981         }
3982
3983         /* Entry to report DMA RX/TX rings */
3984         priv->dbgfs_rings_status =
3985                 debugfs_create_file("descriptors_status", 0444,
3986                                     priv->dbgfs_dir, dev,
3987                                     &stmmac_rings_status_fops);
3988
3989         if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3990                 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3991                 debugfs_remove_recursive(priv->dbgfs_dir);
3992
3993                 return -ENOMEM;
3994         }
3995
3996         /* Entry to report the DMA HW features */
3997         priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
3998                                                   priv->dbgfs_dir,
3999                                                   dev, &stmmac_dma_cap_fops);
4000
4001         if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4002                 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4003                 debugfs_remove_recursive(priv->dbgfs_dir);
4004
4005                 return -ENOMEM;
4006         }
4007
4008         return 0;
4009 }
4010
4011 static void stmmac_exit_fs(struct net_device *dev)
4012 {
4013         struct stmmac_priv *priv = netdev_priv(dev);
4014
4015         debugfs_remove_recursive(priv->dbgfs_dir);
4016 }
4017 #endif /* CONFIG_DEBUG_FS */
4018
4019 static const struct net_device_ops stmmac_netdev_ops = {
4020         .ndo_open = stmmac_open,
4021         .ndo_start_xmit = stmmac_xmit,
4022         .ndo_stop = stmmac_release,
4023         .ndo_change_mtu = stmmac_change_mtu,
4024         .ndo_fix_features = stmmac_fix_features,
4025         .ndo_set_features = stmmac_set_features,
4026         .ndo_set_rx_mode = stmmac_set_rx_mode,
4027         .ndo_tx_timeout = stmmac_tx_timeout,
4028         .ndo_do_ioctl = stmmac_ioctl,
4029         .ndo_setup_tc = stmmac_setup_tc,
4030 #ifdef CONFIG_NET_POLL_CONTROLLER
4031         .ndo_poll_controller = stmmac_poll_controller,
4032 #endif
4033         .ndo_set_mac_address = stmmac_set_mac_address,
4034 };
4035
4036 static void stmmac_reset_subtask(struct stmmac_priv *priv)
4037 {
4038         if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4039                 return;
4040         if (test_bit(STMMAC_DOWN, &priv->state))
4041                 return;
4042
4043         netdev_err(priv->dev, "Reset adapter.\n");
4044
4045         rtnl_lock();
4046         netif_trans_update(priv->dev);
4047         while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4048                 usleep_range(1000, 2000);
4049
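        /* Bounce the interface under RTNL to bring the hardware back to a
         * sane state.
         */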
4050         set_bit(STMMAC_DOWN, &priv->state);
4051         dev_close(priv->dev);
4052         dev_open(priv->dev);
4053         clear_bit(STMMAC_DOWN, &priv->state);
4054         clear_bit(STMMAC_RESETING, &priv->state);
4055         rtnl_unlock();
4056 }
4057
4058 static void stmmac_service_task(struct work_struct *work)
4059 {
4060         struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4061                         service_task);
4062
4063         stmmac_reset_subtask(priv);
4064         clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4065 }
4066
4067 /**
4068  *  stmmac_hw_init - Init the MAC device
4069  *  @priv: driver private structure
4070  *  Description: this function is to configure the MAC device according to
4071  *  some platform parameters or the HW capability register. It prepares the
4072  *  driver to use either ring or chain modes and to setup either enhanced or
4073  *  normal descriptors.
4074  */
4075 static int stmmac_hw_init(struct stmmac_priv *priv)
4076 {
4077         int ret;
4078
4079         /* dwmac-sun8i only works in chain mode */
4080         if (priv->plat->has_sun8i)
4081                 chain_mode = 1;
4082         priv->chain_mode = chain_mode;
4083
4084         /* Initialize HW Interface */
4085         ret = stmmac_hwif_init(priv);
4086         if (ret)
4087                 return ret;
4088
4089         /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4090         priv->hw_cap_support = stmmac_get_hw_features(priv);
4091         if (priv->hw_cap_support) {
4092                 dev_info(priv->device, "DMA HW capability register supported\n");
4093
4094                 /* We can override some gmac/dma configuration fields
4095                  * (e.g. enh_desc, tx_coe) that are passed through the
4096                  * platform data with the values from the HW capability
4097                  * register (if supported).
4098                  */
4099                 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4100                 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4101                 priv->hw->pmt = priv->plat->pmt;
4102
4103                 /* TXCOE doesn't work in thresh DMA mode */
4104                 if (priv->plat->force_thresh_dma_mode)
4105                         priv->plat->tx_coe = 0;
4106                 else
4107                         priv->plat->tx_coe = priv->dma_cap.tx_coe;
4108
4109                 /* In case of GMAC4 rx_coe is from HW cap register. */
4110                 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4111
4112                 if (priv->dma_cap.rx_coe_type2)
4113                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4114                 else if (priv->dma_cap.rx_coe_type1)
4115                         priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4116
4117         } else {
4118                 dev_info(priv->device, "No HW DMA feature register supported\n");
4119         }
4120
4121         if (priv->plat->rx_coe) {
4122                 priv->hw->rx_csum = priv->plat->rx_coe;
4123                 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4124                 if (priv->synopsys_id < DWMAC_CORE_4_00)
4125                         dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4126         }
4127         if (priv->plat->tx_coe)
4128                 dev_info(priv->device, "TX Checksum insertion supported\n");
4129
4130         if (priv->plat->pmt) {
4131                 dev_info(priv->device, "Wake-Up On LAN supported\n");
4132                 device_set_wakeup_capable(priv->device, 1);
4133         }
4134
4135         if (priv->dma_cap.tsoen)
4136                 dev_info(priv->device, "TSO supported\n");
4137
4138         /* Run HW quirks, if any */
4139         if (priv->hwif_quirks) {
4140                 ret = priv->hwif_quirks(priv);
4141                 if (ret)
4142                         return ret;
4143         }
4144
4145         return 0;
4146 }
4147
4148 /**
4149  * stmmac_dvr_probe
4150  * @device: device pointer
4151  * @plat_dat: platform data pointer
4152  * @res: stmmac resource pointer
4153  * Description: this is the main probe function: it allocates the
4154  * net_device, initializes the private structure and registers the device.
4155  * Return:
4156  * 0 on success, otherwise a negative errno.
4157  */
4158 int stmmac_dvr_probe(struct device *device,
4159                      struct plat_stmmacenet_data *plat_dat,
4160                      struct stmmac_resources *res)
4161 {
4162         struct net_device *ndev = NULL;
4163         struct stmmac_priv *priv;
4164         int ret = 0;
4165         u32 queue;
4166
4167         ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4168                                   MTL_MAX_TX_QUEUES,
4169                                   MTL_MAX_RX_QUEUES);
4170         if (!ndev)
4171                 return -ENOMEM;
4172
4173         SET_NETDEV_DEV(ndev, device);
4174
4175         priv = netdev_priv(ndev);
4176         priv->device = device;
4177         priv->dev = ndev;
4178
4179         stmmac_set_ethtool_ops(ndev);
4180         priv->pause = pause;
4181         priv->plat = plat_dat;
4182         priv->ioaddr = res->addr;
4183         priv->dev->base_addr = (unsigned long)res->addr;
4184
4185         priv->dev->irq = res->irq;
4186         priv->wol_irq = res->wol_irq;
4187         priv->lpi_irq = res->lpi_irq;
4188
4189         if (res->mac)
4190                 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4191
4192         dev_set_drvdata(device, priv->dev);
4193
4194         /* Verify driver arguments */
4195         stmmac_verify_args();
4196
4197         /* Allocate workqueue */
4198         priv->wq = create_singlethread_workqueue("stmmac_wq");
4199         if (!priv->wq) {
4200                 dev_err(priv->device, "failed to create workqueue\n");
                     ret = -ENOMEM;
4201                 goto error_wq;
4202         }
4203
4204         INIT_WORK(&priv->service_task, stmmac_service_task);
4205
4206         /* Override with kernel parameters if supplied XXX CRS XXX
4207          * this needs to have multiple instances
4208          */
4209         if ((phyaddr >= 0) && (phyaddr <= 31))
4210                 priv->plat->phy_addr = phyaddr;
4211
4212         if (priv->plat->stmmac_rst) {
4213                 ret = reset_control_assert(priv->plat->stmmac_rst);
4214                 reset_control_deassert(priv->plat->stmmac_rst);
4215                 /* Some reset controllers provide only a reset callback instead
4216                  * of the assert + deassert callback pair.
4217                  */
4218                 if (ret == -ENOTSUPP)
4219                         reset_control_reset(priv->plat->stmmac_rst);
4220         }
4221
4222         /* Init MAC and get the capabilities */
4223         ret = stmmac_hw_init(priv);
4224         if (ret)
4225                 goto error_hw_init;
4226
4227         /* Configure real RX and TX queues */
4228         netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4229         netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4230
4231         ndev->netdev_ops = &stmmac_netdev_ops;
4232
4233         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4234                             NETIF_F_RXCSUM;
4235
4236         ret = stmmac_tc_init(priv, priv);
4237         if (!ret)
4238                 ndev->hw_features |= NETIF_F_HW_TC;
4240
4241         if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4242                 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4243                 priv->tso = true;
4244                 dev_info(priv->device, "TSO feature enabled\n");
4245         }
4246         ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4247         ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4248 #ifdef STMMAC_VLAN_TAG_USED
4249         /* Both mac100 and gmac support receive VLAN tag detection */
4250         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4251 #endif
4252         priv->msg_enable = netif_msg_init(debug, default_msg_level);
4253
4254         /* MTU range: 46 - hw-specific max */
4255         ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4256         if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4257                 ndev->max_mtu = JUMBO_LEN;
4258         else
4259                 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4260         /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4261          * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4262          */
4263         if ((priv->plat->maxmtu < ndev->max_mtu) &&
4264             (priv->plat->maxmtu >= ndev->min_mtu))
4265                 ndev->max_mtu = priv->plat->maxmtu;
4266         else if (priv->plat->maxmtu < ndev->min_mtu)
4267                 dev_warn(priv->device,
4268                          "%s: warning: maxmtu has an invalid value (%d)\n",
4269                          __func__, priv->plat->maxmtu);
4270
4271         if (flow_ctrl)
4272                 priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4273
4274         /* The RX watchdog is available on cores newer than 3.40.
4275          * In some cases, for example on buggy HW, this feature
4276          * has to be disabled; this can be done by setting the
4277          * riwt_off field in the platform data.
4278          */
4279         if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4280                 priv->use_riwt = 1;
4281                 dev_info(priv->device,
4282                          "Enable RX Mitigation via HW Watchdog Timer\n");
4283         }
4284
4285         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4286                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4287
4288                 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4289                                (8 * priv->plat->rx_queues_to_use));
4290         }
4291
4292         mutex_init(&priv->lock);
4293
4294         /* If a specific clk_csr value is passed from the platform, the
4295          * CSR Clock Range selection is fixed and cannot be changed at
4296          * run-time. Otherwise the driver will try to set the MDC clock
4297          * dynamically according to the actual csr clock input.
4298          */
4300         if (!priv->plat->clk_csr)
4301                 stmmac_clk_csr_set(priv);
4302         else
4303                 priv->clk_csr = priv->plat->clk_csr;
4304
4305         stmmac_check_pcs_mode(priv);
4306
4307         if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4308             priv->hw->pcs != STMMAC_PCS_TBI &&
4309             priv->hw->pcs != STMMAC_PCS_RTBI) {
4310                 /* MDIO bus Registration */
4311                 ret = stmmac_mdio_register(ndev);
4312                 if (ret < 0) {
4313                         dev_err(priv->device,
4314                                 "%s: MDIO bus (id: %d) registration failed\n",
4315                                 __func__, priv->plat->bus_id);
4316                         goto error_mdio_register;
4317                 }
4318         }
4319
4320         ret = register_netdev(ndev);
4321         if (ret) {
4322                 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4323                         __func__, ret);
4324                 goto error_netdev_register;
4325         }
4326
4327         return ret;
4328
4329 error_netdev_register:
4330         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4331             priv->hw->pcs != STMMAC_PCS_TBI &&
4332             priv->hw->pcs != STMMAC_PCS_RTBI)
4333                 stmmac_mdio_unregister(ndev);
4334 error_mdio_register:
4335         for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4336                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4337
4338                 netif_napi_del(&rx_q->napi);
4339         }
4340 error_hw_init:
4341         destroy_workqueue(priv->wq);
4342 error_wq:
4343         free_netdev(ndev);
4344
4345         return ret;
4346 }
4347 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
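
     /* Glue drivers (e.g. dwmac-generic) call stmmac_dvr_probe() from their own
      * platform probe once the platform data and resources have been gathered.
      * A minimal sketch, assuming the stmmac_platform helpers
      * stmmac_get_platform_resources() and stmmac_probe_config_dt() are used
      * (error unwinding trimmed; "foo" is a hypothetical glue driver):
      *
      *	static int foo_dwmac_probe(struct platform_device *pdev)
      *	{
      *		struct plat_stmmacenet_data *plat_dat;
      *		struct stmmac_resources stmmac_res;
      *		int ret;
      *
      *		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
      *		if (ret)
      *			return ret;
      *
      *		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
      *		if (IS_ERR(plat_dat))
      *			return PTR_ERR(plat_dat);
      *
      *		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
      *	}
      */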
4348
4349 /**
4350  * stmmac_dvr_remove
4351  * @dev: device pointer
4352  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4353  * changes the link status and releases the DMA descriptor rings.
4354  */
4355 int stmmac_dvr_remove(struct device *dev)
4356 {
4357         struct net_device *ndev = dev_get_drvdata(dev);
4358         struct stmmac_priv *priv = netdev_priv(ndev);
4359
4360         netdev_info(priv->dev, "%s: removing driver\n", __func__);
4361
4362         stmmac_stop_all_dma(priv);
4363
4364         stmmac_mac_set(priv, priv->ioaddr, false);
4365         netif_carrier_off(ndev);
4366         unregister_netdev(ndev);
4367         if (priv->plat->stmmac_rst)
4368                 reset_control_assert(priv->plat->stmmac_rst);
4369         clk_disable_unprepare(priv->plat->pclk);
4370         clk_disable_unprepare(priv->plat->stmmac_clk);
4371         if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4372             priv->hw->pcs != STMMAC_PCS_TBI &&
4373             priv->hw->pcs != STMMAC_PCS_RTBI)
4374                 stmmac_mdio_unregister(ndev);
4375         destroy_workqueue(priv->wq);
4376         mutex_destroy(&priv->lock);
4377         free_netdev(ndev);
4378
4379         return 0;
4380 }
4381 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
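
     /* A glue driver's .remove typically just calls stmmac_dvr_remove() on
      * &pdev->dev and then releases any clocks or resources it acquired in its
      * own probe; the common stmmac_platform code follows this pattern.
      */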
4382
4383 /**
4384  * stmmac_suspend - suspend callback
4385  * @dev: device pointer
4386  * Description: this is the function to suspend the device and it is called
4387  * by the platform driver to stop the network queue, release the resources,
4388  * program the PMT register (for WoL), clean and release driver resources.
4389  */
4390 int stmmac_suspend(struct device *dev)
4391 {
4392         struct net_device *ndev = dev_get_drvdata(dev);
4393         struct stmmac_priv *priv = netdev_priv(ndev);
4394
4395         if (!ndev || !netif_running(ndev))
4396                 return 0;
4397
4398         if (ndev->phydev)
4399                 phy_stop(ndev->phydev);
4400
4401         mutex_lock(&priv->lock);
4402
4403         netif_device_detach(ndev);
4404         stmmac_stop_all_queues(priv);
4405
4406         stmmac_disable_all_queues(priv);
4407
4408         /* Stop TX/RX DMA */
4409         stmmac_stop_all_dma(priv);
4410
4411         /* Enable Power down mode by programming the PMT regs */
4412         if (device_may_wakeup(priv->device)) {
4413                 stmmac_pmt(priv, priv->hw, priv->wolopts);
4414                 priv->irq_wake = 1;
4415         } else {
4416                 stmmac_mac_set(priv, priv->ioaddr, false);
4417                 pinctrl_pm_select_sleep_state(priv->device);
4418                 /* Disable the clocks since PMT wake-up is not used */
4419                 clk_disable(priv->plat->pclk);
4420                 clk_disable(priv->plat->stmmac_clk);
4421         }
4422         mutex_unlock(&priv->lock);
4423
4424         priv->oldlink = false;
4425         priv->speed = SPEED_UNKNOWN;
4426         priv->oldduplex = DUPLEX_UNKNOWN;
4427         return 0;
4428 }
4429 EXPORT_SYMBOL_GPL(stmmac_suspend);
4430
4431 /**
4432  * stmmac_reset_queues_param - reset queue parameters
4433  * @dev: device pointer
4434  * @priv: driver private structure
4435 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4436 {
4437         u32 rx_cnt = priv->plat->rx_queues_to_use;
4438         u32 tx_cnt = priv->plat->tx_queues_to_use;
4439         u32 queue;
4440
4441         for (queue = 0; queue < rx_cnt; queue++) {
4442                 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4443
4444                 rx_q->cur_rx = 0;
4445                 rx_q->dirty_rx = 0;
4446         }
4447
4448         for (queue = 0; queue < tx_cnt; queue++) {
4449                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4450
4451                 tx_q->cur_tx = 0;
4452                 tx_q->dirty_tx = 0;
4453                 tx_q->mss = 0;
4454         }
4455 }
4456
4457 /**
4458  * stmmac_resume - resume callback
4459  * @dev: device pointer
4460  * Description: when resume this function is invoked to setup the DMA and CORE
4461  * in a usable state.
4462  */
4463 int stmmac_resume(struct device *dev)
4464 {
4465         struct net_device *ndev = dev_get_drvdata(dev);
4466         struct stmmac_priv *priv = netdev_priv(ndev);
4467
4468         if (!netif_running(ndev))
4469                 return 0;
4470
4471         /* The Power Down bit in the PMT register is cleared
4472          * automatically as soon as a magic packet or a Wake-up frame
4473          * is received. Anyway, it's better to clear this bit manually
4474          * because it can cause problems when resuming from another
4475          * device (e.g. serial console).
4476          */
4477         if (device_may_wakeup(priv->device)) {
4478                 mutex_lock(&priv->lock);
4479                 stmmac_pmt(priv, priv->hw, 0);
4480                 mutex_unlock(&priv->lock);
4481                 priv->irq_wake = 0;
4482         } else {
4483                 pinctrl_pm_select_default_state(priv->device);
4484                 /* enable the clk previously disabled */
4485                 clk_enable(priv->plat->stmmac_clk);
4486                 clk_enable(priv->plat->pclk);
4487                 /* reset the phy so that it's ready */
4488                 if (priv->mii)
4489                         stmmac_mdio_reset(priv->mii);
4490         }
4491
4492         netif_device_attach(ndev);
4493
4494         mutex_lock(&priv->lock);
4495
4496         stmmac_reset_queues_param(priv);
4497
4498         stmmac_clear_descriptors(priv);
4499
4500         stmmac_hw_setup(ndev, false);
4501         stmmac_init_tx_coalesce(priv);
4502         stmmac_set_rx_mode(ndev);
4503
4504         stmmac_enable_all_queues(priv);
4505
4506         stmmac_start_all_queues(priv);
4507
4508         mutex_unlock(&priv->lock);
4509
4510         if (ndev->phydev)
4511                 phy_start(ndev->phydev);
4512
4513         return 0;
4514 }
4515 EXPORT_SYMBOL_GPL(stmmac_resume);
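
     /* stmmac_suspend()/stmmac_resume() are meant to be wired into a dev_pm_ops
      * table by the bus glue (the platform layer wraps them so it can also gate
      * its own clocks). A minimal sketch, assuming the callbacks are used
      * directly:
      *
      *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops, stmmac_suspend, stmmac_resume);
      *
      * which is then referenced from the driver structure via .driver.pm.
      */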
4516
4517 #ifndef MODULE
4518 static int __init stmmac_cmdline_opt(char *str)
4519 {
4520         char *opt;
4521
4522         if (!str || !*str)
4523                 return -EINVAL;
4524         while ((opt = strsep(&str, ",")) != NULL) {
4525                 if (!strncmp(opt, "debug:", 6)) {
4526                         if (kstrtoint(opt + 6, 0, &debug))
4527                                 goto err;
4528                 } else if (!strncmp(opt, "phyaddr:", 8)) {
4529                         if (kstrtoint(opt + 8, 0, &phyaddr))
4530                                 goto err;
4531                 } else if (!strncmp(opt, "buf_sz:", 7)) {
4532                         if (kstrtoint(opt + 7, 0, &buf_sz))
4533                                 goto err;
4534                 } else if (!strncmp(opt, "tc:", 3)) {
4535                         if (kstrtoint(opt + 3, 0, &tc))
4536                                 goto err;
4537                 } else if (!strncmp(opt, "watchdog:", 9)) {
4538                         if (kstrtoint(opt + 9, 0, &watchdog))
4539                                 goto err;
4540                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4541                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
4542                                 goto err;
4543                 } else if (!strncmp(opt, "pause:", 6)) {
4544                         if (kstrtoint(opt + 6, 0, &pause))
4545                                 goto err;
4546                 } else if (!strncmp(opt, "eee_timer:", 10)) {
4547                         if (kstrtoint(opt + 10, 0, &eee_timer))
4548                                 goto err;
4549                 } else if (!strncmp(opt, "chain_mode:", 11)) {
4550                         if (kstrtoint(opt + 11, 0, &chain_mode))
4551                                 goto err;
4552                 }
4553         }
4554         return 0;
4555
4556 err:
4557         pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4558         return -EINVAL;
4559 }
4560
4561 __setup("stmmaceth=", stmmac_cmdline_opt);
4562 #endif /* MODULE */
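
     /* When the driver is built in, the options above can be passed on the
      * kernel command line, e.g.:
      *
      *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
      *
      * Unrecognized keys are silently ignored; a malformed value aborts parsing
      * with -EINVAL.
      */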
4563
4564 static int __init stmmac_init(void)
4565 {
4566 #ifdef CONFIG_DEBUG_FS
4567         /* Create debugfs main directory if it doesn't exist yet */
4568         if (!stmmac_fs_dir) {
4569                 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4570
4571                 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4572                         pr_err("ERROR %s, debugfs create directory failed\n",
4573                                STMMAC_RESOURCE_NAME);
4574
4575                         return -ENOMEM;
4576                 }
4577         }
4578 #endif
4579
4580         return 0;
4581 }
4582
4583 static void __exit stmmac_exit(void)
4584 {
4585 #ifdef CONFIG_DEBUG_FS
4586         debugfs_remove_recursive(stmmac_fs_dir);
4587 #endif
4588 }
4589
4590 module_init(stmmac_init)
4591 module_exit(stmmac_exit)
4592
4593 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4594 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4595 MODULE_LICENSE("GPL");