net: ethernet: ti: cpsw: Fix inconsistent IS_ERR and PTR_ERR in cpsw_probe()
drivers/net/ethernet/ti/cpsw.c (linux-2.6-microblaze.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
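
/* These are classic module parameters, so they can only be set at module
 * load time, e.g. (illustrative values):
 *
 *   modprobe cpsw debug_level=0x3f ale_ageout=30 descs_pool_size=512
 *
 * Only descs_pool_size (perm 0444) is exported read-only through sysfs;
 * the others (perm 0) get no sysfs entry at all.
 */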

#define for_each_slave(priv, func, arg...)                              \
        do {                                                            \
                struct cpsw_slave *slave;                               \
                struct cpsw_common *cpsw = (priv)->cpsw;                \
                int n;                                                  \
                if (cpsw->data.dual_emac)                               \
                        (func)((cpsw)->slaves + priv->emac_port, ##arg);\
                else                                                    \
                        for (n = cpsw->data.slaves,                     \
                                        slave = cpsw->slaves;           \
                                        n; n--)                         \
                                (func)(slave++, ##arg);                 \
        } while (0)
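
/* Usage sketch: for_each_slave(priv, cpsw_slave_stop, cpsw) applies @func
 * to every relevant slave. In dual EMAC mode only the slave bound to this
 * netdev (priv->emac_port) is visited; in switch mode all of cpsw->slaves[]
 * are. Any extra arguments are forwarded to @func via ##arg.
 */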

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
                                    __be16 proto, u16 vid);

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        struct cpsw_ale *ale = cpsw->ale;
        int i;

        if (cpsw->data.dual_emac) {
                bool flag = false;

                /* Enabling promiscuous mode for one interface applies to
                 * both interfaces, as the two interfaces share the same
                 * hardware resource.
                 */
                for (i = 0; i < cpsw->data.slaves; i++)
                        if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
                                flag = true;

                if (!enable && flag) {
                        enable = true;
                        dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
                }

                if (enable) {
                        /* Enable Bypass */
                        cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
                        /* Disable Bypass */
                        cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
                        dev_dbg(&ndev->dev, "promiscuity disabled\n");
                }
        } else {
                if (enable) {
                        unsigned long timeout = jiffies + HZ;

                        /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
                        for (i = 0; i <= cpsw->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 1);
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NO_SA_UPDATE, 1);
                        }

                        /* Clear All Untouched entries */
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
                        do {
                                cpu_relax();
                                if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
                                        break;
                        } while (time_after(timeout, jiffies));
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
                        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
                        /* Don't Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

                        /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
                        for (i = 0; i <= cpsw->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 0);
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NO_SA_UPDATE, 0);
                        }
                        dev_dbg(&ndev->dev, "promiscuity disabled\n");
                }
        }
}

/**
 * cpsw_set_mc - add or delete a multicast table entry
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
                       int vid, int add)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int mask, flags, ret;

        if (vid < 0) {
                if (cpsw->data.dual_emac)
                        vid = cpsw->slaves[priv->emac_port].port_vlan;
                else
                        vid = 0;
        }

        mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
        flags = vid ? ALE_VLAN : 0;

        if (add)
                ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
        else
                ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

        return ret;
}
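
/* Note the port mask above: in dual EMAC mode the entry targets the host
 * port only, so matching packets are delivered to the CPU but not forwarded
 * between external ports; in switch mode it targets ALE_ALL_PORTS.
 */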

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
        struct addr_sync_ctx *sync_ctx = ctx;
        struct netdev_hw_addr *ha;
        int found = 0, ret = 0;

        if (!vdev || !(vdev->flags & IFF_UP))
                return 0;

        /* vlan address is relevant if its sync_cnt != 0 */
        netdev_for_each_mc_addr(ha, vdev) {
                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
                        found = ha->sync_cnt;
                        break;
                }
        }

        if (found)
                sync_ctx->consumed++;

        if (sync_ctx->flush) {
                if (!found)
                        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
                return 0;
        }

        if (found)
                ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

        return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
        struct addr_sync_ctx sync_ctx;
        int ret;

        sync_ctx.consumed = 0;
        sync_ctx.addr = addr;
        sync_ctx.ndev = ndev;
        sync_ctx.flush = 0;

        ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
        if (sync_ctx.consumed < num && !ret)
                ret = cpsw_set_mc(ndev, addr, -1, 1);

        return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
        struct addr_sync_ctx sync_ctx;

        sync_ctx.consumed = 0;
        sync_ctx.addr = addr;
        sync_ctx.ndev = ndev;
        sync_ctx.flush = 1;

        vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
        if (sync_ctx.consumed == num)
                cpsw_set_mc(ndev, addr, -1, 0);

        return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
        struct addr_sync_ctx *sync_ctx = ctx;
        struct netdev_hw_addr *ha;
        int found = 0;

        if (!vdev || !(vdev->flags & IFF_UP))
                return 0;

        /* vlan address is relevant if its sync_cnt != 0 */
        netdev_for_each_mc_addr(ha, vdev) {
                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
                        found = ha->sync_cnt;
                        break;
                }
        }

        if (!found)
                return 0;

        sync_ctx->consumed++;
        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
        return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
        struct addr_sync_ctx sync_ctx;

        sync_ctx.addr = addr;
        sync_ctx.ndev = ndev;
        sync_ctx.consumed = 0;

        vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
        if (sync_ctx.consumed < num)
                cpsw_set_mc(ndev, addr, -1, 0);

        return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_port = -1;

        if (cpsw->data.dual_emac)
                slave_port = priv->emac_port + 1;

        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
                cpsw_set_promiscious(ndev, true);
                cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
                return;
        } else {
                /* Disable promiscuous mode */
                cpsw_set_promiscious(ndev, false);
        }

        /* Restore allmulti on vlans if necessary */
        cpsw_ale_set_allmulti(cpsw->ale,
                              ndev->flags & IFF_ALLMULTI, slave_port);

        /* add/remove mcast address either for real netdev or for vlan */
        __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
                               cpsw_del_mc_addr);
}

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
        writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
        writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

        cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
        writel_relaxed(0, &cpsw->wr_regs->tx_en);
        writel_relaxed(0, &cpsw->wr_regs->rx_en);

        cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

void cpsw_tx_handler(void *token, int len, int status)
{
        struct netdev_queue     *txq;
        struct sk_buff          *skb = token;
        struct net_device       *ndev = skb->dev;
        struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);

        /* Check whether the queue is stopped due to stalled tx dma; if it
         * is, wake the queue since we now have a free tx descriptor.
         */
        txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
        if (unlikely(netif_tx_queue_stopped(txq)))
                netif_tx_wake_queue(txq);

        cpts_tx_timestamp(cpsw->cpts, skb);
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
}

static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
        struct cpsw_priv *priv = netdev_priv(skb->dev);
        struct cpsw_common *cpsw = priv->cpsw;
        u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
        u16 vtag, vid, prio, pkt_type;

        /* Remove VLAN header encapsulation word */
        skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

        pkt_type = (rx_vlan_encap_hdr >>
                    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
                    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
        /* Ignore unknown & Priority-tagged packets */
        if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
            pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
                return;

        vid = (rx_vlan_encap_hdr >>
               CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
               VLAN_VID_MASK;
        /* Ignore vid 0 and pass packet as is */
        if (!vid)
                return;
        /* Ignore default vlans in dual mac mode */
        if (cpsw->data.dual_emac &&
            vid == cpsw->slaves[priv->emac_port].port_vlan)
                return;

        prio = (rx_vlan_encap_hdr >>
                CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
                CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

        vtag = (prio << VLAN_PRIO_SHIFT) | vid;
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

        /* strip vlan tag for VLAN-tagged packet */
        if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
                memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
                skb_pull(skb, VLAN_HLEN);
        }
}
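
/* The word parsed above is a CPSW-inserted RX VLAN encapsulation header
 * carrying three fields (per the CPSW_RX_VLAN_ENCAP_HDR_* shift/mask
 * constants): a packet type (untagged/VLAN-tagged/priority-tagged/reserved),
 * the 12-bit VID and the 3-bit priority. The header word itself is always
 * stripped; the in-frame 802.1Q tag is additionally stripped only for the
 * VLAN-tagged packet type.
 */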

static void cpsw_rx_handler(void *token, int len, int status)
{
        struct cpdma_chan       *ch;
        struct sk_buff          *skb = token;
        struct sk_buff          *new_skb;
        struct net_device       *ndev = skb->dev;
        int                     ret = 0, port;
        struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
        struct cpsw_priv        *priv;

        if (cpsw->data.dual_emac) {
                port = CPDMA_RX_SOURCE_PORT(status);
                if (port) {
                        ndev = cpsw->slaves[--port].ndev;
                        skb->dev = ndev;
                }
        }

        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
                /* In dual emac mode check for all interfaces */
                if (cpsw->data.dual_emac && cpsw->usage_count &&
                    (status >= 0)) {
                        /* The packet received is for an interface which
                         * is already down while the other interface is up
                         * and running. Instead of freeing the skb, which
                         * would reduce the number of rx descriptors in
                         * the DMA engine, requeue it back to cpdma.
                         */
                        new_skb = skb;
                        goto requeue;
                }

                /* the interface is going down, skbs are purged */
                dev_kfree_skb_any(skb);
                return;
        }

        new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
        if (new_skb) {
                skb_copy_queue_mapping(new_skb, skb);
                skb_put(skb, len);
                if (status & CPDMA_RX_VLAN_ENCAP)
                        cpsw_rx_vlan_encap(skb);
                priv = netdev_priv(ndev);
                if (priv->rx_ts_enabled)
                        cpts_rx_timestamp(cpsw->cpts, skb);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
                ndev->stats.rx_bytes += len;
                ndev->stats.rx_packets++;
                kmemleak_not_leak(new_skb);
        } else {
                ndev->stats.rx_dropped++;
                new_skb = skb;
        }

requeue:
        if (netif_dormant(ndev)) {
                dev_kfree_skb_any(new_skb);
                return;
        }

        ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
        ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
                                skb_tailroom(new_skb), 0);
        if (WARN_ON(ret < 0))
                dev_kfree_skb_any(new_skb);
}

void cpsw_split_res(struct cpsw_common *cpsw)
{
        u32 consumed_rate = 0, bigest_rate = 0;
        struct cpsw_vector *txv = cpsw->txv;
        int i, ch_weight, rlim_ch_num = 0;
        int budget, bigest_rate_ch = 0;
        u32 ch_rate, max_rate;
        int ch_budget = 0;

        for (i = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(txv[i].ch);
                if (!ch_rate)
                        continue;

                rlim_ch_num++;
                consumed_rate += ch_rate;
        }

        if (cpsw->tx_ch_num == rlim_ch_num) {
                max_rate = consumed_rate;
        } else if (!rlim_ch_num) {
                ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
                bigest_rate = 0;
                max_rate = consumed_rate;
        } else {
                max_rate = cpsw->speed * 1000;

                /* if max_rate is less than expected due to reduced link
                 * speed, split proportionally according to the next
                 * potential max speed
                 */
                if (max_rate < consumed_rate)
                        max_rate *= 10;

                if (max_rate < consumed_rate)
                        max_rate *= 10;

                ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
                ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
                            (cpsw->tx_ch_num - rlim_ch_num);
                bigest_rate = (max_rate - consumed_rate) /
                              (cpsw->tx_ch_num - rlim_ch_num);
        }

        /* split tx weight/budget */
        budget = CPSW_POLL_WEIGHT;
        for (i = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(txv[i].ch);
                if (ch_rate) {
                        txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
                        if (!txv[i].budget)
                                txv[i].budget++;
                        if (ch_rate > bigest_rate) {
                                bigest_rate_ch = i;
                                bigest_rate = ch_rate;
                        }

                        ch_weight = (ch_rate * 100) / max_rate;
                        if (!ch_weight)
                                ch_weight++;
                        cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
                } else {
                        txv[i].budget = ch_budget;
                        if (!bigest_rate_ch)
                                bigest_rate_ch = i;
                        cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
                }

                budget -= txv[i].budget;
        }

        if (budget)
                txv[bigest_rate_ch].budget += budget;

        /* split rx budget */
        budget = CPSW_POLL_WEIGHT;
        ch_budget = budget / cpsw->rx_ch_num;
        for (i = 0; i < cpsw->rx_ch_num; i++) {
                cpsw->rxv[i].budget = ch_budget;
                budget -= ch_budget;
        }

        if (budget)
                cpsw->rxv[0].budget += budget;
}
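
/* Worked example (hypothetical numbers, assuming CPSW_POLL_WEIGHT == 64):
 * two tx channels on a 100 Mbit/s link, ch0 rate limited to 20000 kbit/s,
 * ch1 unlimited. Then max_rate = 100000 and consumed_rate = 20000, so ch0
 * gets budget (20000 * 64) / 100000 = 12 and cpdma weight 20, while ch1
 * gets the remaining (64 - 12) / 1 = 52 budget and weight 0.
 */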

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
        struct cpsw_common *cpsw = dev_id;

        writel(0, &cpsw->wr_regs->tx_en);
        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

        if (cpsw->quirk_irq) {
                disable_irq_nosync(cpsw->irqs_table[1]);
                cpsw->tx_irq_disabled = true;
        }

        napi_schedule(&cpsw->napi_tx);
        return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
        struct cpsw_common *cpsw = dev_id;

        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
        writel(0, &cpsw->wr_regs->rx_en);

        if (cpsw->quirk_irq) {
                disable_irq_nosync(cpsw->irqs_table[0]);
                cpsw->rx_irq_disabled = true;
        }

        napi_schedule(&cpsw->napi_rx);
        return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
        u32                     ch_map;
        int                     num_tx, cur_budget, ch;
        struct cpsw_common      *cpsw = napi_to_cpsw(napi_tx);
        struct cpsw_vector      *txv;

        /* process every unprocessed channel */
        ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
        for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
                if (!(ch_map & 0x80))
                        continue;

                txv = &cpsw->txv[ch];
                if (unlikely(txv->budget > budget - num_tx))
                        cur_budget = budget - num_tx;
                else
                        cur_budget = txv->budget;

                num_tx += cpdma_chan_process(txv->ch, cur_budget);
                if (num_tx >= budget)
                        break;
        }

        if (num_tx < budget) {
                napi_complete(napi_tx);
                writel(0xff, &cpsw->wr_regs->tx_en);
        }

        return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
        struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
        int num_tx;

        num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
        if (num_tx < budget) {
                napi_complete(napi_tx);
                writel(0xff, &cpsw->wr_regs->tx_en);
                if (cpsw->tx_irq_disabled) {
                        cpsw->tx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[1]);
                }
        }

        return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
        u32                     ch_map;
        int                     num_rx, cur_budget, ch;
        struct cpsw_common      *cpsw = napi_to_cpsw(napi_rx);
        struct cpsw_vector      *rxv;

        /* process every unprocessed channel */
        ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
        for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
                if (!(ch_map & 0x01))
                        continue;

                rxv = &cpsw->rxv[ch];
                if (unlikely(rxv->budget > budget - num_rx))
                        cur_budget = budget - num_rx;
                else
                        cur_budget = rxv->budget;

                num_rx += cpdma_chan_process(rxv->ch, cur_budget);
                if (num_rx >= budget)
                        break;
        }

        if (num_rx < budget) {
                napi_complete_done(napi_rx, num_rx);
                writel(0xff, &cpsw->wr_regs->rx_en);
        }

        return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
        struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
        int num_rx;

        num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
        if (num_rx < budget) {
                napi_complete_done(napi_rx, num_rx);
                writel(0xff, &cpsw->wr_regs->rx_en);
                if (cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[0]);
                }
        }

        return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
        unsigned long timeout = jiffies + HZ;

        writel_relaxed(1, reg);
        do {
                cpu_relax();
        } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

        WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
                               struct cpsw_priv *priv)
{
        slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
        slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        u32 shift, mask, val;

        val = readl_relaxed(&cpsw->regs->ptype);

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
        mask = 7 << shift;
        val = val & mask;

        return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        u32 shift, mask, val;

        val = readl_relaxed(&cpsw->regs->ptype);

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
        mask = (1 << --fifo) << shift;
        val = on ? val | mask : val & ~mask;

        writel_relaxed(val, &cpsw->regs->ptype);
}
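
/* Each slave owns a 3-bit field in the PTYPE register, starting at
 * CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave_num, with one enable bit per
 * shapeable FIFO: cpsw_shp_is_off() tests the whole field, while
 * cpsw_fifo_shp_on() toggles the bit for @fifo (1-based, hence --fifo).
 */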

static void _cpsw_adjust_link(struct cpsw_slave *slave,
                              struct cpsw_priv *priv, bool *link)
{
        struct phy_device       *phy = slave->phy;
        u32                     mac_control = 0;
        u32                     slave_port;
        struct cpsw_common *cpsw = priv->cpsw;

        if (!phy)
                return;

        slave_port = cpsw_get_slave_port(slave->slave_num);

        if (phy->link) {
                mac_control = CPSW_SL_CTL_GMII_EN;

                if (phy->speed == 1000)
                        mac_control |= CPSW_SL_CTL_GIG;
                if (phy->duplex)
                        mac_control |= CPSW_SL_CTL_FULLDUPLEX;

                /* set speed_in input in case RMII mode is used in 100Mbps */
                if (phy->speed == 100)
                        mac_control |= CPSW_SL_CTL_IFCTL_A;
                /* in band mode only works in 10Mbps RGMII mode */
                else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
                        mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

                if (priv->rx_pause)
                        mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

                if (priv->tx_pause)
                        mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

                if (mac_control != slave->mac_control)
                        cpsw_sl_ctl_set(slave->mac_sl, mac_control);

                /* enable forwarding */
                cpsw_ale_control_set(cpsw->ale, slave_port,
                                     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

                *link = true;

                if (priv->shp_cfg_speed &&
                    priv->shp_cfg_speed != slave->phy->speed &&
                    !cpsw_shp_is_off(priv))
                        dev_warn(priv->dev,
                                 "Speed was changed, CBS shaper speeds are changed!");
        } else {
                mac_control = 0;
                /* disable forwarding */
                cpsw_ale_control_set(cpsw->ale, slave_port,
                                     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

                cpsw_sl_wait_for_idle(slave->mac_sl, 100);

                cpsw_sl_ctl_reset(slave->mac_sl);
        }

        if (mac_control != slave->mac_control)
                phy_print_status(phy);

        slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
        int i, speed;

        for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
                if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
                        speed += cpsw->slaves[i].phy->speed;

        return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
        int i, rlim_ch_num;
        int speed, ch_rate;

        /* re-split resources only in case speed was changed */
        speed = cpsw_get_common_speed(cpsw);
        if (speed == cpsw->speed || !speed)
                return 0;

        cpsw->speed = speed;

        for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
                if (!ch_rate)
                        break;

                rlim_ch_num++;
        }

        /* cases not dependent on speed */
        if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
                return 0;

        return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
        struct cpsw_priv        *priv = netdev_priv(ndev);
        struct cpsw_common      *cpsw = priv->cpsw;
        bool                    link = false;

        for_each_slave(priv, _cpsw_adjust_link, priv, &link);

        if (link) {
                if (cpsw_need_resplit(cpsw))
                        cpsw_split_res(cpsw);

                netif_carrier_on(ndev);
                if (netif_running(ndev))
                        netif_tx_wake_all_queues(ndev);
        } else {
                netif_carrier_off(ndev);
                netif_tx_stop_all_queues(ndev);
        }
}

static inline void cpsw_add_dual_emac_def_ale_entries(
                struct cpsw_priv *priv, struct cpsw_slave *slave,
                u32 slave_port)
{
        struct cpsw_common *cpsw = priv->cpsw;
        u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

        if (cpsw->version == CPSW_VERSION_1)
                slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
        else
                slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
        cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
                          port_mask, port_mask, 0);
        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
                           ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
                           HOST_PORT_NUM, ALE_VLAN |
                           ALE_SECURE, slave->port_vlan);
        cpsw_ale_control_set(cpsw->ale, slave_port,
                             ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        u32 slave_port;
        struct phy_device *phy;
        struct cpsw_common *cpsw = priv->cpsw;

        cpsw_sl_reset(slave->mac_sl, 100);
        cpsw_sl_ctl_reset(slave->mac_sl);

        /* setup priority mapping */
        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
                          RX_PRIORITY_MAPPING);

        switch (cpsw->version) {
        case CPSW_VERSION_1:
                slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
                /* Increase RX FIFO size to 5 to support full-duplex
                 * flow control mode
                 */
                slave_write(slave,
                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
                            CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
                break;
        case CPSW_VERSION_2:
        case CPSW_VERSION_3:
        case CPSW_VERSION_4:
                slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
                /* Increase RX FIFO size to 5 to support full-duplex
                 * flow control mode
                 */
                slave_write(slave,
                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
                            CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
                break;
        }

        /* setup max packet size, and mac address */
        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
                          cpsw->rx_packet_max);
        cpsw_set_slave_mac(slave, priv);

        slave->mac_control = 0; /* no link yet */

        slave_port = cpsw_get_slave_port(slave->slave_num);

        if (cpsw->data.dual_emac)
                cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
        else
                cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

        if (slave->data->phy_node) {
                phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
                if (!phy) {
                        dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
                                slave->data->phy_node,
                                slave->slave_num);
                        return;
                }
        } else {
                phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
                if (IS_ERR(phy)) {
                        dev_err(priv->dev,
                                "phy \"%s\" not found on slave %d, err %ld\n",
                                slave->data->phy_id, slave->slave_num,
                                PTR_ERR(phy));
                        return;
                }
        }

        slave->phy = phy;

        phy_attached_info(slave->phy);

        phy_start(slave->phy);

        /* Configure GMII_SEL register */
        if (!IS_ERR(slave->data->ifphy))
                phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
                                 slave->data->phy_if);
        else
                cpsw_phy_sel(cpsw->dev, slave->phy->interface,
                             slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        const int vlan = cpsw->data.default_vlan;
        u32 reg;
        int i;
        int unreg_mcast_mask;

        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
               CPSW2_PORT_VLAN;

        writel(vlan, &cpsw->host_port_regs->port_vlan);

        for (i = 0; i < cpsw->data.slaves; i++)
                slave_write(cpsw->slaves + i, vlan, reg);

        if (priv->ndev->flags & IFF_ALLMULTI)
                unreg_mcast_mask = ALE_ALL_PORTS;
        else
                unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
                          ALE_ALL_PORTS, ALE_ALL_PORTS,
                          unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
        u32 fifo_mode;
        u32 control_reg;
        struct cpsw_common *cpsw = priv->cpsw;

        /* soft reset the controller and initialize ale */
        soft_reset("cpsw", &cpsw->regs->soft_reset);
        cpsw_ale_start(cpsw->ale);

        /* switch to vlan aware mode */
        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
                             CPSW_ALE_VLAN_AWARE);
        control_reg = readl(&cpsw->regs->control);
        control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
        writel(control_reg, &cpsw->regs->control);
        fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
                     CPSW_FIFO_NORMAL_MODE;
        writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

        /* setup host port priority mapping */
        writel_relaxed(CPDMA_TX_PRIORITY_MAP,
                       &cpsw->host_port_regs->cpdma_tx_pri_map);
        writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

        if (!cpsw->data.dual_emac) {
                cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
                                   0, 0);
                cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
                                   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
        }
}

int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct sk_buff *skb;
        int ch_buf_num;
        int ch, i, ret;

        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
                for (i = 0; i < ch_buf_num; i++) {
                        skb = __netdev_alloc_skb_ip_align(priv->ndev,
                                                          cpsw->rx_packet_max,
                                                          GFP_KERNEL);
                        if (!skb) {
                                cpsw_err(priv, ifup, "cannot allocate skb\n");
                                return -ENOMEM;
                        }

                        skb_set_queue_mapping(skb, ch);
                        ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
                                                skb->data, skb_tailroom(skb),
                                                0);
                        if (ret < 0) {
                                cpsw_err(priv, ifup,
                                         "cannot submit skb to channel %d rx, error %d\n",
                                         ch, ret);
                                kfree_skb(skb);
                                return ret;
                        }
                        kmemleak_not_leak(skb);
                }

                cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
                          ch, ch_buf_num);
        }

        return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
        u32 slave_port;

        slave_port = cpsw_get_slave_port(slave->slave_num);

        if (!slave->phy)
                return;
        phy_stop(slave->phy);
        phy_disconnect(slave->phy);
        slave->phy = NULL;
        cpsw_ale_control_set(cpsw->ale, slave_port,
                             ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
        cpsw_sl_reset(slave->mac_sl, 100);
        cpsw_sl_ctl_reset(slave->mac_sl);
}

static int cpsw_tc_to_fifo(int tc, int num_tc)
{
        if (tc == num_tc - 1)
                return 0;

        return CPSW_FIFO_SHAPERS_NUM - tc;
}
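
/* Example mapping (assuming CPSW_FIFO_SHAPERS_NUM == 3), num_tc == 4:
 * tc0 -> FIFO3, tc1 -> FIFO2, tc2 -> FIFO1, and the last tc3 -> FIFO0.
 * Higher-priority (lower-numbered) traffic classes land in higher,
 * shapeable FIFOs; the last class uses FIFO0, which cannot be shaped.
 */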

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
        struct cpsw_common *cpsw = priv->cpsw;
        u32 val = 0, send_pct, shift;
        struct cpsw_slave *slave;
        int pct = 0, i;

        if (bw > priv->shp_cfg_speed * 1000)
                goto err;

        /* shaping has to stay enabled for the highest fifos linearly,
         * and fifo bw must be no more than the interface can allow
         */
        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        send_pct = slave_read(slave, SEND_PERCENT);
        for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
                if (!bw) {
                        if (i >= fifo || !priv->fifo_bw[i])
                                continue;

                        dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
                        continue;
                }

                if (!priv->fifo_bw[i] && i > fifo) {
                        dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
                        return -EINVAL;
                }

                shift = (i - 1) * 8;
                if (i == fifo) {
                        send_pct &= ~(CPSW_PCT_MASK << shift);
                        val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
                        if (!val)
                                val = 1;

                        send_pct |= val << shift;
                        pct += val;
                        continue;
                }

                if (priv->fifo_bw[i])
                        pct += (send_pct >> shift) & CPSW_PCT_MASK;
        }

        if (pct >= 100)
                goto err;

        slave_write(slave, send_pct, SEND_PERCENT);
        priv->fifo_bw[fifo] = bw;

        dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
                 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

        return 0;
err:
        dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
        return -EINVAL;
}
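
/* SEND_PERCENT packs one CPSW_PCT_MASK-wide percentage per shaped FIFO at
 * shift (fifo - 1) * 8. With shp_cfg_speed in Mbit/s and bw in kbit/s,
 * DIV_ROUND_UP(bw, speed * 10) is simply the bandwidth as a percentage of
 * link speed, e.g. bw = 20000 on a 100 Mbit/s link yields 20 (%); the loop
 * above also verifies the per-port sum stays below 100.
 */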

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        u32 tx_in_ctl_rg, val;
        int ret;

        ret = cpsw_set_fifo_bw(priv, fifo, bw);
        if (ret)
                return ret;

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
                       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

        if (!bw)
                cpsw_fifo_shp_on(priv, fifo, bw);

        val = slave_read(slave, tx_in_ctl_rg);
        if (cpsw_shp_is_off(priv)) {
                /* disable FIFOs rate limited queues */
                val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

                /* set type of FIFO queues to normal priority mode */
                val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

                /* set type of FIFO queues to be rate limited */
                if (bw)
                        val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
                else
                        priv->shp_cfg_speed = 0;
        }

        /* toggle a FIFO rate limited queue */
        if (bw)
                val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
        else
                val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
        slave_write(slave, val, tx_in_ctl_rg);

        /* FIFO transmit shape enable */
        cpsw_fifo_shp_on(priv, fifo, bw);
        return 0;
}

/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
                        struct tc_cbs_qopt_offload *qopt)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        int prev_speed = 0;
        int tc, ret, fifo;
        u32 bw = 0;

        tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

        /* enable channels in backward order, as the highest FIFOs must be
         * rate limited first and for compliance with CPDMA rate limited
         * channels that are also used in backward order. FIFO0 cannot be
         * rate limited.
         */
        fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
        if (!fifo) {
                dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
                return -EINVAL;
        }

        /* do nothing, it's disabled anyway */
        if (!qopt->enable && !priv->fifo_bw[fifo])
                return 0;

        /* shapers can be set if link speed is known */
        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        if (slave->phy && slave->phy->link) {
                if (priv->shp_cfg_speed &&
                    priv->shp_cfg_speed != slave->phy->speed)
                        prev_speed = priv->shp_cfg_speed;

                priv->shp_cfg_speed = slave->phy->speed;
        }

        if (!priv->shp_cfg_speed) {
                dev_err(priv->dev, "Link speed is not known");
                return -1;
        }

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }

        bw = qopt->enable ? qopt->idleslope : 0;
        ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
        if (ret) {
                priv->shp_cfg_speed = prev_speed;
                prev_speed = 0;
        }

        if (bw && prev_speed)
                dev_warn(priv->dev,
                         "Speed was changed, CBS shaper speeds are changed!");

        pm_runtime_put_sync(cpsw->dev);
        return ret;
}
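
/* Userspace view (illustrative commands): this hook is reached through the
 * CBS qdisc offload path, roughly:
 *
 *   tc qdisc replace dev eth0 root handle 100: mqprio num_tc 3 \
 *      map 2 2 1 0 2 2 2 2 queues 1@0 1@1 1@2 hw 1
 *   tc qdisc replace dev eth0 parent 100:1 cbs idleslope 20000 \
 *      sendslope -980000 hicredit 30 locredit -1470 offload 1
 *
 * Of the CBS parameters only idleslope is consumed here, as the FIFO
 * bandwidth in kbit/s.
 */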

static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        int fifo, bw;

        for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
                bw = priv->fifo_bw[fifo];
                if (!bw)
                        continue;

                cpsw_set_fifo_rlimit(priv, fifo, bw);
        }
}

static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        u32 tx_prio_map = 0;
        int i, tc, fifo;
        u32 tx_prio_rg;

        if (!priv->mqprio_hw)
                return;

        for (i = 0; i < 8; i++) {
                tc = netdev_get_prio_tc_map(priv->ndev, i);
                fifo = CPSW_FIFO_SHAPERS_NUM - tc;
                tx_prio_map |= fifo << (4 * i);
        }

        tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
                     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

        slave_write(slave, tx_prio_map, tx_prio_rg);
}
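
/* TX_PRI_MAP packs a 4-bit FIFO number per 802.1p priority (8 priorities x
 * 4 bits in one 32-bit register); each nibble is rebuilt here from the
 * netdev prio->tc map so the MQPRIO offload survives a port reset.
 */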

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
        struct cpsw_priv *priv = arg;

        if (!vdev)
                return 0;

        cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
        return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
        /* restore vlan configurations */
        vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

        /* restore MQPRIO offload */
        for_each_slave(priv, cpsw_mqprio_resume, priv);

        /* restore CBS offload */
        for_each_slave(priv, cpsw_cbs_resume, priv);
}

static int cpsw_ndo_open(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int ret;
        u32 reg;

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }

        netif_carrier_off(ndev);

        /* Notify the stack of the actual queue counts. */
        ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
        if (ret) {
                dev_err(priv->dev, "cannot set real number of tx queues\n");
                goto err_cleanup;
        }

        ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
        if (ret) {
                dev_err(priv->dev, "cannot set real number of rx queues\n");
                goto err_cleanup;
        }

        reg = cpsw->version;

        dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
                 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
                 CPSW_RTL_VERSION(reg));

        /* Initialize host and slave ports */
        if (!cpsw->usage_count)
                cpsw_init_host_port(priv);
        for_each_slave(priv, cpsw_slave_open, priv);

        /* Add default VLAN */
        if (!cpsw->data.dual_emac)
                cpsw_add_default_vlan(priv);
        else
                cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
                                  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

        /* initialize shared resources for every ndev */
        if (!cpsw->usage_count) {
                /* disable priority elevation */
                writel_relaxed(0, &cpsw->regs->ptype);

                /* enable statistics collection on all ports */
                writel_relaxed(0x7, &cpsw->regs->stat_port_en);

                /* Enable internal fifo flow control */
                writel(0x7, &cpsw->regs->flow_control);

                napi_enable(&cpsw->napi_rx);
                napi_enable(&cpsw->napi_tx);

                if (cpsw->tx_irq_disabled) {
                        cpsw->tx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[1]);
                }

                if (cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[0]);
                }

                ret = cpsw_fill_rx_channels(priv);
                if (ret < 0)
                        goto err_cleanup;

                if (cpts_register(cpsw->cpts))
                        dev_err(priv->dev, "error registering cpts device\n");

        }

        cpsw_restore(priv);

        /* Enable Interrupt pacing if configured */
        if (cpsw->coal_intvl != 0) {
                struct ethtool_coalesce coal;

                coal.rx_coalesce_usecs = cpsw->coal_intvl;
                cpsw_set_coalesce(ndev, &coal);
        }

        cpdma_ctlr_start(cpsw->dma);
        cpsw_intr_enable(cpsw);
        cpsw->usage_count++;

        return 0;

err_cleanup:
        cpdma_ctlr_stop(cpsw->dma);
        for_each_slave(priv, cpsw_slave_stop, cpsw);
        pm_runtime_put_sync(cpsw->dev);
        netif_carrier_off(priv->ndev);
        return ret;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;

        cpsw_info(priv, ifdown, "shutting down cpsw device\n");
        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
        netif_tx_stop_all_queues(priv->ndev);
        netif_carrier_off(priv->ndev);

        if (cpsw->usage_count <= 1) {
                napi_disable(&cpsw->napi_rx);
                napi_disable(&cpsw->napi_tx);
                cpts_unregister(cpsw->cpts);
                cpsw_intr_disable(cpsw);
                cpdma_ctlr_stop(cpsw->dma);
                cpsw_ale_stop(cpsw->ale);
        }
        for_each_slave(priv, cpsw_slave_stop, cpsw);

        if (cpsw_need_resplit(cpsw))
                cpsw_split_res(cpsw);

        cpsw->usage_count--;
        pm_runtime_put_sync(cpsw->dev);
        return 0;
}

static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
                                       struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpts *cpts = cpsw->cpts;
        struct netdev_queue *txq;
        struct cpdma_chan *txch;
        int ret, q_idx;

        if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
                ndev->stats.tx_dropped++;
                return NET_XMIT_DROP;
        }

        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        q_idx = skb_get_queue_mapping(skb);
        if (q_idx >= cpsw->tx_ch_num)
                q_idx = q_idx % cpsw->tx_ch_num;

        txch = cpsw->txv[q_idx].ch;
        txq = netdev_get_tx_queue(ndev, q_idx);
        skb_tx_timestamp(skb);
        ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
                                priv->emac_port + cpsw->data.dual_emac);
        if (unlikely(ret != 0)) {
                cpsw_err(priv, tx_err, "desc submit failed\n");
                goto fail;
        }

        /* If there are no more free tx descriptors, we need to tell the
         * kernel to stop sending us tx frames.
         */
1498         if (unlikely(!cpdma_check_free_tx_desc(txch))) {
1499                 netif_tx_stop_queue(txq);
1500
1501                 /* Barrier, so that stop_queue is visible to other CPUs */
1502                 smp_mb__after_atomic();
1503
1504                 if (cpdma_check_free_tx_desc(txch))
1505                         netif_tx_wake_queue(txq);
1506         }
1507
1508         return NETDEV_TX_OK;
1509 fail:
1510         ndev->stats.tx_dropped++;
1511         netif_tx_stop_queue(txq);
1512
1513         /* Barrier, so that stop_queue is visible to other CPUs */
1514         smp_mb__after_atomic();
1515
1516         if (cpdma_check_free_tx_desc(txch))
1517                 netif_tx_wake_queue(txq);
1518
1519         return NETDEV_TX_BUSY;
1520 }
1521
1522 #if IS_ENABLED(CONFIG_TI_CPTS)
1523
1524 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
1525 {
1526         struct cpsw_common *cpsw = priv->cpsw;
1527         struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
1528         u32 ts_en, seq_id;
1529
1530         if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
1531                 slave_write(slave, 0, CPSW1_TS_CTL);
1532                 return;
1533         }
1534
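             /* sequenceId sits at byte offset 30 of the PTP header; program
              * that offset together with the 1588 LTYPE so event messages
              * can be matched and timestamped.
              */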
1535         seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1536         ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1537
1538         if (priv->tx_ts_enabled)
1539                 ts_en |= CPSW_V1_TS_TX_EN;
1540
1541         if (priv->rx_ts_enabled)
1542                 ts_en |= CPSW_V1_TS_RX_EN;
1543
1544         slave_write(slave, ts_en, CPSW1_TS_CTL);
1545         slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
1546 }
1547
1548 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1549 {
1550         struct cpsw_slave *slave;
1551         struct cpsw_common *cpsw = priv->cpsw;
1552         u32 ctrl, mtype;
1553
1554         slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1555
1556         ctrl = slave_read(slave, CPSW2_CONTROL);
1557         switch (cpsw->version) {
1558         case CPSW_VERSION_2:
1559                 ctrl &= ~CTRL_V2_ALL_TS_MASK;
1560
1561                 if (priv->tx_ts_enabled)
1562                         ctrl |= CTRL_V2_TX_TS_BITS;
1563
1564                 if (priv->rx_ts_enabled)
1565                         ctrl |= CTRL_V2_RX_TS_BITS;
1566                 break;
1567         case CPSW_VERSION_3:
1568         default:
1569                 ctrl &= ~CTRL_V3_ALL_TS_MASK;
1570
1571                 if (priv->tx_ts_enabled)
1572                         ctrl |= CTRL_V3_TX_TS_BITS;
1573
1574                 if (priv->rx_ts_enabled)
1575                         ctrl |= CTRL_V3_RX_TS_BITS;
1576                 break;
1577         }
1578
1579         mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1580
1581         slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
1582         slave_write(slave, ctrl, CPSW2_CONTROL);
1583         writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
1584         writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
1585 }
1586
1587 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1588 {
1589         struct cpsw_priv *priv = netdev_priv(dev);
1590         struct hwtstamp_config cfg;
1591         struct cpsw_common *cpsw = priv->cpsw;
1592
1593         if (cpsw->version != CPSW_VERSION_1 &&
1594             cpsw->version != CPSW_VERSION_2 &&
1595             cpsw->version != CPSW_VERSION_3)
1596                 return -EOPNOTSUPP;
1597
1598         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1599                 return -EFAULT;
1600
1601         /* reserved for future extensions */
1602         if (cfg.flags)
1603                 return -EINVAL;
1604
1605         if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
1606                 return -ERANGE;
1607
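             /* The hardware timestamps only PTP event packets, so broader
              * filters are narrowed to the matching PTP event filter and
              * the result is reported back to user space.
              */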
1608         switch (cfg.rx_filter) {
1609         case HWTSTAMP_FILTER_NONE:
1610                 priv->rx_ts_enabled = 0;
1611                 break;
1612         case HWTSTAMP_FILTER_ALL:
1613         case HWTSTAMP_FILTER_NTP_ALL:
1614                 return -ERANGE;
1615         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1616         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1617         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1618                 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1619                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1620                 break;
1621         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1622         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1623         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1624         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1625         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1626         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1627         case HWTSTAMP_FILTER_PTP_V2_EVENT:
1628         case HWTSTAMP_FILTER_PTP_V2_SYNC:
1629         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1630                 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
1631                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1632                 break;
1633         default:
1634                 return -ERANGE;
1635         }
1636
1637         priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
1638
1639         switch (cpsw->version) {
1640         case CPSW_VERSION_1:
1641                 cpsw_hwtstamp_v1(priv);
1642                 break;
1643         case CPSW_VERSION_2:
1644         case CPSW_VERSION_3:
1645                 cpsw_hwtstamp_v2(priv);
1646                 break;
1647         default:
1648                 WARN_ON(1);
1649         }
1650
1651         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1652 }
1653
1654 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1655 {
1656         struct cpsw_common *cpsw = ndev_to_cpsw(dev);
1657         struct cpsw_priv *priv = netdev_priv(dev);
1658         struct hwtstamp_config cfg;
1659
1660         if (cpsw->version != CPSW_VERSION_1 &&
1661             cpsw->version != CPSW_VERSION_2 &&
1662             cpsw->version != CPSW_VERSION_3)
1663                 return -EOPNOTSUPP;
1664
1665         cfg.flags = 0;
1666         cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1667         cfg.rx_filter = priv->rx_ts_enabled;
1668
1669         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1670 }
1671 #else
1672 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1673 {
1674         return -EOPNOTSUPP;
1675 }
1676
1677 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1678 {
1679         return -EOPNOTSUPP;
1680 }
1681 #endif /*CONFIG_TI_CPTS*/
1682
1683 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1684 {
1685         struct cpsw_priv *priv = netdev_priv(dev);
1686         struct cpsw_common *cpsw = priv->cpsw;
1687         int slave_no = cpsw_slave_index(cpsw, priv);
1688
1689         if (!netif_running(dev))
1690                 return -EINVAL;
1691
1692         switch (cmd) {
1693         case SIOCSHWTSTAMP:
1694                 return cpsw_hwtstamp_set(dev, req);
1695         case SIOCGHWTSTAMP:
1696                 return cpsw_hwtstamp_get(dev, req);
1697         }
1698
1699         if (!cpsw->slaves[slave_no].phy)
1700                 return -EOPNOTSUPP;
1701         return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
1702 }
1703
1704 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1705 {
1706         struct cpsw_priv *priv = netdev_priv(ndev);
1707         struct cpsw_common *cpsw = priv->cpsw;
1708         int ch;
1709
1710         cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1711         ndev->stats.tx_errors++;
1712         cpsw_intr_disable(cpsw);
1713         for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1714                 cpdma_chan_stop(cpsw->txv[ch].ch);
1715                 cpdma_chan_start(cpsw->txv[ch].ch);
1716         }
1717
1718         cpsw_intr_enable(cpsw);
1719         netif_trans_update(ndev);
1720         netif_tx_wake_all_queues(ndev);
1721 }
1722
1723 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1724 {
1725         struct cpsw_priv *priv = netdev_priv(ndev);
1726         struct sockaddr *addr = (struct sockaddr *)p;
1727         struct cpsw_common *cpsw = priv->cpsw;
1728         int flags = 0;
1729         u16 vid = 0;
1730         int ret;
1731
1732         if (!is_valid_ether_addr(addr->sa_data))
1733                 return -EADDRNOTAVAIL;
1734
1735         ret = pm_runtime_get_sync(cpsw->dev);
1736         if (ret < 0) {
1737                 pm_runtime_put_noidle(cpsw->dev);
1738                 return ret;
1739         }
1740
1741         if (cpsw->data.dual_emac) {
1742                 vid = cpsw->slaves[priv->emac_port].port_vlan;
1743                 flags = ALE_VLAN;
1744         }
1745
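             /* Move the host port unicast ALE entry over from the old
              * address to the new one (scoped to the port VLAN in dual
              * EMAC mode).
              */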
1746         cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1747                            flags, vid);
1748         cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
1749                            flags, vid);
1750
1751         memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1752         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1753         for_each_slave(priv, cpsw_set_slave_mac, priv);
1754
1755         pm_runtime_put(cpsw->dev);
1756
1757         return 0;
1758 }
1759
1760 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
1761                                 unsigned short vid)
1762 {
1763         int ret;
1764         int unreg_mcast_mask = 0;
1765         int mcast_mask;
1766         u32 port_mask;
1767         struct cpsw_common *cpsw = priv->cpsw;
1768
1769         if (cpsw->data.dual_emac) {
1770                 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
1771
1772                 mcast_mask = ALE_PORT_HOST;
1773                 if (priv->ndev->flags & IFF_ALLMULTI)
1774                         unreg_mcast_mask = mcast_mask;
1775         } else {
1776                 port_mask = ALE_ALL_PORTS;
1777                 mcast_mask = port_mask;
1778
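                     /* Without IFF_ALLMULTI, flood unregistered multicast
                      * only to the slave ports and keep it off the host
                      * port.
                      */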
1779                 if (priv->ndev->flags & IFF_ALLMULTI)
1780                         unreg_mcast_mask = ALE_ALL_PORTS;
1781                 else
1782                         unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1783         }
1784
1785         ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
1786                                 unreg_mcast_mask);
1787         if (ret != 0)
1788                 return ret;
1789
1790         ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1791                                  HOST_PORT_NUM, ALE_VLAN, vid);
1792         if (ret != 0)
1793                 goto clean_vid;
1794
1795         ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1796                                  mcast_mask, ALE_VLAN, vid, 0);
1797         if (ret != 0)
1798                 goto clean_vlan_ucast;
1799         return 0;
1800
1801 clean_vlan_ucast:
1802         cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1803                            HOST_PORT_NUM, ALE_VLAN, vid);
1804 clean_vid:
1805         cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1806         return ret;
1807 }
1808
1809 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1810                                     __be16 proto, u16 vid)
1811 {
1812         struct cpsw_priv *priv = netdev_priv(ndev);
1813         struct cpsw_common *cpsw = priv->cpsw;
1814         int ret;
1815
1816         if (vid == cpsw->data.default_vlan)
1817                 return 0;
1818
1819         ret = pm_runtime_get_sync(cpsw->dev);
1820         if (ret < 0) {
1821                 pm_runtime_put_noidle(cpsw->dev);
1822                 return ret;
1823         }
1824
1825         if (cpsw->data.dual_emac) {
1826                 /* In dual EMAC mode, the reserved per-port VLAN ids must
1827                  * not be used for creating VLAN interfaces, as this would
1828                  * break the dual EMAC port separation.
1829                  */
1830                 int i;
1831
1832                 for (i = 0; i < cpsw->data.slaves; i++) {
1833                         if (vid == cpsw->slaves[i].port_vlan) {
1834                                 ret = -EINVAL;
1835                                 goto err;
1836                         }
1837                 }
1838         }
1839
1840         dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1841         ret = cpsw_add_vlan_ale_entry(priv, vid);
1842 err:
1843         pm_runtime_put(cpsw->dev);
1844         return ret;
1845 }
1846
1847 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1848                                      __be16 proto, u16 vid)
1849 {
1850         struct cpsw_priv *priv = netdev_priv(ndev);
1851         struct cpsw_common *cpsw = priv->cpsw;
1852         int ret;
1853
1854         if (vid == cpsw->data.default_vlan)
1855                 return 0;
1856
1857         ret = pm_runtime_get_sync(cpsw->dev);
1858         if (ret < 0) {
1859                 pm_runtime_put_noidle(cpsw->dev);
1860                 return ret;
1861         }
1862
1863         if (cpsw->data.dual_emac) {
1864                 int i;
1865
1866                 for (i = 0; i < cpsw->data.slaves; i++) {
1867                         if (vid == cpsw->slaves[i].port_vlan)
1868                                 goto err;
1869                 }
1870         }
1871
1872         dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1873         ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1874         ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1875                                   HOST_PORT_NUM, ALE_VLAN, vid);
1876         ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1877                                   0, ALE_VLAN, vid);
1878         ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
1879 err:
1880         pm_runtime_put(cpsw->dev);
1881         return ret;
1882 }
1883
1884 static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
1885 {
1886         struct cpsw_priv *priv = netdev_priv(ndev);
1887         struct cpsw_common *cpsw = priv->cpsw;
1888         struct cpsw_slave *slave;
1889         u32 min_rate;
1890         u32 ch_rate;
1891         int i, ret;
1892
1893         ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
1894         if (ch_rate == rate)
1895                 return 0;
1896
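             /* tx_maxrate is given in Mbps while the CPDMA layer takes the
              * channel rate in Kbps, hence the conversion below.
              */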
1897         ch_rate = rate * 1000;
1898         min_rate = cpdma_chan_get_min_rate(cpsw->dma);
1899         if (ch_rate < min_rate && ch_rate) {
1900                 dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
1901                         min_rate);
1902                 return -EINVAL;
1903         }
1904
1905         if (rate > cpsw->speed) {
1906                 dev_err(priv->dev, "The channel rate cannot be more than %dMbps",
                             cpsw->speed);
1907                 return -EINVAL;
1908         }
1909
1910         ret = pm_runtime_get_sync(cpsw->dev);
1911         if (ret < 0) {
1912                 pm_runtime_put_noidle(cpsw->dev);
1913                 return ret;
1914         }
1915
1916         ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
1917         pm_runtime_put(cpsw->dev);
1918
1919         if (ret)
1920                 return ret;
1921
1922         /* update rates for slaves tx queues */
1923         for (i = 0; i < cpsw->data.slaves; i++) {
1924                 slave = &cpsw->slaves[i];
1925                 if (!slave->ndev)
1926                         continue;
1927
1928                 netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
1929         }
1930
1931         cpsw_split_res(cpsw);
1932         return ret;
1933 }
1934
1935 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
1936 {
1937         struct tc_mqprio_qopt_offload *mqprio = type_data;
1938         struct cpsw_priv *priv = netdev_priv(ndev);
1939         struct cpsw_common *cpsw = priv->cpsw;
1940         int fifo, num_tc, count, offset;
1941         struct cpsw_slave *slave;
1942         u32 tx_prio_map = 0;
1943         int i, tc, ret;
1944
1945         num_tc = mqprio->qopt.num_tc;
1946         if (num_tc > CPSW_TC_NUM)
1947                 return -EINVAL;
1948
1949         if (mqprio->mode != TC_MQPRIO_MODE_DCB)
1950                 return -EINVAL;
1951
1952         ret = pm_runtime_get_sync(cpsw->dev);
1953         if (ret < 0) {
1954                 pm_runtime_put_noidle(cpsw->dev);
1955                 return ret;
1956         }
1957
1958         if (num_tc) {
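                     /* Pack one 4-bit fifo index per skb priority (eight
                      * priorities in total) into the TX_PRI_MAP register
                      * value.
                      */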
1959                 for (i = 0; i < 8; i++) {
1960                         tc = mqprio->qopt.prio_tc_map[i];
1961                         fifo = cpsw_tc_to_fifo(tc, num_tc);
1962                         tx_prio_map |= fifo << (4 * i);
1963                 }
1964
1965                 netdev_set_num_tc(ndev, num_tc);
1966                 for (i = 0; i < num_tc; i++) {
1967                         count = mqprio->qopt.count[i];
1968                         offset = mqprio->qopt.offset[i];
1969                         netdev_set_tc_queue(ndev, i, count, offset);
1970                 }
1971         }
1972
1973         if (!mqprio->qopt.hw) {
1974                 /* restore default configuration */
1975                 netdev_reset_tc(ndev);
1976                 tx_prio_map = TX_PRIORITY_MAPPING;
1977         }
1978
1979         priv->mqprio_hw = mqprio->qopt.hw;
1980
1981         offset = cpsw->version == CPSW_VERSION_1 ?
1982                  CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1983
1984         slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1985         slave_write(slave, tx_prio_map, offset);
1986
1987         pm_runtime_put_sync(cpsw->dev);
1988
1989         return 0;
1990 }
1991
1992 static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1993                              void *type_data)
1994 {
1995         switch (type) {
1996         case TC_SETUP_QDISC_CBS:
1997                 return cpsw_set_cbs(ndev, type_data);
1998
1999         case TC_SETUP_QDISC_MQPRIO:
2000                 return cpsw_set_mqprio(ndev, type_data);
2001
2002         default:
2003                 return -EOPNOTSUPP;
2004         }
2005 }
2006
2007 #ifdef CONFIG_NET_POLL_CONTROLLER
2008 static void cpsw_ndo_poll_controller(struct net_device *ndev)
2009 {
2010         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2011
2012         cpsw_intr_disable(cpsw);
2013         cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2014         cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2015         cpsw_intr_enable(cpsw);
2016 }
2017 #endif
2018
2019 static const struct net_device_ops cpsw_netdev_ops = {
2020         .ndo_open               = cpsw_ndo_open,
2021         .ndo_stop               = cpsw_ndo_stop,
2022         .ndo_start_xmit         = cpsw_ndo_start_xmit,
2023         .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
2024         .ndo_do_ioctl           = cpsw_ndo_ioctl,
2025         .ndo_validate_addr      = eth_validate_addr,
2026         .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
2027         .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
2028         .ndo_set_tx_maxrate     = cpsw_ndo_set_tx_maxrate,
2029 #ifdef CONFIG_NET_POLL_CONTROLLER
2030         .ndo_poll_controller    = cpsw_ndo_poll_controller,
2031 #endif
2032         .ndo_vlan_rx_add_vid    = cpsw_ndo_vlan_rx_add_vid,
2033         .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
2034         .ndo_setup_tc           = cpsw_ndo_setup_tc,
2035 };
2036
2037 static void cpsw_get_drvinfo(struct net_device *ndev,
2038                              struct ethtool_drvinfo *info)
2039 {
2040         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2041         struct platform_device  *pdev = to_platform_device(cpsw->dev);
2042
2043         strlcpy(info->driver, "cpsw", sizeof(info->driver));
2044         strlcpy(info->version, "1.0", sizeof(info->version));
2045         strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2046 }
2047
2048 static int cpsw_set_pauseparam(struct net_device *ndev,
2049                                struct ethtool_pauseparam *pause)
2050 {
2051         struct cpsw_priv *priv = netdev_priv(ndev);
2052         bool link;
2053
2054         priv->rx_pause = pause->rx_pause ? true : false;
2055         priv->tx_pause = pause->tx_pause ? true : false;
2056
2057         for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2058         return 0;
2059 }
2060
2061 static int cpsw_set_channels(struct net_device *ndev,
2062                              struct ethtool_channels *chs)
2063 {
2064         return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
2065 }
2066
2067 static const struct ethtool_ops cpsw_ethtool_ops = {
2068         .get_drvinfo    = cpsw_get_drvinfo,
2069         .get_msglevel   = cpsw_get_msglevel,
2070         .set_msglevel   = cpsw_set_msglevel,
2071         .get_link       = ethtool_op_get_link,
2072         .get_ts_info    = cpsw_get_ts_info,
2073         .get_coalesce   = cpsw_get_coalesce,
2074         .set_coalesce   = cpsw_set_coalesce,
2075         .get_sset_count         = cpsw_get_sset_count,
2076         .get_strings            = cpsw_get_strings,
2077         .get_ethtool_stats      = cpsw_get_ethtool_stats,
2078         .get_pauseparam         = cpsw_get_pauseparam,
2079         .set_pauseparam         = cpsw_set_pauseparam,
2080         .get_wol        = cpsw_get_wol,
2081         .set_wol        = cpsw_set_wol,
2082         .get_regs_len   = cpsw_get_regs_len,
2083         .get_regs       = cpsw_get_regs,
2084         .begin          = cpsw_ethtool_op_begin,
2085         .complete       = cpsw_ethtool_op_complete,
2086         .get_channels   = cpsw_get_channels,
2087         .set_channels   = cpsw_set_channels,
2088         .get_link_ksettings     = cpsw_get_link_ksettings,
2089         .set_link_ksettings     = cpsw_set_link_ksettings,
2090         .get_eee        = cpsw_get_eee,
2091         .set_eee        = cpsw_set_eee,
2092         .nway_reset     = cpsw_nway_reset,
2093         .get_ringparam = cpsw_get_ringparam,
2094         .set_ringparam = cpsw_set_ringparam,
2095 };
2096
2097 static int cpsw_probe_dt(struct cpsw_platform_data *data,
2098                          struct platform_device *pdev)
2099 {
2100         struct device_node *node = pdev->dev.of_node;
2101         struct device_node *slave_node;
2102         int i = 0, ret;
2103         u32 prop;
2104
2105         if (!node)
2106                 return -EINVAL;
2107
2108         if (of_property_read_u32(node, "slaves", &prop)) {
2109                 dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
2110                 return -EINVAL;
2111         }
2112         data->slaves = prop;
2113
2114         if (of_property_read_u32(node, "active_slave", &prop)) {
2115                 dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
2116                 return -EINVAL;
2117         }
2118         data->active_slave = prop;
2119
2120         data->slave_data = devm_kcalloc(&pdev->dev,
2121                                         data->slaves,
2122                                         sizeof(struct cpsw_slave_data),
2123                                         GFP_KERNEL);
2124         if (!data->slave_data)
2125                 return -ENOMEM;
2126
2127         if (of_property_read_u32(node, "cpdma_channels", &prop)) {
2128                 dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
2129                 return -EINVAL;
2130         }
2131         data->channels = prop;
2132
2133         if (of_property_read_u32(node, "ale_entries", &prop)) {
2134                 dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
2135                 return -EINVAL;
2136         }
2137         data->ale_entries = prop;
2138
2139         if (of_property_read_u32(node, "bd_ram_size", &prop)) {
2140                 dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
2141                 return -EINVAL;
2142         }
2143         data->bd_ram_size = prop;
2144
2145         if (of_property_read_u32(node, "mac_control", &prop)) {
2146                 dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
2147                 return -EINVAL;
2148         }
2149         data->mac_control = prop;
2150
2151         if (of_property_read_bool(node, "dual_emac"))
2152                 data->dual_emac = 1;
2153
2154         /*
2155          * Populate all the child nodes here...
2156          */
2157         ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2158         /* We do not want to force this, as some platforms may not have child nodes */
2159         if (ret)
2160                 dev_warn(&pdev->dev, "Doesn't have any child node\n");
2161
2162         for_each_available_child_of_node(node, slave_node) {
2163                 struct cpsw_slave_data *slave_data = data->slave_data + i;
2164                 const void *mac_addr = NULL;
2165                 int lenp;
2166                 const __be32 *parp;
2167
2168                 /* This is not a slave child node, continue */
2169                 if (!of_node_name_eq(slave_node, "slave"))
2170                         continue;
2171
2172                 slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
2173                                                     NULL);
2174                 if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
2175                     IS_ERR(slave_data->ifphy)) {
2176                         ret = PTR_ERR(slave_data->ifphy);
2177                         dev_err(&pdev->dev,
2178                                 "%d: Error retrieving port phy: %d\n", i, ret);
2179                         return ret;
2180                 }
2181
2182                 slave_data->phy_node = of_parse_phandle(slave_node,
2183                                                         "phy-handle", 0);
2184                 parp = of_get_property(slave_node, "phy_id", &lenp);
2185                 if (slave_data->phy_node) {
2186                         dev_dbg(&pdev->dev,
2187                                 "slave[%d] using phy-handle=\"%pOF\"\n",
2188                                 i, slave_data->phy_node);
2189                 } else if (of_phy_is_fixed_link(slave_node)) {
2190                         /* In the case of a fixed PHY, the DT node associated
2191                          * with the PHY is the Ethernet MAC DT node.
2192                          */
2193                         ret = of_phy_register_fixed_link(slave_node);
2194                         if (ret) {
2195                                 if (ret != -EPROBE_DEFER)
2196                                         dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
2197                                 return ret;
2198                         }
2199                         slave_data->phy_node = of_node_get(slave_node);
2200                 } else if (parp) {
2201                         u32 phyid;
2202                         struct device_node *mdio_node;
2203                         struct platform_device *mdio;
2204
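                             /* Legacy "phy_id" binding: a phandle to the
                              * MDIO controller plus a PHY address, from
                              * which the PHY bus id string is composed.
                              */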
2205                         if (lenp != (sizeof(__be32) * 2)) {
2206                                 dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
2207                                 goto no_phy_slave;
2208                         }
2209                         mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2210                         phyid = be32_to_cpup(parp+1);
2211                         mdio = of_find_device_by_node(mdio_node);
2212                         of_node_put(mdio_node);
2213                         if (!mdio) {
2214                                 dev_err(&pdev->dev, "Missing mdio platform device\n");
2215                                 return -EINVAL;
2216                         }
2217                         snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2218                                  PHY_ID_FMT, mdio->name, phyid);
2219                         put_device(&mdio->dev);
2220                 } else {
2221                         dev_err(&pdev->dev,
2222                                 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
2223                                 i);
2224                         goto no_phy_slave;
2225                 }
2226                 slave_data->phy_if = of_get_phy_mode(slave_node);
2227                 if (slave_data->phy_if < 0) {
2228                         dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2229                                 i);
2230                         return slave_data->phy_if;
2231                 }
2232
2233 no_phy_slave:
2234                 mac_addr = of_get_mac_address(slave_node);
2235                 if (mac_addr) {
2236                         memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
2237                 } else {
2238                         ret = ti_cm_get_macid(&pdev->dev, i,
2239                                               slave_data->mac_addr);
2240                         if (ret)
2241                                 return ret;
2242                 }
2243                 if (data->dual_emac) {
2244                         if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2245                                                  &prop)) {
2246                                 dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
2247                                 slave_data->dual_emac_res_vlan = i+1;
2248                                 dev_err(&pdev->dev, "Using %d as reserved VLAN for slave %d\n",
2249                                         slave_data->dual_emac_res_vlan, i);
2250                         } else {
2251                                 slave_data->dual_emac_res_vlan = prop;
2252                         }
2253                 }
2254
2255                 i++;
2256                 if (i == data->slaves)
2257                         break;
2258         }
2259
2260         return 0;
2261 }
2262
2263 static void cpsw_remove_dt(struct platform_device *pdev)
2264 {
2265         struct net_device *ndev = platform_get_drvdata(pdev);
2266         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2267         struct cpsw_platform_data *data = &cpsw->data;
2268         struct device_node *node = pdev->dev.of_node;
2269         struct device_node *slave_node;
2270         int i = 0;
2271
2272         for_each_available_child_of_node(node, slave_node) {
2273                 struct cpsw_slave_data *slave_data = &data->slave_data[i];
2274
2275                 if (!of_node_name_eq(slave_node, "slave"))
2276                         continue;
2277
2278                 if (of_phy_is_fixed_link(slave_node))
2279                         of_phy_deregister_fixed_link(slave_node);
2280
2281                 of_node_put(slave_data->phy_node);
2282
2283                 i++;
2284                 if (i == data->slaves)
2285                         break;
2286         }
2287
2288         of_platform_depopulate(&pdev->dev);
2289 }
2290
2291 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2292 {
2293         struct cpsw_common              *cpsw = priv->cpsw;
2294         struct cpsw_platform_data       *data = &cpsw->data;
2295         struct net_device               *ndev;
2296         struct cpsw_priv                *priv_sl2;
2297         int ret = 0;
2298
2299         ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
2300                                        CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
2301         if (!ndev) {
2302                 dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
2303                 return -ENOMEM;
2304         }
2305
2306         priv_sl2 = netdev_priv(ndev);
2307         priv_sl2->cpsw = cpsw;
2308         priv_sl2->ndev = ndev;
2309         priv_sl2->dev  = &ndev->dev;
2310         priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2311
2312         if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
2313                 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
2314                         ETH_ALEN);
2315                 dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
2316                          priv_sl2->mac_addr);
2317         } else {
2318                 eth_random_addr(priv_sl2->mac_addr);
2319                 dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
2320                          priv_sl2->mac_addr);
2321         }
2322         memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2323
2324         priv_sl2->emac_port = 1;
2325         cpsw->slaves[1].ndev = ndev;
2326         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2327
2328         ndev->netdev_ops = &cpsw_netdev_ops;
2329         ndev->ethtool_ops = &cpsw_ethtool_ops;
2330
2331         /* register the network device */
2332         SET_NETDEV_DEV(ndev, cpsw->dev);
2333         ret = register_netdev(ndev);
2334         if (ret)
2335                 dev_err(cpsw->dev, "cpsw: error registering net device\n");
2336
2337         return ret;
2338 }
2339
2340 static const struct of_device_id cpsw_of_mtable[] = {
2341         { .compatible = "ti,cpsw"},
2342         { .compatible = "ti,am335x-cpsw"},
2343         { .compatible = "ti,am4372-cpsw"},
2344         { .compatible = "ti,dra7-cpsw"},
2345         { /* sentinel */ },
2346 };
2347 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2348
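     /* SoC revisions needing the IRQ quirk: matching devices run with a
      * single CPDMA channel pair and the non-multiqueue NAPI handlers
      * (cpsw->quirk_irq).
      */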
2349 static const struct soc_device_attribute cpsw_soc_devices[] = {
2350         { .family = "AM33xx", .revision = "ES1.0"},
2351         { /* sentinel */ }
2352 };
2353
2354 static int cpsw_probe(struct platform_device *pdev)
2355 {
2356         struct device                   *dev = &pdev->dev;
2357         struct clk                      *clk;
2358         struct cpsw_platform_data       *data;
2359         struct net_device               *ndev;
2360         struct cpsw_priv                *priv;
2361         void __iomem                    *ss_regs;
2362         struct resource                 *res, *ss_res;
2363         struct gpio_descs               *mode;
2364         const struct soc_device_attribute *soc;
2365         struct cpsw_common              *cpsw;
2366         int ret = 0, ch;
2367         int irq;
2368
2369         cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
2370         if (!cpsw)
2371                 return -ENOMEM;
2372
2373         cpsw->dev = dev;
2374
2375         mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
2376         if (IS_ERR(mode)) {
2377                 ret = PTR_ERR(mode);
2378                 dev_err(dev, "gpio request failed, ret %d\n", ret);
2379                 return ret;
2380         }
2381
2382         clk = devm_clk_get(dev, "fck");
2383         if (IS_ERR(clk)) {
2384                 ret = PTR_ERR(clk);
2385                 dev_err(dev, "fck is not found %d\n", ret);
2386                 return ret;
2387         }
2388         cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2389
2390         ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2391         ss_regs = devm_ioremap_resource(dev, ss_res);
2392         if (IS_ERR(ss_regs))
2393                 return PTR_ERR(ss_regs);
2394         cpsw->regs = ss_regs;
2395
2396         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2397         cpsw->wr_regs = devm_ioremap_resource(dev, res);
2398         if (IS_ERR(cpsw->wr_regs))
2399                 return PTR_ERR(cpsw->wr_regs);
2400
2401         /* RX IRQ */
2402         irq = platform_get_irq(pdev, 1);
2403         if (irq < 0)
2404                 return irq;
2405         cpsw->irqs_table[0] = irq;
2406
2407         /* TX IRQ */
2408         irq = platform_get_irq(pdev, 2);
2409         if (irq < 0)
2410                 return irq;
2411         cpsw->irqs_table[1] = irq;
2412
2413         /*
2414          * Runtime PM is enabled here as child devices may require it.
2415          */
2416         pm_runtime_enable(dev);
2417
2418         /* Need to enable clocks with the runtime PM API to access module
2419          * registers.
2420          */
2421         ret = pm_runtime_get_sync(dev);
2422         if (ret < 0) {
2423                 pm_runtime_put_noidle(dev);
2424                 goto clean_runtime_disable_ret;
2425         }
2426
2427         ret = cpsw_probe_dt(&cpsw->data, pdev);
2428         if (ret)
2429                 goto clean_dt_ret;
2430
2431         soc = soc_device_match(cpsw_soc_devices);
2432         if (soc)
2433                 cpsw->quirk_irq = 1;
2434
2435         data = &cpsw->data;
2436         cpsw->slaves = devm_kcalloc(dev,
2437                                     data->slaves, sizeof(struct cpsw_slave),
2438                                     GFP_KERNEL);
2439         if (!cpsw->slaves) {
2440                 ret = -ENOMEM;
2441                 goto clean_dt_ret;
2442         }
2443
2444         cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
2445         cpsw->descs_pool_size = descs_pool_size;
2446
2447         ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
2448                                ss_res->start + CPSW2_BD_OFFSET,
2449                                descs_pool_size);
2450         if (ret)
2451                 goto clean_dt_ret;
2452
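             /* Default tx channel is 7; with the IRQ quirk the driver is
              * limited to a single channel/NAPI pair, so use channel 0.
              */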
2453         ch = cpsw->quirk_irq ? 0 : 7;
2454         cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
2455         if (IS_ERR(cpsw->txv[0].ch)) {
2456                 dev_err(dev, "error initializing tx dma channel\n");
2457                 ret = PTR_ERR(cpsw->txv[0].ch);
2458                 goto clean_cpts;
2459         }
2460
2461         cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
2462         if (IS_ERR(cpsw->rxv[0].ch)) {
2463                 dev_err(dev, "error initializing rx dma channel\n");
2464                 ret = PTR_ERR(cpsw->rxv[0].ch);
2465                 goto clean_cpts;
2466         }
2467         cpsw_split_res(cpsw);
2468
2469         /* setup netdev */
2470         ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
2471                                        CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
2472         if (!ndev) {
2473                 dev_err(dev, "error allocating net_device\n");
                     ret = -ENOMEM;
2474                 goto clean_cpts;
2475         }
2476
2477         platform_set_drvdata(pdev, ndev);
2478         priv = netdev_priv(ndev);
2479         priv->cpsw = cpsw;
2480         priv->ndev = ndev;
2481         priv->dev  = dev;
2482         priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2483         priv->emac_port = 0;
2484
2485         if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2486                 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2487                 dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
2488         } else {
2489                 eth_random_addr(priv->mac_addr);
2490                 dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
2491         }
2492
2493         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2494
2495         cpsw->slaves[0].ndev = ndev;
2496
2497         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2498
2499         ndev->netdev_ops = &cpsw_netdev_ops;
2500         ndev->ethtool_ops = &cpsw_ethtool_ops;
2501         netif_napi_add(ndev, &cpsw->napi_rx,
2502                        cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
2503                        CPSW_POLL_WEIGHT);
2504         netif_tx_napi_add(ndev, &cpsw->napi_tx,
2505                           cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
2506                           CPSW_POLL_WEIGHT);
2507
2508         /* register the network device */
2509         SET_NETDEV_DEV(ndev, dev);
2510         ret = register_netdev(ndev);
2511         if (ret) {
2512                 dev_err(dev, "error registering net device\n");
2514                 goto clean_cpts;
2515         }
2516
2517         if (cpsw->data.dual_emac) {
2518                 ret = cpsw_probe_dual_emac(priv);
2519                 if (ret) {
2520                         cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2521                         goto clean_unregister_netdev_ret;
2522                 }
2523         }
2524
2525         /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
2526          * MISC IRQs which are always kept disabled with this driver so
2527          * we will not request them.
2528          *
2529          * If anyone wants to implement support for those, make sure to
2530          * first request and append them to irqs_table array.
2531          */
2532         ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
2533                                0, dev_name(dev), cpsw);
2534         if (ret < 0) {
2535                 dev_err(dev, "error attaching irq (%d)\n", ret);
2536                 goto clean_unregister_netdev_ret;
2537         }
2538
2540         ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
2541                                0, dev_name(dev), cpsw);
2542         if (ret < 0) {
2543                 dev_err(dev, "error attaching irq (%d)\n", ret);
2544                 goto clean_unregister_netdev_ret;
2545         }
2546
2547         cpsw_notice(priv, probe,
2548                     "initialized device (regs %pa, irq %d, pool size %d)\n",
2549                     &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
2550
2551         pm_runtime_put(&pdev->dev);
2552
2553         return 0;
2554
2555 clean_unregister_netdev_ret:
2556         unregister_netdev(ndev);
2557 clean_cpts:
2558         cpts_release(cpsw->cpts);
2559         cpdma_ctlr_destroy(cpsw->dma);
2560 clean_dt_ret:
2561         cpsw_remove_dt(pdev);
2562         pm_runtime_put_sync(&pdev->dev);
2563 clean_runtime_disable_ret:
2564         pm_runtime_disable(&pdev->dev);
2565         return ret;
2566 }
2567
2568 static int cpsw_remove(struct platform_device *pdev)
2569 {
2570         struct net_device *ndev = platform_get_drvdata(pdev);
2571         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2572         int ret;
2573
2574         ret = pm_runtime_get_sync(&pdev->dev);
2575         if (ret < 0) {
2576                 pm_runtime_put_noidle(&pdev->dev);
2577                 return ret;
2578         }
2579
2580         if (cpsw->data.dual_emac)
2581                 unregister_netdev(cpsw->slaves[1].ndev);
2582         unregister_netdev(ndev);
2583
2584         cpts_release(cpsw->cpts);
2585         cpdma_ctlr_destroy(cpsw->dma);
2586         cpsw_remove_dt(pdev);
2587         pm_runtime_put_sync(&pdev->dev);
2588         pm_runtime_disable(&pdev->dev);
2589         return 0;
2590 }
2591
2592 #ifdef CONFIG_PM_SLEEP
2593 static int cpsw_suspend(struct device *dev)
2594 {
2595         struct net_device       *ndev = dev_get_drvdata(dev);
2596         struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
2597
2598         if (cpsw->data.dual_emac) {
2599                 int i;
2600
2601                 for (i = 0; i < cpsw->data.slaves; i++) {
2602                         if (netif_running(cpsw->slaves[i].ndev))
2603                                 cpsw_ndo_stop(cpsw->slaves[i].ndev);
2604                 }
2605         } else {
2606                 if (netif_running(ndev))
2607                         cpsw_ndo_stop(ndev);
2608         }
2609
2610         /* Select sleep pin state */
2611         pinctrl_pm_select_sleep_state(dev);
2612
2613         return 0;
2614 }
2615
2616 static int cpsw_resume(struct device *dev)
2617 {
2618         struct net_device       *ndev = dev_get_drvdata(dev);
2619         struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
2620
2621         /* Select default pin state */
2622         pinctrl_pm_select_default_state(dev);
2623
2624         /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2625         rtnl_lock();
2626         if (cpsw->data.dual_emac) {
2627                 int i;
2628
2629                 for (i = 0; i < cpsw->data.slaves; i++) {
2630                         if (netif_running(cpsw->slaves[i].ndev))
2631                                 cpsw_ndo_open(cpsw->slaves[i].ndev);
2632                 }
2633         } else {
2634                 if (netif_running(ndev))
2635                         cpsw_ndo_open(ndev);
2636         }
2637         rtnl_unlock();
2638
2639         return 0;
2640 }
2641 #endif
2642
2643 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2644
2645 static struct platform_driver cpsw_driver = {
2646         .driver = {
2647                 .name    = "cpsw",
2648                 .pm      = &cpsw_pm_ops,
2649                 .of_match_table = cpsw_of_mtable,
2650         },
2651         .probe = cpsw_probe,
2652         .remove = cpsw_remove,
2653 };
2654
2655 module_platform_driver(cpsw_driver);
2656
2657 MODULE_LICENSE("GPL");
2658 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
2659 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
2660 MODULE_DESCRIPTION("TI CPSW Ethernet driver");