drivers/net/ethernet/ti/cpsw.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

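/* Invoke func for each relevant slave port: only the port owned by this
 * netdev in dual-EMAC mode, all slave ports otherwise.
 */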
#define for_each_slave(priv, func, arg...)                              \
        do {                                                            \
                struct cpsw_slave *slave;                               \
                struct cpsw_common *cpsw = (priv)->cpsw;                \
                int n;                                                  \
                if (cpsw->data.dual_emac)                               \
                        (func)((cpsw)->slaves + priv->emac_port, ##arg);\
                else                                                    \
                        for (n = cpsw->data.slaves,                     \
                                        slave = cpsw->slaves;           \
                                        n; n--)                         \
                                (func)(slave++, ##arg);                 \
        } while (0)

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
                                    __be16 proto, u16 vid);

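/* Enable/disable promiscuous mode. In dual-EMAC mode this toggles ALE
 * bypass, which affects both interfaces; in switch mode it disables
 * address learning, ages out the ALE table and floods all unicast
 * traffic to the host port.
 */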
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
        struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        struct cpsw_ale *ale = cpsw->ale;
        int i;

        if (cpsw->data.dual_emac) {
                bool flag = false;

                /* Enabling promiscuous mode for one interface is common
                 * to both interfaces, as they share the same hardware
                 * resource.
                 */
                for (i = 0; i < cpsw->data.slaves; i++)
                        if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
                                flag = true;

                if (!enable && flag) {
                        enable = true;
                        dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
                }

                if (enable) {
                        /* Enable Bypass */
                        cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
                        /* Disable Bypass */
                        cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
                        dev_dbg(&ndev->dev, "promiscuity disabled\n");
                }
        } else {
                if (enable) {
                        unsigned long timeout = jiffies + HZ;

                        /* Disable Learn for all ports (host is port 0 and
                         * slaves are port 1 and up)
                         */
                        for (i = 0; i <= cpsw->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 1);
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NO_SA_UPDATE, 1);
                        }

                        /* Clear All Untouched entries */
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
                        do {
                                cpu_relax();
                                if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
                                        break;
                        } while (time_after(timeout, jiffies));
                        cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

                        /* Clear all mcast from ALE */
                        cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
                        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

                        /* Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
                        dev_dbg(&ndev->dev, "promiscuity enabled\n");
                } else {
                        /* Don't Flood All Unicast Packets to Host port */
                        cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

                        /* Enable Learn for all ports (host is port 0 and
                         * slaves are port 1 and up)
                         */
                        for (i = 0; i <= cpsw->data.slaves; i++) {
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NOLEARN, 0);
                                cpsw_ale_control_set(ale, i,
                                                     ALE_PORT_NO_SA_UPDATE, 0);
                        }
                        dev_dbg(&ndev->dev, "promiscuity disabled\n");
                }
        }
}

/**
 * cpsw_set_mc - add a multicast address to the ALE table, or delete it
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
                       int vid, int add)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int mask, flags, ret;

        if (vid < 0) {
                if (cpsw->data.dual_emac)
                        vid = cpsw->slaves[priv->emac_port].port_vlan;
                else
                        vid = 0;
        }

        mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
        flags = vid ? ALE_VLAN : 0;

        if (add)
                ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
        else
                ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

        return ret;
}

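/* vlan_for_each() callback: sync or flush a single multicast address on
 * one VLAN device, depending on sync_ctx->flush.
 */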
static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
        struct addr_sync_ctx *sync_ctx = ctx;
        struct netdev_hw_addr *ha;
        int found = 0, ret = 0;

        if (!vdev || !(vdev->flags & IFF_UP))
                return 0;

        /* vlan address is relevant if its sync_cnt != 0 */
        netdev_for_each_mc_addr(ha, vdev) {
                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
                        found = ha->sync_cnt;
                        break;
                }
        }

        if (found)
                sync_ctx->consumed++;

        if (sync_ctx->flush) {
                if (!found)
                        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
                return 0;
        }

        if (found)
                ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

        return ret;
}

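/* __hw_addr_ref_sync_dev() callbacks: program a multicast address on
 * every VLAN that references it, falling back to the real device when
 * not all references were consumed by VLANs.
 */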
static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
        struct addr_sync_ctx sync_ctx;
        int ret;

        sync_ctx.consumed = 0;
        sync_ctx.addr = addr;
        sync_ctx.ndev = ndev;
        sync_ctx.flush = 0;

        ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
        if (sync_ctx.consumed < num && !ret)
                ret = cpsw_set_mc(ndev, addr, -1, 1);

        return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
        struct addr_sync_ctx sync_ctx;

        sync_ctx.consumed = 0;
        sync_ctx.addr = addr;
        sync_ctx.ndev = ndev;
        sync_ctx.flush = 1;

        vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
        if (sync_ctx.consumed == num)
                cpsw_set_mc(ndev, addr, -1, 0);

        return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
        struct addr_sync_ctx *sync_ctx = ctx;
        struct netdev_hw_addr *ha;
        int found = 0;

        if (!vdev || !(vdev->flags & IFF_UP))
                return 0;

        /* vlan address is relevant if its sync_cnt != 0 */
        netdev_for_each_mc_addr(ha, vdev) {
                if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
                        found = ha->sync_cnt;
                        break;
                }
        }

        if (!found)
                return 0;

        sync_ctx->consumed++;
        cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
        return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
        struct addr_sync_ctx sync_ctx;

        sync_ctx.addr = addr;
        sync_ctx.ndev = ndev;
        sync_ctx.consumed = 0;

        vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
        if (sync_ctx.consumed < num)
                cpsw_set_mc(ndev, addr, -1, 0);

        return 0;
}

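/* ndo_set_rx_mode: apply the promiscuous/allmulti flags to the ALE and
 * sync the device's multicast list into the hardware address table.
 */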
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int slave_port = -1;

        if (cpsw->data.dual_emac)
                slave_port = priv->emac_port + 1;

        if (ndev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
                cpsw_set_promiscious(ndev, true);
                cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
                return;
        } else {
                /* Disable promiscuous mode */
                cpsw_set_promiscious(ndev, false);
        }

        /* Restore allmulti on vlans if necessary */
        cpsw_ale_set_allmulti(cpsw->ale,
                              ndev->flags & IFF_ALLMULTI, slave_port);

        /* add/remove mcast address either for real netdev or for vlan */
        __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
                               cpsw_del_mc_addr);
}

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
        writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
        writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

        cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
        writel_relaxed(0, &cpsw->wr_regs->tx_en);
        writel_relaxed(0, &cpsw->wr_regs->rx_en);

        cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

void cpsw_tx_handler(void *token, int len, int status)
{
        struct netdev_queue     *txq;
        struct sk_buff          *skb = token;
        struct net_device       *ndev = skb->dev;
        struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);

        /* Check whether the queue is stopped due to stalled tx dma;
         * if so, wake it up since a tx descriptor is free again.
         */
        txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
        if (unlikely(netif_tx_queue_stopped(txq)))
                netif_tx_wake_queue(txq);

        cpts_tx_timestamp(cpsw->cpts, skb);
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
}

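/* Parse the VLAN encapsulation word the hardware prepends to received
 * frames, set the skb's hwaccel VLAN tag and strip the in-band tag when
 * present.
 */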
static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
        struct cpsw_priv *priv = netdev_priv(skb->dev);
        struct cpsw_common *cpsw = priv->cpsw;
        u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
        u16 vtag, vid, prio, pkt_type;

        /* Remove VLAN header encapsulation word */
        skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

        pkt_type = (rx_vlan_encap_hdr >>
                    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
                    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
        /* Ignore unknown & priority-tagged packets */
        if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
            pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
                return;

        vid = (rx_vlan_encap_hdr >>
               CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
               VLAN_VID_MASK;
        /* Ignore vid 0 and pass packet as is */
        if (!vid)
                return;
        /* Ignore default vlans in dual mac mode */
        if (cpsw->data.dual_emac &&
            vid == cpsw->slaves[priv->emac_port].port_vlan)
                return;

        prio = (rx_vlan_encap_hdr >>
                CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
                CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

        vtag = (prio << VLAN_PRIO_SHIFT) | vid;
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

        /* strip vlan tag for VLAN-tagged packet */
        if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
                memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
                skb_pull(skb, VLAN_HLEN);
        }
}

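/* CPDMA rx completion callback: hand the finished skb to the network
 * stack and immediately submit a replacement buffer to the channel so
 * the descriptor count stays constant.
 */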
static void cpsw_rx_handler(void *token, int len, int status)
{
        struct cpdma_chan       *ch;
        struct sk_buff          *skb = token;
        struct sk_buff          *new_skb;
        struct net_device       *ndev = skb->dev;
        int                     ret = 0, port;
        struct cpsw_common      *cpsw = ndev_to_cpsw(ndev);
        struct cpsw_priv        *priv;

        if (cpsw->data.dual_emac) {
                port = CPDMA_RX_SOURCE_PORT(status);
                if (port) {
                        ndev = cpsw->slaves[--port].ndev;
                        skb->dev = ndev;
                }
        }

        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
                /* In dual emac mode check for all interfaces */
                if (cpsw->data.dual_emac && cpsw->usage_count &&
                    (status >= 0)) {
                        /* The packet received is for an interface which
                         * is already down while the other interface is
                         * still up and running. Instead of freeing the
                         * skb, which would reduce the number of rx
                         * descriptors in the DMA engine, requeue it
                         * back to cpdma.
                         */
                        new_skb = skb;
                        goto requeue;
                }

                /* the interface is going down, skbs are purged */
                dev_kfree_skb_any(skb);
                return;
        }

        new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
        if (new_skb) {
                skb_copy_queue_mapping(new_skb, skb);
                skb_put(skb, len);
                if (status & CPDMA_RX_VLAN_ENCAP)
                        cpsw_rx_vlan_encap(skb);
                priv = netdev_priv(ndev);
                if (priv->rx_ts_enabled)
                        cpts_rx_timestamp(cpsw->cpts, skb);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
                ndev->stats.rx_bytes += len;
                ndev->stats.rx_packets++;
                kmemleak_not_leak(new_skb);
        } else {
                ndev->stats.rx_dropped++;
                new_skb = skb;
        }

requeue:
        if (netif_dormant(ndev)) {
                dev_kfree_skb_any(new_skb);
                return;
        }

        ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
        ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
                                skb_tailroom(new_skb), 0);
        if (WARN_ON(ret < 0))
                dev_kfree_skb_any(new_skb);
}

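/* Split the NAPI budget (CPSW_POLL_WEIGHT) among the channels: each
 * rate-limited tx channel gets a budget proportional to its rate versus
 * max_rate, the unlimited channels share what is left, and any remainder
 * goes to the fastest channel. The rx budget is split evenly.
 */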
void cpsw_split_res(struct cpsw_common *cpsw)
{
        u32 consumed_rate = 0, bigest_rate = 0;
        struct cpsw_vector *txv = cpsw->txv;
        int i, ch_weight, rlim_ch_num = 0;
        int budget, bigest_rate_ch = 0;
        u32 ch_rate, max_rate;
        int ch_budget = 0;

        for (i = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(txv[i].ch);
                if (!ch_rate)
                        continue;

                rlim_ch_num++;
                consumed_rate += ch_rate;
        }

        if (cpsw->tx_ch_num == rlim_ch_num) {
                max_rate = consumed_rate;
        } else if (!rlim_ch_num) {
                ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
                bigest_rate = 0;
                max_rate = consumed_rate;
        } else {
                max_rate = cpsw->speed * 1000;

                /* if max_rate is less than expected due to reduced link
                 * speed, split proportionally according to the next
                 * potential max speed
                 */
                if (max_rate < consumed_rate)
                        max_rate *= 10;

                if (max_rate < consumed_rate)
                        max_rate *= 10;

                ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
                ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
                            (cpsw->tx_ch_num - rlim_ch_num);
                bigest_rate = (max_rate - consumed_rate) /
                              (cpsw->tx_ch_num - rlim_ch_num);
        }

        /* split tx weight/budget */
        budget = CPSW_POLL_WEIGHT;
        for (i = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(txv[i].ch);
                if (ch_rate) {
                        txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
                        if (!txv[i].budget)
                                txv[i].budget++;
                        if (ch_rate > bigest_rate) {
                                bigest_rate_ch = i;
                                bigest_rate = ch_rate;
                        }

                        ch_weight = (ch_rate * 100) / max_rate;
                        if (!ch_weight)
                                ch_weight++;
                        cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
                } else {
                        txv[i].budget = ch_budget;
                        if (!bigest_rate_ch)
                                bigest_rate_ch = i;
                        cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
                }

                budget -= txv[i].budget;
        }

        if (budget)
                txv[bigest_rate_ch].budget += budget;

        /* split rx budget */
        budget = CPSW_POLL_WEIGHT;
        ch_budget = budget / cpsw->rx_ch_num;
        for (i = 0; i < cpsw->rx_ch_num; i++) {
                cpsw->rxv[i].budget = ch_budget;
                budget -= ch_budget;
        }

        if (budget)
                cpsw->rxv[0].budget += budget;
}

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
        struct cpsw_common *cpsw = dev_id;

        writel(0, &cpsw->wr_regs->tx_en);
        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

        if (cpsw->quirk_irq) {
                disable_irq_nosync(cpsw->irqs_table[1]);
                cpsw->tx_irq_disabled = true;
        }

        napi_schedule(&cpsw->napi_tx);
        return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
        struct cpsw_common *cpsw = dev_id;

        cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
        writel(0, &cpsw->wr_regs->rx_en);

        if (cpsw->quirk_irq) {
                disable_irq_nosync(cpsw->irqs_table[0]);
                cpsw->rx_irq_disabled = true;
        }

        napi_schedule(&cpsw->napi_rx);
        return IRQ_HANDLED;
}

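/* NAPI poll handlers. The *_mq_poll variants service every channel with
 * pending completions, honouring the per-channel budgets computed by
 * cpsw_split_res(); once the overall budget is not exhausted, polling
 * completes and the corresponding interrupt is unmasked again.
 */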
static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
        u32                     ch_map;
        int                     num_tx, cur_budget, ch;
        struct cpsw_common      *cpsw = napi_to_cpsw(napi_tx);
        struct cpsw_vector      *txv;

        /* process every unprocessed channel */
        ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
        for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
                if (!(ch_map & 0x80))
                        continue;

                txv = &cpsw->txv[ch];
                if (unlikely(txv->budget > budget - num_tx))
                        cur_budget = budget - num_tx;
                else
                        cur_budget = txv->budget;

                num_tx += cpdma_chan_process(txv->ch, cur_budget);
                if (num_tx >= budget)
                        break;
        }

        if (num_tx < budget) {
                napi_complete(napi_tx);
                writel(0xff, &cpsw->wr_regs->tx_en);
        }

        return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
        struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
        int num_tx;

        num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
        if (num_tx < budget) {
                napi_complete(napi_tx);
                writel(0xff, &cpsw->wr_regs->tx_en);
                if (cpsw->tx_irq_disabled) {
                        cpsw->tx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[1]);
                }
        }

        return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
        u32                     ch_map;
        int                     num_rx, cur_budget, ch;
        struct cpsw_common      *cpsw = napi_to_cpsw(napi_rx);
        struct cpsw_vector      *rxv;

        /* process every unprocessed channel */
        ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
        for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
                if (!(ch_map & 0x01))
                        continue;

                rxv = &cpsw->rxv[ch];
                if (unlikely(rxv->budget > budget - num_rx))
                        cur_budget = budget - num_rx;
                else
                        cur_budget = rxv->budget;

                num_rx += cpdma_chan_process(rxv->ch, cur_budget);
                if (num_rx >= budget)
                        break;
        }

        if (num_rx < budget) {
                napi_complete_done(napi_rx, num_rx);
                writel(0xff, &cpsw->wr_regs->rx_en);
        }

        return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
        struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
        int num_rx;

        num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
        if (num_rx < budget) {
                napi_complete_done(napi_rx, num_rx);
                writel(0xff, &cpsw->wr_regs->rx_en);
                if (cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[0]);
                }
        }

        return num_rx;
}

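/* Write the soft-reset bit of a module and poll for up to a second for
 * the hardware to clear it.
 */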
static inline void soft_reset(const char *module, void __iomem *reg)
{
        unsigned long timeout = jiffies + HZ;

        writel_relaxed(1, reg);
        do {
                cpu_relax();
        } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

        WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
                               struct cpsw_priv *priv)
{
        slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
        slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        u32 shift, mask, val;

        val = readl_relaxed(&cpsw->regs->ptype);

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
        mask = 7 << shift;
        val = val & mask;

        return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        u32 shift, mask, val;

        val = readl_relaxed(&cpsw->regs->ptype);

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
        mask = (1 << --fifo) << shift;
        val = on ? val | mask : val & ~mask;

        writel_relaxed(val, &cpsw->regs->ptype);
}

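/* Propagate the PHY state to one slave port: on link up program MAC
 * control (speed, duplex, flow control) and enable ALE forwarding; on
 * link down disable forwarding and reset the MAC.
 */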
static void _cpsw_adjust_link(struct cpsw_slave *slave,
                              struct cpsw_priv *priv, bool *link)
{
        struct phy_device       *phy = slave->phy;
        u32                     mac_control = 0;
        u32                     slave_port;
        struct cpsw_common *cpsw = priv->cpsw;

        if (!phy)
                return;

        slave_port = cpsw_get_slave_port(slave->slave_num);

        if (phy->link) {
                mac_control = CPSW_SL_CTL_GMII_EN;

                if (phy->speed == 1000)
                        mac_control |= CPSW_SL_CTL_GIG;
                if (phy->duplex)
                        mac_control |= CPSW_SL_CTL_FULLDUPLEX;

                /* set speed_in input in case RMII mode is used in 100Mbps */
                if (phy->speed == 100)
                        mac_control |= CPSW_SL_CTL_IFCTL_A;
                /* in band mode only works in 10Mbps RGMII mode */
                else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
                        mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */

                if (priv->rx_pause)
                        mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

                if (priv->tx_pause)
                        mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

                if (mac_control != slave->mac_control)
                        cpsw_sl_ctl_set(slave->mac_sl, mac_control);

                /* enable forwarding */
                cpsw_ale_control_set(cpsw->ale, slave_port,
                                     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

                *link = true;

                if (priv->shp_cfg_speed &&
                    priv->shp_cfg_speed != slave->phy->speed &&
                    !cpsw_shp_is_off(priv))
                        dev_warn(priv->dev,
                                 "Speed was changed, CBS shaper speeds are changed!");
        } else {
                mac_control = 0;
                /* disable forwarding */
                cpsw_ale_control_set(cpsw->ale, slave_port,
                                     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

                cpsw_sl_wait_for_idle(slave->mac_sl, 100);

                cpsw_sl_ctl_reset(slave->mac_sl);
        }

        if (mac_control != slave->mac_control)
                phy_print_status(phy);

        slave->mac_control = mac_control;
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
        int i, speed;

        for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
                if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
                        speed += cpsw->slaves[i].phy->speed;

        return speed;
}

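/* A budget re-split is needed only when the aggregate link speed has
 * changed and the tx channels are a mix of rate-limited and unlimited
 * ones.
 */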
static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
        int i, rlim_ch_num;
        int speed, ch_rate;

        /* re-split resources only in case speed was changed */
        speed = cpsw_get_common_speed(cpsw);
        if (speed == cpsw->speed || !speed)
                return 0;

        cpsw->speed = speed;

        for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
                ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
                if (!ch_rate)
                        break;

                rlim_ch_num++;
        }

        /* cases not dependent on speed */
        if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
                return 0;

        return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
        struct cpsw_priv        *priv = netdev_priv(ndev);
        struct cpsw_common      *cpsw = priv->cpsw;
        bool                    link = false;

        for_each_slave(priv, _cpsw_adjust_link, priv, &link);

        if (link) {
                if (cpsw_need_resplit(cpsw))
                        cpsw_split_res(cpsw);

                netif_carrier_on(ndev);
                if (netif_running(ndev))
                        netif_tx_wake_all_queues(ndev);
        } else {
                netif_carrier_off(ndev);
                netif_tx_stop_all_queues(ndev);
        }
}

static inline void cpsw_add_dual_emac_def_ale_entries(
                struct cpsw_priv *priv, struct cpsw_slave *slave,
                u32 slave_port)
{
        struct cpsw_common *cpsw = priv->cpsw;
        u32 port_mask = 1 << slave_port | ALE_PORT_HOST;

        if (cpsw->version == CPSW_VERSION_1)
                slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
        else
                slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
        cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
                          port_mask, port_mask, 0);
        cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
                           ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
                           HOST_PORT_NUM, ALE_VLAN |
                           ALE_SECURE, slave->port_vlan);
        cpsw_ale_control_set(cpsw->ale, slave_port,
                             ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

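/* Bring up one slave port: reset the MAC, program priority maps, MTU
 * and MAC address, install the default ALE entries and connect and
 * start the PHY.
 */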
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        u32 slave_port;
        struct phy_device *phy;
        struct cpsw_common *cpsw = priv->cpsw;

        cpsw_sl_reset(slave->mac_sl, 100);
        cpsw_sl_ctl_reset(slave->mac_sl);

        /* setup priority mapping */
        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
                          RX_PRIORITY_MAPPING);

        switch (cpsw->version) {
        case CPSW_VERSION_1:
                slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
                /* Increase RX FIFO size to 5 for supporting full-duplex
                 * flow control mode
                 */
                slave_write(slave,
                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
                            CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
                break;
        case CPSW_VERSION_2:
        case CPSW_VERSION_3:
        case CPSW_VERSION_4:
                slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
                /* Increase RX FIFO size to 5 for supporting full-duplex
                 * flow control mode
                 */
                slave_write(slave,
                            (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
                            CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
                break;
        }

        /* setup max packet size, and mac address */
        cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
                          cpsw->rx_packet_max);
        cpsw_set_slave_mac(slave, priv);

        slave->mac_control = 0; /* no link yet */

        slave_port = cpsw_get_slave_port(slave->slave_num);

        if (cpsw->data.dual_emac)
                cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
        else
                cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);

        if (slave->data->phy_node) {
                phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
                if (!phy) {
                        dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
                                slave->data->phy_node,
                                slave->slave_num);
                        return;
                }
        } else {
                phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
                if (IS_ERR(phy)) {
                        dev_err(priv->dev,
                                "phy \"%s\" not found on slave %d, err %ld\n",
                                slave->data->phy_id, slave->slave_num,
                                PTR_ERR(phy));
                        return;
                }
        }

        slave->phy = phy;

        phy_attached_info(slave->phy);

        phy_start(slave->phy);

        /* Configure GMII_SEL register */
        if (!IS_ERR(slave->data->ifphy))
                phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
                                 slave->data->phy_if);
        else
                cpsw_phy_sel(cpsw->dev, slave->phy->interface,
                             slave->slave_num);
}

static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        const int vlan = cpsw->data.default_vlan;
        u32 reg;
        int i;
        int unreg_mcast_mask;

        reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
               CPSW2_PORT_VLAN;

        writel(vlan, &cpsw->host_port_regs->port_vlan);

        for (i = 0; i < cpsw->data.slaves; i++)
                slave_write(cpsw->slaves + i, vlan, reg);

        if (priv->ndev->flags & IFF_ALLMULTI)
                unreg_mcast_mask = ALE_ALL_PORTS;
        else
                unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;

        cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
                          ALE_ALL_PORTS, ALE_ALL_PORTS,
                          unreg_mcast_mask);
}

static void cpsw_init_host_port(struct cpsw_priv *priv)
{
        u32 fifo_mode;
        u32 control_reg;
        struct cpsw_common *cpsw = priv->cpsw;

        /* soft reset the controller and initialize ale */
        soft_reset("cpsw", &cpsw->regs->soft_reset);
        cpsw_ale_start(cpsw->ale);

        /* switch to vlan aware mode */
        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
                             CPSW_ALE_VLAN_AWARE);
        control_reg = readl(&cpsw->regs->control);
        control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
        writel(control_reg, &cpsw->regs->control);
        fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
                     CPSW_FIFO_NORMAL_MODE;
        writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);

        /* setup host port priority mapping */
        writel_relaxed(CPDMA_TX_PRIORITY_MAP,
                       &cpsw->host_port_regs->cpdma_tx_pri_map);
        writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);

        cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
                             ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

        if (!cpsw->data.dual_emac) {
                cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
                                   0, 0);
                cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
                                   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
        }
}

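/* Pre-fill every rx channel with as many receive buffers as its CPDMA
 * descriptor allotment allows.
 */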
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct sk_buff *skb;
        int ch_buf_num;
        int ch, i, ret;

        for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
                ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
                for (i = 0; i < ch_buf_num; i++) {
                        skb = __netdev_alloc_skb_ip_align(priv->ndev,
                                                          cpsw->rx_packet_max,
                                                          GFP_KERNEL);
                        if (!skb) {
                                cpsw_err(priv, ifup, "cannot allocate skb\n");
                                return -ENOMEM;
                        }

                        skb_set_queue_mapping(skb, ch);
                        ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
                                                skb->data, skb_tailroom(skb),
                                                0);
                        if (ret < 0) {
                                cpsw_err(priv, ifup,
                                         "cannot submit skb to channel %d rx, error %d\n",
                                         ch, ret);
                                kfree_skb(skb);
                                return ret;
                        }
                        kmemleak_not_leak(skb);
                }

                cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
                          ch, ch_buf_num);
        }

        return 0;
}

static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
        u32 slave_port;

        slave_port = cpsw_get_slave_port(slave->slave_num);

        if (!slave->phy)
                return;
        phy_stop(slave->phy);
        phy_disconnect(slave->phy);
        slave->phy = NULL;
        cpsw_ale_control_set(cpsw->ale, slave_port,
                             ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
        cpsw_sl_reset(slave->mac_sl, 100);
        cpsw_sl_ctl_reset(slave->mac_sl);
}

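/* Map a traffic class to a tx FIFO: the last (lowest-priority) tc uses
 * FIFO 0, which cannot be shaped; higher-priority classes use the
 * shaped FIFOs in reverse order.
 */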
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
        if (tc == num_tc - 1)
                return 0;

        return CPSW_FIFO_SHAPERS_NUM - tc;
}

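/* Program the per-FIFO send percentage for CBS shaping, checking that
 * shapers stay enabled contiguously from the highest FIFO downwards and
 * that the summed bandwidth fits within the link speed.
 */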
static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
        struct cpsw_common *cpsw = priv->cpsw;
        u32 val = 0, send_pct, shift;
        struct cpsw_slave *slave;
        int pct = 0, i;

        if (bw > priv->shp_cfg_speed * 1000)
                goto err;

        /* shaping has to stay enabled for the highest fifos linearly,
         * and the fifo bw must be no more than the interface can allow
         */
        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        send_pct = slave_read(slave, SEND_PERCENT);
        for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
                if (!bw) {
                        if (i >= fifo || !priv->fifo_bw[i])
                                continue;

                        dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
                        continue;
                }

                if (!priv->fifo_bw[i] && i > fifo) {
                        dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
                        return -EINVAL;
                }

                shift = (i - 1) * 8;
                if (i == fifo) {
                        send_pct &= ~(CPSW_PCT_MASK << shift);
                        val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
                        if (!val)
                                val = 1;

                        send_pct |= val << shift;
                        pct += val;
                        continue;
                }

                if (priv->fifo_bw[i])
                        pct += (send_pct >> shift) & CPSW_PCT_MASK;
        }

        if (pct >= 100)
                goto err;

        slave_write(slave, send_pct, SEND_PERCENT);
        priv->fifo_bw[fifo] = bw;

        dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
                 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

        return 0;
err:
        dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
        return -EINVAL;
}

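/* Enable or disable rate limiting for one tx FIFO: set its bandwidth,
 * mark the FIFO queue as rate limited in TX_IN_CTL and toggle the
 * transmit shaper.
 */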
static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        u32 tx_in_ctl_rg, val;
        int ret;

        ret = cpsw_set_fifo_bw(priv, fifo, bw);
        if (ret)
                return ret;

        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
                       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

        if (!bw)
                cpsw_fifo_shp_on(priv, fifo, bw);

        val = slave_read(slave, tx_in_ctl_rg);
        if (cpsw_shp_is_off(priv)) {
                /* disable FIFOs rate limited queues */
                val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

                /* set type of FIFO queues to normal priority mode */
                val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

                /* set type of FIFO queues to be rate limited */
                if (bw)
                        val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
                else
                        priv->shp_cfg_speed = 0;
        }

        /* toggle a FIFO rate limited queue */
        if (bw)
                val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
        else
                val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
        slave_write(slave, val, tx_in_ctl_rg);

        /* FIFO transmit shape enable */
        cpsw_fifo_shp_on(priv, fifo, bw);
        return 0;
}

/* Defaults:
 * class A - prio 3
 * class B - prio 2
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
                        struct tc_cbs_qopt_offload *qopt)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        struct cpsw_slave *slave;
        int prev_speed = 0;
        int tc, ret, fifo;
        u32 bw = 0;

        tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

        /* enable channels in reverse order, as the highest FIFOs must be
         * rate limited first and for compliance with CPDMA rate limited
         * channels, which are also used in reverse order. FIFO0 cannot
         * be rate limited.
         */
        fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
        if (!fifo) {
                dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
                return -EINVAL;
        }

        /* do nothing, it's disabled anyway */
        if (!qopt->enable && !priv->fifo_bw[fifo])
                return 0;

        /* shapers can be set if link speed is known */
        slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
        if (slave->phy && slave->phy->link) {
                if (priv->shp_cfg_speed &&
                    priv->shp_cfg_speed != slave->phy->speed)
                        prev_speed = priv->shp_cfg_speed;

                priv->shp_cfg_speed = slave->phy->speed;
        }

        if (!priv->shp_cfg_speed) {
                dev_err(priv->dev, "Link speed is not known");
                return -1;
        }

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }

        bw = qopt->enable ? qopt->idleslope : 0;
        ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
        if (ret) {
                priv->shp_cfg_speed = prev_speed;
                prev_speed = 0;
        }

        if (bw && prev_speed)
                dev_warn(priv->dev,
                         "Speed was changed, CBS shaper speeds are changed!");

        pm_runtime_put_sync(cpsw->dev);
        return ret;
}

static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        int fifo, bw;

        for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
                bw = priv->fifo_bw[fifo];
                if (!bw)
                        continue;

                cpsw_set_fifo_rlimit(priv, fifo, bw);
        }
}

static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
        struct cpsw_common *cpsw = priv->cpsw;
        u32 tx_prio_map = 0;
        int i, tc, fifo;
        u32 tx_prio_rg;

        if (!priv->mqprio_hw)
                return;

        for (i = 0; i < 8; i++) {
                tc = netdev_get_prio_tc_map(priv->ndev, i);
                fifo = CPSW_FIFO_SHAPERS_NUM - tc;
                tx_prio_map |= fifo << (4 * i);
        }

        tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
                     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

        slave_write(slave, tx_prio_map, tx_prio_rg);
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
        struct cpsw_priv *priv = arg;

        if (!vdev)
                return 0;

        cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
        return 0;
}

/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
        /* restore vlan configurations */
        vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);

        /* restore MQPRIO offload */
        for_each_slave(priv, cpsw_mqprio_resume, priv);

        /* restore CBS offload */
        for_each_slave(priv, cpsw_cbs_resume, priv);
}

static int cpsw_ndo_open(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;
        int ret;
        u32 reg;

        ret = pm_runtime_get_sync(cpsw->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(cpsw->dev);
                return ret;
        }

        netif_carrier_off(ndev);

        /* Notify the stack of the actual queue counts. */
        ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
        if (ret) {
                dev_err(priv->dev, "cannot set real number of tx queues\n");
                goto err_cleanup;
        }

        ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
        if (ret) {
                dev_err(priv->dev, "cannot set real number of rx queues\n");
                goto err_cleanup;
        }

        reg = cpsw->version;

        dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
                 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
                 CPSW_RTL_VERSION(reg));

        /* Initialize host and slave ports */
        if (!cpsw->usage_count)
                cpsw_init_host_port(priv);
        for_each_slave(priv, cpsw_slave_open, priv);

        /* Add default VLAN */
        if (!cpsw->data.dual_emac)
                cpsw_add_default_vlan(priv);
        else
                cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
                                  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);

        /* initialize shared resources for every ndev */
        if (!cpsw->usage_count) {
                /* disable priority elevation */
                writel_relaxed(0, &cpsw->regs->ptype);

                /* enable statistics collection on all ports */
                writel_relaxed(0x7, &cpsw->regs->stat_port_en);

                /* Enable internal fifo flow control */
                writel(0x7, &cpsw->regs->flow_control);

                napi_enable(&cpsw->napi_rx);
                napi_enable(&cpsw->napi_tx);

                if (cpsw->tx_irq_disabled) {
                        cpsw->tx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[1]);
                }

                if (cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
                        enable_irq(cpsw->irqs_table[0]);
                }

                ret = cpsw_fill_rx_channels(priv);
                if (ret < 0)
                        goto err_cleanup;

                if (cpts_register(cpsw->cpts))
                        dev_err(priv->dev, "error registering cpts device\n");

        }

        cpsw_restore(priv);

        /* Enable Interrupt pacing if configured */
        if (cpsw->coal_intvl != 0) {
                struct ethtool_coalesce coal;

                coal.rx_coalesce_usecs = cpsw->coal_intvl;
                cpsw_set_coalesce(ndev, &coal);
        }

        cpdma_ctlr_start(cpsw->dma);
        cpsw_intr_enable(cpsw);
        cpsw->usage_count++;

        return 0;

err_cleanup:
        if (!cpsw->usage_count) {
                cpdma_ctlr_stop(cpsw->dma);
                for_each_slave(priv, cpsw_slave_stop, cpsw);
        }

        pm_runtime_put_sync(cpsw->dev);
        netif_carrier_off(priv->ndev);
        return ret;
}

static int cpsw_ndo_stop(struct net_device *ndev)
{
        struct cpsw_priv *priv = netdev_priv(ndev);
        struct cpsw_common *cpsw = priv->cpsw;

        cpsw_info(priv, ifdown, "shutting down cpsw device\n");
        __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
        netif_tx_stop_all_queues(priv->ndev);
        netif_carrier_off(priv->ndev);

        if (cpsw->usage_count <= 1) {
                napi_disable(&cpsw->napi_rx);
                napi_disable(&cpsw->napi_tx);
                cpts_unregister(cpsw->cpts);
                cpsw_intr_disable(cpsw);
                cpdma_ctlr_stop(cpsw->dma);
                cpsw_ale_stop(cpsw->ale);
        }
        for_each_slave(priv, cpsw_slave_stop, cpsw);

        if (cpsw_need_resplit(cpsw))
                cpsw_split_res(cpsw);

        cpsw->usage_count--;
        pm_runtime_put_sync(cpsw->dev);
        return 0;
}

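/* Queue an skb on the CPDMA tx channel selected by its queue mapping,
 * stopping the netdev queue when the channel runs out of descriptors.
 */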
1464 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1465                                        struct net_device *ndev)
1466 {
1467         struct cpsw_priv *priv = netdev_priv(ndev);
1468         struct cpsw_common *cpsw = priv->cpsw;
1469         struct cpts *cpts = cpsw->cpts;
1470         struct netdev_queue *txq;
1471         struct cpdma_chan *txch;
1472         int ret, q_idx;
1473
1474         if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
1475                 cpsw_err(priv, tx_err, "packet pad failed\n");
1476                 ndev->stats.tx_dropped++;
1477                 return NET_XMIT_DROP;
1478         }
1479
1480         if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
1481             priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
1482                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1483
1484         q_idx = skb_get_queue_mapping(skb);
1485         if (q_idx >= cpsw->tx_ch_num)
1486                 q_idx = q_idx % cpsw->tx_ch_num;
1487
1488         txch = cpsw->txv[q_idx].ch;
1489         txq = netdev_get_tx_queue(ndev, q_idx);
1490         skb_tx_timestamp(skb);
1491         ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
1492                                 priv->emac_port + cpsw->data.dual_emac);
1493         if (unlikely(ret != 0)) {
1494                 cpsw_err(priv, tx_err, "desc submit failed\n");
1495                 goto fail;
1496         }
1497
        /* If there are no more free TX descriptors, tell the kernel to
         * stop sending us frames.
         */
        if (unlikely(!cpdma_check_free_tx_desc(txch))) {
                netif_tx_stop_queue(txq);

                /* Barrier, so that stop_queue is visible to other CPUs */
1505                 smp_mb__after_atomic();
1506
1507                 if (cpdma_check_free_tx_desc(txch))
1508                         netif_tx_wake_queue(txq);
1509         }
1510
1511         return NETDEV_TX_OK;
1512 fail:
1513         ndev->stats.tx_dropped++;
1514         netif_tx_stop_queue(txq);
1515
        /* Barrier, so that stop_queue is visible to other CPUs */
1517         smp_mb__after_atomic();
1518
1519         if (cpdma_check_free_tx_desc(txch))
1520                 netif_tx_wake_queue(txq);
1521
1522         return NETDEV_TX_BUSY;
1523 }
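
/* A minimal sketch of the lockless stop/wake handshake used above,
 * with free_count() standing in for cpdma_check_free_tx_desc():
 *
 *	// xmit path, descriptors exhausted:
 *	netif_tx_stop_queue(txq);
 *	smp_mb__after_atomic();		// make the stop visible
 *	if (free_count(txch))		// completion may have freed
 *		netif_tx_wake_queue(txq);	// descriptors meanwhile
 *
 * The barrier pairs with the completion path, which frees descriptors
 * and then wakes stopped queues; without it the re-check could be
 * reordered before the stop and the queue could stall even though
 * descriptors are available.
 */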
1524
1525 #if IS_ENABLED(CONFIG_TI_CPTS)
1526
1527 static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
1528 {
1529         struct cpsw_common *cpsw = priv->cpsw;
1530         struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
1531         u32 ts_en, seq_id;
1532
1533         if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
1534                 slave_write(slave, 0, CPSW1_TS_CTL);
1535                 return;
1536         }
1537
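        /* Byte 30 is the offset of the sequenceId field in the common
         * PTP message header; CPTS reports it with each timestamp event
         * so the driver can match events to packets.
         */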
1538         seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1539         ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1540
1541         if (priv->tx_ts_enabled)
1542                 ts_en |= CPSW_V1_TS_TX_EN;
1543
1544         if (priv->rx_ts_enabled)
1545                 ts_en |= CPSW_V1_TS_RX_EN;
1546
1547         slave_write(slave, ts_en, CPSW1_TS_CTL);
1548         slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
1549 }
1550
1551 static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
1552 {
1553         struct cpsw_slave *slave;
1554         struct cpsw_common *cpsw = priv->cpsw;
1555         u32 ctrl, mtype;
1556
1557         slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1558
1559         ctrl = slave_read(slave, CPSW2_CONTROL);
1560         switch (cpsw->version) {
1561         case CPSW_VERSION_2:
1562                 ctrl &= ~CTRL_V2_ALL_TS_MASK;
1563
1564                 if (priv->tx_ts_enabled)
1565                         ctrl |= CTRL_V2_TX_TS_BITS;
1566
1567                 if (priv->rx_ts_enabled)
1568                         ctrl |= CTRL_V2_RX_TS_BITS;
1569                 break;
1570         case CPSW_VERSION_3:
1571         default:
1572                 ctrl &= ~CTRL_V3_ALL_TS_MASK;
1573
1574                 if (priv->tx_ts_enabled)
1575                         ctrl |= CTRL_V3_TX_TS_BITS;
1576
1577                 if (priv->rx_ts_enabled)
1578                         ctrl |= CTRL_V3_RX_TS_BITS;
1579                 break;
1580         }
1581
1582         mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1583
1584         slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
1585         slave_write(slave, ctrl, CPSW2_CONTROL);
1586         writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
1587         writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
1588 }
1589
1590 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1591 {
1592         struct cpsw_priv *priv = netdev_priv(dev);
1593         struct hwtstamp_config cfg;
1594         struct cpsw_common *cpsw = priv->cpsw;
1595
1596         if (cpsw->version != CPSW_VERSION_1 &&
1597             cpsw->version != CPSW_VERSION_2 &&
1598             cpsw->version != CPSW_VERSION_3)
1599                 return -EOPNOTSUPP;
1600
1601         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1602                 return -EFAULT;
1603
1604         /* reserved for future extensions */
1605         if (cfg.flags)
1606                 return -EINVAL;
1607
1608         if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
1609                 return -ERANGE;
1610
1611         switch (cfg.rx_filter) {
1612         case HWTSTAMP_FILTER_NONE:
1613                 priv->rx_ts_enabled = 0;
1614                 break;
1615         case HWTSTAMP_FILTER_ALL:
1616         case HWTSTAMP_FILTER_NTP_ALL:
1617                 return -ERANGE;
1618         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1619         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1620         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1621                 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1622                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
1623                 break;
1624         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1625         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1626         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1627         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1628         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1629         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1630         case HWTSTAMP_FILTER_PTP_V2_EVENT:
1631         case HWTSTAMP_FILTER_PTP_V2_SYNC:
1632         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1633                 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
1634                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
1635                 break;
1636         default:
1637                 return -ERANGE;
1638         }
1639
1640         priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
1641
1642         switch (cpsw->version) {
1643         case CPSW_VERSION_1:
1644                 cpsw_hwtstamp_v1(priv);
1645                 break;
1646         case CPSW_VERSION_2:
1647         case CPSW_VERSION_3:
1648                 cpsw_hwtstamp_v2(priv);
1649                 break;
1650         default:
1651                 WARN_ON(1);
1652         }
1653
1654         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1655 }
1656
1657 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1658 {
1659         struct cpsw_common *cpsw = ndev_to_cpsw(dev);
1660         struct cpsw_priv *priv = netdev_priv(dev);
1661         struct hwtstamp_config cfg;
1662
1663         if (cpsw->version != CPSW_VERSION_1 &&
1664             cpsw->version != CPSW_VERSION_2 &&
1665             cpsw->version != CPSW_VERSION_3)
1666                 return -EOPNOTSUPP;
1667
1668         cfg.flags = 0;
1669         cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1670         cfg.rx_filter = priv->rx_ts_enabled;
1671
1672         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1673 }
1674 #else
1675 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1676 {
1677         return -EOPNOTSUPP;
1678 }
1679
1680 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1681 {
1682         return -EOPNOTSUPP;
1683 }
1684 #endif /*CONFIG_TI_CPTS*/
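
/* Userspace reaches cpsw_hwtstamp_set() through the standard
 * SIOCSHWTSTAMP ioctl.  A minimal sketch, assuming "eth0" and an
 * already-open AF_INET socket fd (error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note that narrower RX filters (e.g. HWTSTAMP_FILTER_PTP_V2_L4_SYNC)
 * are upgraded to HWTSTAMP_FILTER_PTP_V2_EVENT and reported back in
 * cfg.rx_filter.
 */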
1685
1686 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1687 {
1688         struct cpsw_priv *priv = netdev_priv(dev);
1689         struct cpsw_common *cpsw = priv->cpsw;
1690         int slave_no = cpsw_slave_index(cpsw, priv);
1691
1692         if (!netif_running(dev))
1693                 return -EINVAL;
1694
1695         switch (cmd) {
1696         case SIOCSHWTSTAMP:
1697                 return cpsw_hwtstamp_set(dev, req);
1698         case SIOCGHWTSTAMP:
1699                 return cpsw_hwtstamp_get(dev, req);
1700         }
1701
1702         if (!cpsw->slaves[slave_no].phy)
1703                 return -EOPNOTSUPP;
1704         return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
1705 }
1706
1707 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1708 {
1709         struct cpsw_priv *priv = netdev_priv(ndev);
1710         struct cpsw_common *cpsw = priv->cpsw;
1711         int ch;
1712
1713         cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1714         ndev->stats.tx_errors++;
1715         cpsw_intr_disable(cpsw);
1716         for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1717                 cpdma_chan_stop(cpsw->txv[ch].ch);
1718                 cpdma_chan_start(cpsw->txv[ch].ch);
1719         }
1720
1721         cpsw_intr_enable(cpsw);
1722         netif_trans_update(ndev);
1723         netif_tx_wake_all_queues(ndev);
1724 }
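
/* The networking core invokes cpsw_ndo_tx_timeout() from its TX
 * watchdog once a queue has been stopped longer than
 * ndev->watchdog_timeo; recovery here restarts every TX channel rather
 * than resetting the whole switch.
 */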
1725
1726 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1727 {
1728         struct cpsw_priv *priv = netdev_priv(ndev);
1729         struct sockaddr *addr = (struct sockaddr *)p;
1730         struct cpsw_common *cpsw = priv->cpsw;
1731         int flags = 0;
1732         u16 vid = 0;
1733         int ret;
1734
1735         if (!is_valid_ether_addr(addr->sa_data))
1736                 return -EADDRNOTAVAIL;
1737
1738         ret = pm_runtime_get_sync(cpsw->dev);
1739         if (ret < 0) {
1740                 pm_runtime_put_noidle(cpsw->dev);
1741                 return ret;
1742         }
1743
1744         if (cpsw->data.dual_emac) {
1745                 vid = cpsw->slaves[priv->emac_port].port_vlan;
1746                 flags = ALE_VLAN;
1747         }
1748
1749         cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
1750                            flags, vid);
1751         cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
1752                            flags, vid);
1753
1754         memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1755         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1756         for_each_slave(priv, cpsw_set_slave_mac, priv);
1757
1758         pm_runtime_put(cpsw->dev);
1759
1760         return 0;
1761 }
1762
1763 static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
1764                                 unsigned short vid)
1765 {
1766         int ret;
1767         int unreg_mcast_mask = 0;
1768         int mcast_mask;
1769         u32 port_mask;
1770         struct cpsw_common *cpsw = priv->cpsw;
1771
1772         if (cpsw->data.dual_emac) {
1773                 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
1774
1775                 mcast_mask = ALE_PORT_HOST;
1776                 if (priv->ndev->flags & IFF_ALLMULTI)
1777                         unreg_mcast_mask = mcast_mask;
1778         } else {
1779                 port_mask = ALE_ALL_PORTS;
1780                 mcast_mask = port_mask;
1781
1782                 if (priv->ndev->flags & IFF_ALLMULTI)
1783                         unreg_mcast_mask = ALE_ALL_PORTS;
1784                 else
1785                         unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1786         }
1787
1788         ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
1789                                 unreg_mcast_mask);
1790         if (ret != 0)
1791                 return ret;
1792
1793         ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1794                                  HOST_PORT_NUM, ALE_VLAN, vid);
1795         if (ret != 0)
1796                 goto clean_vid;
1797
1798         ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
1799                                  mcast_mask, ALE_VLAN, vid, 0);
1800         if (ret != 0)
1801                 goto clean_vlan_ucast;
1802         return 0;
1803
1804 clean_vlan_ucast:
1805         cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1806                            HOST_PORT_NUM, ALE_VLAN, vid);
1807 clean_vid:
1808         cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1809         return ret;
1810 }
1811
1812 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
1813                                     __be16 proto, u16 vid)
1814 {
1815         struct cpsw_priv *priv = netdev_priv(ndev);
1816         struct cpsw_common *cpsw = priv->cpsw;
1817         int ret;
1818
1819         if (vid == cpsw->data.default_vlan)
1820                 return 0;
1821
1822         ret = pm_runtime_get_sync(cpsw->dev);
1823         if (ret < 0) {
1824                 pm_runtime_put_noidle(cpsw->dev);
1825                 return ret;
1826         }
1827
1828         if (cpsw->data.dual_emac) {
                /* In dual-EMAC mode, the reserved per-port VLAN IDs must
                 * not be used to create VLAN interfaces, as that would
                 * break the dual-EMAC port separation.
                 */
1833                 int i;
1834
1835                 for (i = 0; i < cpsw->data.slaves; i++) {
1836                         if (vid == cpsw->slaves[i].port_vlan) {
1837                                 ret = -EINVAL;
1838                                 goto err;
1839                         }
1840                 }
1841         }
1842
1843         dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
1844         ret = cpsw_add_vlan_ale_entry(priv, vid);
1845 err:
1846         pm_runtime_put(cpsw->dev);
1847         return ret;
1848 }
1849
1850 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
1851                                      __be16 proto, u16 vid)
1852 {
1853         struct cpsw_priv *priv = netdev_priv(ndev);
1854         struct cpsw_common *cpsw = priv->cpsw;
1855         int ret;
1856
1857         if (vid == cpsw->data.default_vlan)
1858                 return 0;
1859
1860         ret = pm_runtime_get_sync(cpsw->dev);
1861         if (ret < 0) {
1862                 pm_runtime_put_noidle(cpsw->dev);
1863                 return ret;
1864         }
1865
1866         if (cpsw->data.dual_emac) {
1867                 int i;
1868
1869                 for (i = 0; i < cpsw->data.slaves; i++) {
1870                         if (vid == cpsw->slaves[i].port_vlan)
1871                                 goto err;
1872                 }
1873         }
1874
        dev_info(priv->dev, "Removing vlanid %d from vlan filter\n", vid);
1876         ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1877         ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1878                                   HOST_PORT_NUM, ALE_VLAN, vid);
1879         ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
1880                                   0, ALE_VLAN, vid);
1881         ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
1882 err:
1883         pm_runtime_put(cpsw->dev);
1884         return ret;
1885 }
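
/* The two ndo_vlan_rx_*_vid hooks above are invoked by the 8021q core
 * when a VLAN device is created or removed on a cpsw port, e.g.
 * (interface names assumed):
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100
 *	ip link del eth0.100
 *
 * Adding VID 100 installs three ALE entries via
 * cpsw_add_vlan_ale_entry(): the VLAN itself with its port masks, a
 * unicast entry for the port MAC, and a multicast entry for broadcast.
 */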
1886
1887 static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
1888 {
1889         struct cpsw_priv *priv = netdev_priv(ndev);
1890         struct cpsw_common *cpsw = priv->cpsw;
1891         struct cpsw_slave *slave;
1892         u32 min_rate;
1893         u32 ch_rate;
1894         int i, ret;
1895
1896         ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
1897         if (ch_rate == rate)
1898                 return 0;
1899
        ch_rate = rate * 1000;
        min_rate = cpdma_chan_get_min_rate(cpsw->dma);
        if (ch_rate && ch_rate < min_rate) {
                dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
                        min_rate);
                return -EINVAL;
        }

        if (rate > cpsw->speed) {
                dev_err(priv->dev, "The channel rate cannot exceed the aggregate port speed (%dMbps)",
                        cpsw->speed);
                return -EINVAL;
        }
1912
1913         ret = pm_runtime_get_sync(cpsw->dev);
1914         if (ret < 0) {
1915                 pm_runtime_put_noidle(cpsw->dev);
1916                 return ret;
1917         }
1918
1919         ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
1920         pm_runtime_put(cpsw->dev);
1921
1922         if (ret)
1923                 return ret;
1924
        /* Update the cached max rate on each slave's TX queue */
1926         for (i = 0; i < cpsw->data.slaves; i++) {
1927                 slave = &cpsw->slaves[i];
1928                 if (!slave->ndev)
1929                         continue;
1930
1931                 netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
1932         }
1933
1934         cpsw_split_res(cpsw);
1935         return ret;
1936 }
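
/* cpsw_ndo_set_tx_maxrate() is reached through the per-queue sysfs
 * attribute; e.g. to cap TX queue 0 at 100 Mbit/s (interface name
 * assumed):
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * The value is in Mbit/s; writing 0 removes the cap, and
 * cpsw_split_res() then redistributes bandwidth across the channels.
 */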
1937
1938 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
1939 {
1940         struct tc_mqprio_qopt_offload *mqprio = type_data;
1941         struct cpsw_priv *priv = netdev_priv(ndev);
1942         struct cpsw_common *cpsw = priv->cpsw;
1943         int fifo, num_tc, count, offset;
1944         struct cpsw_slave *slave;
1945         u32 tx_prio_map = 0;
1946         int i, tc, ret;
1947
1948         num_tc = mqprio->qopt.num_tc;
1949         if (num_tc > CPSW_TC_NUM)
1950                 return -EINVAL;
1951
1952         if (mqprio->mode != TC_MQPRIO_MODE_DCB)
1953                 return -EINVAL;
1954
1955         ret = pm_runtime_get_sync(cpsw->dev);
1956         if (ret < 0) {
1957                 pm_runtime_put_noidle(cpsw->dev);
1958                 return ret;
1959         }
1960
1961         if (num_tc) {
1962                 for (i = 0; i < 8; i++) {
1963                         tc = mqprio->qopt.prio_tc_map[i];
1964                         fifo = cpsw_tc_to_fifo(tc, num_tc);
1965                         tx_prio_map |= fifo << (4 * i);
1966                 }
1967
1968                 netdev_set_num_tc(ndev, num_tc);
1969                 for (i = 0; i < num_tc; i++) {
1970                         count = mqprio->qopt.count[i];
1971                         offset = mqprio->qopt.offset[i];
1972                         netdev_set_tc_queue(ndev, i, count, offset);
1973                 }
1974         }
1975
1976         if (!mqprio->qopt.hw) {
1977                 /* restore default configuration */
1978                 netdev_reset_tc(ndev);
1979                 tx_prio_map = TX_PRIORITY_MAPPING;
1980         }
1981
1982         priv->mqprio_hw = mqprio->qopt.hw;
1983
1984         offset = cpsw->version == CPSW_VERSION_1 ?
1985                  CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1986
1987         slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1988         slave_write(slave, tx_prio_map, offset);
1989
1990         pm_runtime_put_sync(cpsw->dev);
1991
1992         return 0;
1993 }
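
/* cpsw_set_mqprio() packs a 4-bit FIFO index for each of the eight
 * packet priorities into the per-slave TX_PRI_MAP register.  It is
 * configured through the mqprio qdisc, e.g. (interface name assumed):
 *
 *	tc qdisc add dev eth0 root handle 1: mqprio num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 queues 1@0 1@1 2@2 hw 1
 *
 * "map" assigns priorities to traffic classes, "queues" (count@offset)
 * assigns TX queues to classes, and "hw 1" requests the offload path
 * implemented above.
 */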
1994
1995 static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1996                              void *type_data)
1997 {
1998         switch (type) {
1999         case TC_SETUP_QDISC_CBS:
2000                 return cpsw_set_cbs(ndev, type_data);
2001
2002         case TC_SETUP_QDISC_MQPRIO:
2003                 return cpsw_set_mqprio(ndev, type_data);
2004
2005         default:
2006                 return -EOPNOTSUPP;
2007         }
2008 }
2009
2010 #ifdef CONFIG_NET_POLL_CONTROLLER
2011 static void cpsw_ndo_poll_controller(struct net_device *ndev)
2012 {
2013         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2014
2015         cpsw_intr_disable(cpsw);
2016         cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2017         cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2018         cpsw_intr_enable(cpsw);
2019 }
2020 #endif
2021
2022 static const struct net_device_ops cpsw_netdev_ops = {
2023         .ndo_open               = cpsw_ndo_open,
2024         .ndo_stop               = cpsw_ndo_stop,
2025         .ndo_start_xmit         = cpsw_ndo_start_xmit,
2026         .ndo_set_mac_address    = cpsw_ndo_set_mac_address,
2027         .ndo_do_ioctl           = cpsw_ndo_ioctl,
2028         .ndo_validate_addr      = eth_validate_addr,
2029         .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
2030         .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
2031         .ndo_set_tx_maxrate     = cpsw_ndo_set_tx_maxrate,
2032 #ifdef CONFIG_NET_POLL_CONTROLLER
2033         .ndo_poll_controller    = cpsw_ndo_poll_controller,
2034 #endif
2035         .ndo_vlan_rx_add_vid    = cpsw_ndo_vlan_rx_add_vid,
2036         .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
2037         .ndo_setup_tc           = cpsw_ndo_setup_tc,
2038 };
2039
2040 static void cpsw_get_drvinfo(struct net_device *ndev,
2041                              struct ethtool_drvinfo *info)
2042 {
2043         struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2044         struct platform_device  *pdev = to_platform_device(cpsw->dev);
2045
2046         strlcpy(info->driver, "cpsw", sizeof(info->driver));
2047         strlcpy(info->version, "1.0", sizeof(info->version));
2048         strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
2049 }
2050
2051 static int cpsw_set_pauseparam(struct net_device *ndev,
2052                                struct ethtool_pauseparam *pause)
2053 {
2054         struct cpsw_priv *priv = netdev_priv(ndev);
2055         bool link;
2056
        priv->rx_pause = !!pause->rx_pause;
        priv->tx_pause = !!pause->tx_pause;
2059
2060         for_each_slave(priv, _cpsw_adjust_link, priv, &link);
2061         return 0;
2062 }
2063
2064 static int cpsw_set_channels(struct net_device *ndev,
2065                              struct ethtool_channels *chs)
2066 {
2067         return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
2068 }
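
/* Channel counts are adjusted at runtime with ethtool, e.g.
 * (interface name assumed):
 *
 *	ethtool -L eth0 rx 4 tx 4
 *
 * cpsw_set_channels_common() then (re)creates the CPDMA channels,
 * registering cpsw_rx_handler as the per-packet RX callback.
 */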
2069
2070 static const struct ethtool_ops cpsw_ethtool_ops = {
2071         .get_drvinfo    = cpsw_get_drvinfo,
2072         .get_msglevel   = cpsw_get_msglevel,
2073         .set_msglevel   = cpsw_set_msglevel,
2074         .get_link       = ethtool_op_get_link,
2075         .get_ts_info    = cpsw_get_ts_info,
2076         .get_coalesce   = cpsw_get_coalesce,
2077         .set_coalesce   = cpsw_set_coalesce,
2078         .get_sset_count         = cpsw_get_sset_count,
2079         .get_strings            = cpsw_get_strings,
2080         .get_ethtool_stats      = cpsw_get_ethtool_stats,
2081         .get_pauseparam         = cpsw_get_pauseparam,
2082         .set_pauseparam         = cpsw_set_pauseparam,
2083         .get_wol        = cpsw_get_wol,
2084         .set_wol        = cpsw_set_wol,
2085         .get_regs_len   = cpsw_get_regs_len,
2086         .get_regs       = cpsw_get_regs,
2087         .begin          = cpsw_ethtool_op_begin,
2088         .complete       = cpsw_ethtool_op_complete,
2089         .get_channels   = cpsw_get_channels,
2090         .set_channels   = cpsw_set_channels,
2091         .get_link_ksettings     = cpsw_get_link_ksettings,
2092         .set_link_ksettings     = cpsw_set_link_ksettings,
2093         .get_eee        = cpsw_get_eee,
2094         .set_eee        = cpsw_set_eee,
2095         .nway_reset     = cpsw_nway_reset,
        .get_ringparam  = cpsw_get_ringparam,
        .set_ringparam  = cpsw_set_ringparam,
2098 };
2099
2100 static int cpsw_probe_dt(struct cpsw_platform_data *data,
2101                          struct platform_device *pdev)
2102 {
2103         struct device_node *node = pdev->dev.of_node;
2104         struct device_node *slave_node;
2105         int i = 0, ret;
2106         u32 prop;
2107
2108         if (!node)
2109                 return -EINVAL;
2110
2111         if (of_property_read_u32(node, "slaves", &prop)) {
2112                 dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
2113                 return -EINVAL;
2114         }
2115         data->slaves = prop;
2116
2117         if (of_property_read_u32(node, "active_slave", &prop)) {
2118                 dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
2119                 return -EINVAL;
2120         }
2121         data->active_slave = prop;
2122
2123         data->slave_data = devm_kcalloc(&pdev->dev,
2124                                         data->slaves,
2125                                         sizeof(struct cpsw_slave_data),
2126                                         GFP_KERNEL);
2127         if (!data->slave_data)
2128                 return -ENOMEM;
2129
2130         if (of_property_read_u32(node, "cpdma_channels", &prop)) {
2131                 dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
2132                 return -EINVAL;
2133         }
2134         data->channels = prop;
2135
2136         if (of_property_read_u32(node, "ale_entries", &prop)) {
2137                 dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
2138                 return -EINVAL;
2139         }
2140         data->ale_entries = prop;
2141
2142         if (of_property_read_u32(node, "bd_ram_size", &prop)) {
2143                 dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
2144                 return -EINVAL;
2145         }
2146         data->bd_ram_size = prop;
2147
2148         if (of_property_read_u32(node, "mac_control", &prop)) {
2149                 dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
2150                 return -EINVAL;
2151         }
2152         data->mac_control = prop;
2153
2154         if (of_property_read_bool(node, "dual_emac"))
2155                 data->dual_emac = 1;
2156
        /* Populate all the child nodes here (e.g. the MDIO bus). */
        ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
        /* We do not want to fail here, as some setups have no child nodes. */
        if (ret)
                dev_warn(&pdev->dev, "no child node found\n");
2164
2165         for_each_available_child_of_node(node, slave_node) {
2166                 struct cpsw_slave_data *slave_data = data->slave_data + i;
2167                 const void *mac_addr = NULL;
2168                 int lenp;
2169                 const __be32 *parp;
2170
                /* Not a slave child node, skip it */
2172                 if (!of_node_name_eq(slave_node, "slave"))
2173                         continue;
2174
2175                 slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
2176                                                     NULL);
2177                 if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
2178                     IS_ERR(slave_data->ifphy)) {
2179                         ret = PTR_ERR(slave_data->ifphy);
2180                         dev_err(&pdev->dev,
2181                                 "%d: Error retrieving port phy: %d\n", i, ret);
2182                         return ret;
2183                 }
2184
2185                 slave_data->phy_node = of_parse_phandle(slave_node,
2186                                                         "phy-handle", 0);
2187                 parp = of_get_property(slave_node, "phy_id", &lenp);
2188                 if (slave_data->phy_node) {
2189                         dev_dbg(&pdev->dev,
2190                                 "slave[%d] using phy-handle=\"%pOF\"\n",
2191                                 i, slave_data->phy_node);
2192                 } else if (of_phy_is_fixed_link(slave_node)) {
2193                         /* In the case of a fixed PHY, the DT node associated
2194                          * to the PHY is the Ethernet MAC DT node.
2195                          */
2196                         ret = of_phy_register_fixed_link(slave_node);
2197                         if (ret) {
2198                                 if (ret != -EPROBE_DEFER)
2199                                         dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
2200                                 return ret;
2201                         }
2202                         slave_data->phy_node = of_node_get(slave_node);
2203                 } else if (parp) {
2204                         u32 phyid;
2205                         struct device_node *mdio_node;
2206                         struct platform_device *mdio;
2207
2208                         if (lenp != (sizeof(__be32) * 2)) {
2209                                 dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
2210                                 goto no_phy_slave;
2211                         }
2212                         mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2213                         phyid = be32_to_cpup(parp+1);
2214                         mdio = of_find_device_by_node(mdio_node);
2215                         of_node_put(mdio_node);
2216                         if (!mdio) {
2217                                 dev_err(&pdev->dev, "Missing mdio platform device\n");
2218                                 return -EINVAL;
2219                         }
2220                         snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2221                                  PHY_ID_FMT, mdio->name, phyid);
2222                         put_device(&mdio->dev);
2223                 } else {
2224                         dev_err(&pdev->dev,
2225                                 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
2226                                 i);
2227                         goto no_phy_slave;
2228                 }
2229                 slave_data->phy_if = of_get_phy_mode(slave_node);
2230                 if (slave_data->phy_if < 0) {
2231                         dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2232                                 i);
2233                         return slave_data->phy_if;
2234                 }
2235
2236 no_phy_slave:
2237                 mac_addr = of_get_mac_address(slave_node);
2238                 if (!IS_ERR(mac_addr)) {
2239                         ether_addr_copy(slave_data->mac_addr, mac_addr);
2240                 } else {
2241                         ret = ti_cm_get_macid(&pdev->dev, i,
2242                                               slave_data->mac_addr);
2243                         if (ret)
2244                                 return ret;
2245                 }
2246                 if (data->dual_emac) {
2247                         if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2248                                                  &prop)) {
                                dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
                                slave_data->dual_emac_res_vlan = i+1;
                                dev_err(&pdev->dev, "Using %d as reserved VLAN for slave %d\n",
                                        slave_data->dual_emac_res_vlan, i);
2253                         } else {
2254                                 slave_data->dual_emac_res_vlan = prop;
2255                         }
2256                 }
2257
2258                 i++;
2259                 if (i == data->slaves)
2260                         break;
2261         }
2262
2263         return 0;
2264 }
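
/* A minimal sketch of the DT node cpsw_probe_dt() parses; the values
 * are illustrative only (see the ti,cpsw binding for details):
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,cpsw";
 *		slaves = <2>;
 *		active_slave = <0>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *		dual_emac;
 *
 *		cpsw_emac0: slave@200 {
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii";
 *			dual_emac_res_vlan = <1>;
 *		};
 *	};
 */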
2265
2266 static void cpsw_remove_dt(struct platform_device *pdev)
2267 {
2268         struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2269         struct cpsw_platform_data *data = &cpsw->data;
2270         struct device_node *node = pdev->dev.of_node;
2271         struct device_node *slave_node;
2272         int i = 0;
2273
2274         for_each_available_child_of_node(node, slave_node) {
2275                 struct cpsw_slave_data *slave_data = &data->slave_data[i];
2276
2277                 if (!of_node_name_eq(slave_node, "slave"))
2278                         continue;
2279
2280                 if (of_phy_is_fixed_link(slave_node))
2281                         of_phy_deregister_fixed_link(slave_node);
2282
2283                 of_node_put(slave_data->phy_node);
2284
2285                 i++;
2286                 if (i == data->slaves)
2287                         break;
2288         }
2289
2290         of_platform_depopulate(&pdev->dev);
2291 }
2292
2293 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2294 {
2295         struct cpsw_common              *cpsw = priv->cpsw;
2296         struct cpsw_platform_data       *data = &cpsw->data;
2297         struct net_device               *ndev;
2298         struct cpsw_priv                *priv_sl2;
2299         int ret = 0;
2300
2301         ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
2302                                        CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
2303         if (!ndev) {
2304                 dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
2305                 return -ENOMEM;
2306         }
2307
2308         priv_sl2 = netdev_priv(ndev);
2309         priv_sl2->cpsw = cpsw;
2310         priv_sl2->ndev = ndev;
2311         priv_sl2->dev  = &ndev->dev;
2312         priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2313
2314         if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
2315                 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
2316                         ETH_ALEN);
2317                 dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
2318                          priv_sl2->mac_addr);
2319         } else {
2320                 eth_random_addr(priv_sl2->mac_addr);
2321                 dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
2322                          priv_sl2->mac_addr);
2323         }
2324         memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2325
2326         priv_sl2->emac_port = 1;
2327         cpsw->slaves[1].ndev = ndev;
2328         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2329
2330         ndev->netdev_ops = &cpsw_netdev_ops;
2331         ndev->ethtool_ops = &cpsw_ethtool_ops;
2332
2333         /* register the network device */
2334         SET_NETDEV_DEV(ndev, cpsw->dev);
2335         ret = register_netdev(ndev);
2336         if (ret)
2337                 dev_err(cpsw->dev, "cpsw: error registering net device\n");
2338
2339         return ret;
2340 }
2341
2342 static const struct of_device_id cpsw_of_mtable[] = {
2343         { .compatible = "ti,cpsw"},
2344         { .compatible = "ti,am335x-cpsw"},
2345         { .compatible = "ti,am4372-cpsw"},
2346         { .compatible = "ti,dra7-cpsw"},
2347         { /* sentinel */ },
2348 };
2349 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2350
2351 static const struct soc_device_attribute cpsw_soc_devices[] = {
2352         { .family = "AM33xx", .revision = "ES1.0"},
2353         { /* sentinel */ }
2354 };
2355
2356 static int cpsw_probe(struct platform_device *pdev)
2357 {
2358         struct device                   *dev = &pdev->dev;
2359         struct clk                      *clk;
2360         struct cpsw_platform_data       *data;
2361         struct net_device               *ndev;
2362         struct cpsw_priv                *priv;
2363         void __iomem                    *ss_regs;
2364         struct resource                 *res, *ss_res;
2365         struct gpio_descs               *mode;
2366         const struct soc_device_attribute *soc;
2367         struct cpsw_common              *cpsw;
2368         int ret = 0, ch;
2369         int irq;
2370
2371         cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
2372         if (!cpsw)
2373                 return -ENOMEM;
2374
2375         cpsw->dev = dev;
2376
2377         mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
2378         if (IS_ERR(mode)) {
2379                 ret = PTR_ERR(mode);
2380                 dev_err(dev, "gpio request failed, ret %d\n", ret);
2381                 return ret;
2382         }
2383
2384         clk = devm_clk_get(dev, "fck");
2385         if (IS_ERR(clk)) {
2386                 ret = PTR_ERR(clk);
                dev_err(dev, "fck clock not found (%d)\n", ret);
2388                 return ret;
2389         }
2390         cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2391
2392         ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2393         ss_regs = devm_ioremap_resource(dev, ss_res);
2394         if (IS_ERR(ss_regs))
2395                 return PTR_ERR(ss_regs);
2396         cpsw->regs = ss_regs;
2397
2398         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2399         cpsw->wr_regs = devm_ioremap_resource(dev, res);
2400         if (IS_ERR(cpsw->wr_regs))
2401                 return PTR_ERR(cpsw->wr_regs);
2402
2403         /* RX IRQ */
2404         irq = platform_get_irq(pdev, 1);
2405         if (irq < 0)
2406                 return irq;
2407         cpsw->irqs_table[0] = irq;
2408
2409         /* TX IRQ */
2410         irq = platform_get_irq(pdev, 2);
2411         if (irq < 0)
2412                 return irq;
2413         cpsw->irqs_table[1] = irq;
2414
        /* Runtime PM may be required here for the child devices created
         * later from DT, so enable it before they are populated.
         */
        pm_runtime_enable(dev);
2419
2420         /* Need to enable clocks with runtime PM api to access module
2421          * registers
2422          */
2423         ret = pm_runtime_get_sync(dev);
2424         if (ret < 0) {
2425                 pm_runtime_put_noidle(dev);
2426                 goto clean_runtime_disable_ret;
2427         }
2428
2429         ret = cpsw_probe_dt(&cpsw->data, pdev);
2430         if (ret)
2431                 goto clean_dt_ret;
2432
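        /* On AM33xx ES1.0 the IRQ quirk is required: quirk_irq switches
         * the driver to TX cpdma channel 0 (instead of 7) and to the
         * non-multiqueue NAPI handlers registered below.
         */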
2433         soc = soc_device_match(cpsw_soc_devices);
2434         if (soc)
2435                 cpsw->quirk_irq = 1;
2436
2437         data = &cpsw->data;
2438         cpsw->slaves = devm_kcalloc(dev,
2439                                     data->slaves, sizeof(struct cpsw_slave),
2440                                     GFP_KERNEL);
2441         if (!cpsw->slaves) {
2442                 ret = -ENOMEM;
2443                 goto clean_dt_ret;
2444         }
2445
2446         cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
2447         cpsw->descs_pool_size = descs_pool_size;
2448
2449         ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
2450                                ss_res->start + CPSW2_BD_OFFSET,
2451                                descs_pool_size);
2452         if (ret)
2453                 goto clean_dt_ret;
2454
2455         ch = cpsw->quirk_irq ? 0 : 7;
2456         cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
2457         if (IS_ERR(cpsw->txv[0].ch)) {
2458                 dev_err(dev, "error initializing tx dma channel\n");
2459                 ret = PTR_ERR(cpsw->txv[0].ch);
2460                 goto clean_cpts;
2461         }
2462
2463         cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
2464         if (IS_ERR(cpsw->rxv[0].ch)) {
2465                 dev_err(dev, "error initializing rx dma channel\n");
2466                 ret = PTR_ERR(cpsw->rxv[0].ch);
2467                 goto clean_cpts;
2468         }
2469         cpsw_split_res(cpsw);
2470
2471         /* setup netdev */
2472         ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
2473                                        CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
        if (!ndev) {
                dev_err(dev, "error allocating net_device\n");
                ret = -ENOMEM;
                goto clean_cpts;
        }
2478
2479         platform_set_drvdata(pdev, cpsw);
2480         priv = netdev_priv(ndev);
2481         priv->cpsw = cpsw;
2482         priv->ndev = ndev;
2483         priv->dev  = dev;
2484         priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2485         priv->emac_port = 0;
2486
2487         if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
2488                 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
2489                 dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
2490         } else {
2491                 eth_random_addr(priv->mac_addr);
2492                 dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
2493         }
2494
2495         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2496
2497         cpsw->slaves[0].ndev = ndev;
2498
2499         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2500
2501         ndev->netdev_ops = &cpsw_netdev_ops;
2502         ndev->ethtool_ops = &cpsw_ethtool_ops;
2503         netif_napi_add(ndev, &cpsw->napi_rx,
2504                        cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
2505                        CPSW_POLL_WEIGHT);
2506         netif_tx_napi_add(ndev, &cpsw->napi_tx,
2507                           cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
2508                           CPSW_POLL_WEIGHT);
2509
2510         /* register the network device */
2511         SET_NETDEV_DEV(ndev, dev);
2512         ret = register_netdev(ndev);
2513         if (ret) {
2514                 dev_err(dev, "error registering net device\n");
2515                 ret = -ENODEV;
2516                 goto clean_cpts;
2517         }
2518
2519         if (cpsw->data.dual_emac) {
2520                 ret = cpsw_probe_dual_emac(priv);
2521                 if (ret) {
                        cpsw_err(priv, probe, "error probing slave 2 emac interface\n");
2523                         goto clean_unregister_netdev_ret;
2524                 }
2525         }
2526
2527         /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
2528          * MISC IRQs which are always kept disabled with this driver so
2529          * we will not request them.
2530          *
2531          * If anyone wants to implement support for those, make sure to
2532          * first request and append them to irqs_table array.
2533          */
2534         ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
2535                                0, dev_name(dev), cpsw);
2536         if (ret < 0) {
2537                 dev_err(dev, "error attaching irq (%d)\n", ret);
2538                 goto clean_unregister_netdev_ret;
2539         }
2540
        ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
                               0, dev_name(dev), cpsw);
2544         if (ret < 0) {
2545                 dev_err(dev, "error attaching irq (%d)\n", ret);
2546                 goto clean_unregister_netdev_ret;
2547         }
2548
2549         cpsw_notice(priv, probe,
2550                     "initialized device (regs %pa, irq %d, pool size %d)\n",
2551                     &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
2552
2553         pm_runtime_put(&pdev->dev);
2554
2555         return 0;
2556
2557 clean_unregister_netdev_ret:
2558         unregister_netdev(ndev);
2559 clean_cpts:
2560         cpts_release(cpsw->cpts);
2561         cpdma_ctlr_destroy(cpsw->dma);
2562 clean_dt_ret:
2563         cpsw_remove_dt(pdev);
2564         pm_runtime_put_sync(&pdev->dev);
2565 clean_runtime_disable_ret:
2566         pm_runtime_disable(&pdev->dev);
2567         return ret;
2568 }
2569
2570 static int cpsw_remove(struct platform_device *pdev)
2571 {
2572         struct cpsw_common *cpsw = platform_get_drvdata(pdev);
2573         int i, ret;
2574
2575         ret = pm_runtime_get_sync(&pdev->dev);
2576         if (ret < 0) {
2577                 pm_runtime_put_noidle(&pdev->dev);
2578                 return ret;
2579         }
2580
2581         for (i = 0; i < cpsw->data.slaves; i++)
2582                 if (cpsw->slaves[i].ndev)
2583                         unregister_netdev(cpsw->slaves[i].ndev);
2584
2585         cpts_release(cpsw->cpts);
2586         cpdma_ctlr_destroy(cpsw->dma);
2587         cpsw_remove_dt(pdev);
2588         pm_runtime_put_sync(&pdev->dev);
2589         pm_runtime_disable(&pdev->dev);
2590         return 0;
2591 }
2592
2593 #ifdef CONFIG_PM_SLEEP
2594 static int cpsw_suspend(struct device *dev)
2595 {
        /* platform drvdata holds the cpsw_common, not a net_device */
        struct cpsw_common      *cpsw = dev_get_drvdata(dev);
2598
2599         if (cpsw->data.dual_emac) {
2600                 int i;
2601
2602                 for (i = 0; i < cpsw->data.slaves; i++) {
2603                         if (netif_running(cpsw->slaves[i].ndev))
2604                                 cpsw_ndo_stop(cpsw->slaves[i].ndev);
2605                 }
        } else {
                struct net_device *ndev = cpsw->slaves[0].ndev;

                if (netif_running(ndev))
                        cpsw_ndo_stop(ndev);
        }
2610
2611         /* Select sleep pin state */
2612         pinctrl_pm_select_sleep_state(dev);
2613
2614         return 0;
2615 }
2616
2617 static int cpsw_resume(struct device *dev)
2618 {
        struct cpsw_common      *cpsw = dev_get_drvdata(dev);
2621
2622         /* Select default pin state */
2623         pinctrl_pm_select_default_state(dev);
2624
2625         /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2626         rtnl_lock();
2627         if (cpsw->data.dual_emac) {
2628                 int i;
2629
2630                 for (i = 0; i < cpsw->data.slaves; i++) {
2631                         if (netif_running(cpsw->slaves[i].ndev))
2632                                 cpsw_ndo_open(cpsw->slaves[i].ndev);
2633                 }
        } else {
                struct net_device *ndev = cpsw->slaves[0].ndev;

                if (netif_running(ndev))
                        cpsw_ndo_open(ndev);
        }
2638         rtnl_unlock();
2639
2640         return 0;
2641 }
2642 #endif
2643
2644 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2645
2646 static struct platform_driver cpsw_driver = {
2647         .driver = {
2648                 .name    = "cpsw",
2649                 .pm      = &cpsw_pm_ops,
2650                 .of_match_table = cpsw_of_mtable,
2651         },
2652         .probe = cpsw_probe,
2653         .remove = cpsw_remove,
2654 };
2655
2656 module_platform_driver(cpsw_driver);
2657
2658 MODULE_LICENSE("GPL");
2659 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
2660 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
2661 MODULE_DESCRIPTION("TI CPSW Ethernet driver");