// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 */
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>
#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cpsw_priv.h"
#include "davinci_cpdma.h"
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
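/* Example usage (illustrative values, not recommendations): the parameters
 * above are set at module load time, e.g.:
 *
 *	modprobe cpsw debug_level=0x3f rx_packet_max=1522 descs_pool_size=256
 */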
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
			     slave = cpsw->slaves;			\
			     n; n--)					\
				(func)(slave++, ##arg);			\
	} while (0)
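/* Illustrative (hypothetical) for_each_slave() callback: the slave is passed
 * first and the trailing arguments are forwarded, e.g.:
 *
 *	static void cpsw_slave_dump_link(struct cpsw_slave *slave,
 *					 struct cpsw_priv *priv)
 *	{
 *		if (slave->phy)
 *			dev_dbg(priv->dev, "slave %d link %d\n",
 *				slave->slave_num, slave->phy->link);
 *	}
 *
 *	for_each_slave(priv, cpsw_slave_dump_link, priv);
 */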
77 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
78 __be16 proto, u16 vid);
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;
	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface will be
		 * common for both the interfaces as they share the same
		 * hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}
		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable learning on all ports (host is port 0; slaves are ports 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear all untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
133 /* Clear all mcast from ALE */
134 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
135 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
137 /* Flood All Unicast Packets to Host port */
138 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
139 dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't flood all unicast packets to the host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Re-enable learning on all ports (host is port 0; slaves are ports 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}
/**
 * cpsw_set_mc - adds a multicast entry to the ALE table if it's not yet
 * present, or deletes it if it is
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
167 struct cpsw_priv *priv = netdev_priv(ndev);
168 struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}
189 static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
191 struct addr_sync_ctx *sync_ctx = ctx;
192 struct netdev_hw_addr *ha;
193 int found = 0, ret = 0;
	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;
198 /* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}
static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;
226 sync_ctx.consumed = 0;
227 sync_ctx.addr = addr;
228 sync_ctx.ndev = ndev;
231 ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
232 if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}
238 static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
240 struct addr_sync_ctx sync_ctx;
242 sync_ctx.consumed = 0;
243 sync_ctx.addr = addr;
244 sync_ctx.ndev = ndev;
247 vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
248 if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}
static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;
263 /* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}
279 static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
281 struct addr_sync_ctx sync_ctx;
283 sync_ctx.addr = addr;
284 sync_ctx.ndev = ndev;
285 sync_ctx.consumed = 0;
287 vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
288 if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
297 struct cpsw_common *cpsw = priv->cpsw;
	int slave_port = 0;

	if (cpsw->data.dual_emac)
301 slave_port = priv->emac_port + 1;
303 if (ndev->flags & IFF_PROMISC) {
304 /* Enable promiscuous mode */
305 cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
		return;
	}
309 /* Disable promiscuous mode */
310 cpsw_set_promiscious(ndev, false);
313 /* Restore allmulti on vlans if necessary */
314 cpsw_ale_set_allmulti(cpsw->ale,
315 ndev->flags & IFF_ALLMULTI, slave_port);
317 /* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}
void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}
void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}
void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue *txq;
343 struct sk_buff *skb = token;
344 struct net_device *ndev = skb->dev;
345 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	/* Check whether the queue is stopped due to stalled tx dma; if the
	 * queue is stopped then start the queue as we have free descriptors
	 * for tx
	 */
350 txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
351 if (unlikely(netif_tx_queue_stopped(txq)))
352 netif_tx_wake_queue(txq);
354 cpts_tx_timestamp(cpsw->cpts, skb);
355 ndev->stats.tx_packets++;
356 ndev->stats.tx_bytes += len;
357 dev_kfree_skb_any(skb);
360 static void cpsw_rx_vlan_encap(struct sk_buff *skb)
362 struct cpsw_priv *priv = netdev_priv(skb->dev);
363 struct cpsw_common *cpsw = priv->cpsw;
364 u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
365 u16 vtag, vid, prio, pkt_type;
367 /* Remove VLAN header encapsulation word */
368 skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
370 pkt_type = (rx_vlan_encap_hdr >>
371 CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
372 CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown and priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;
378 vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;
389 prio = (rx_vlan_encap_hdr >>
390 CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
391 CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
393 vtag = (prio << VLAN_PRIO_SHIFT) | vid;
394 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
396 /* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}
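/* Worked example of the vtag composed above (field layout per the
 * CPSW_RX_VLAN_ENCAP_HDR_* definitions in cpsw.h; numbers are hypothetical):
 * for vid = 100 and prio = 5,
 *
 *	vtag = (5 << VLAN_PRIO_SHIFT) | 100 = 0xa064
 *
 * which __vlan_hwaccel_put_tag() attaches to the skb as an 802.1Q tag.
 */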
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan *ch;
	struct sk_buff *skb = token;
	struct sk_buff *new_skb;
	struct net_device *ndev = skb->dev;
	int ret = 0, port;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv *priv;
	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}
	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptors in
			 * the DMA engine, requeue the skb back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}
	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
444 if (status & CPDMA_RX_VLAN_ENCAP)
445 cpsw_rx_vlan_encap(skb);
446 priv = netdev_priv(ndev);
447 if (priv->rx_ts_enabled)
448 cpts_rx_timestamp(cpsw->cpts, skb);
449 skb->protocol = eth_type_trans(skb, ndev);
450 netif_receive_skb(skb);
451 ndev->stats.rx_bytes += len;
452 ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}
465 ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
466 ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
467 skb_tailroom(new_skb), 0);
468 if (WARN_ON(ret < 0))
469 dev_kfree_skb_any(new_skb);
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
475 struct cpsw_vector *txv = cpsw->txv;
476 int i, ch_weight, rlim_ch_num = 0;
477 int budget, bigest_rate_ch = 0;
478 u32 ch_rate, max_rate;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}
	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}
	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!txv[i].budget)
				txv[i].budget++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;
545 /* split rx budget */
546 budget = CPSW_POLL_WEIGHT;
547 ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= cpsw->rxv[i].budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}
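/* Worked example for cpsw_split_res() (hypothetical numbers): with
 * CPSW_POLL_WEIGHT = 64, a 1000 Mbps link (max_rate = 1000000 kbps) and two
 * of four tx channels rate limited to a combined 250000 kbps:
 *
 *	rate limited budget share: 250000 * 64 / 1000000 = 16
 *	each unlimited channel:    (64 - 16) / 2 = 24
 *
 * and a single 100000 kbps channel gets 100000 * 64 / 1000000 = 6 of the
 * budget, with its weight set to 100000 * 100 / 1000000 = 10.
 */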
557 static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
559 struct cpsw_common *cpsw = dev_id;
561 writel(0, &cpsw->wr_regs->tx_en);
562 cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
564 if (cpsw->quirk_irq) {
565 disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}
573 static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
575 struct cpsw_common *cpsw = dev_id;
577 cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
578 writel(0, &cpsw->wr_regs->rx_en);
580 if (cpsw->quirk_irq) {
581 disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}
static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32 ch_map;
	int num_tx, cur_budget, ch;
593 struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
594 struct cpsw_vector *txv;
596 /* process every unprocessed channel */
597 ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
598 for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;
602 txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;
608 num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}
static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
627 if (num_tx < budget) {
628 napi_complete(napi_tx);
629 writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}
static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32 ch_map;
	int num_rx, cur_budget, ch;
643 struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
644 struct cpsw_vector *rxv;
646 /* process every unprocessed channel */
647 ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
648 for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;
652 rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;
658 num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}
static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
677 if (num_rx < budget) {
678 napi_complete_done(napi_rx, num_rx);
679 writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}
static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}
701 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
702 struct cpsw_priv *priv)
704 slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
705 slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
708 static bool cpsw_shp_is_off(struct cpsw_priv *priv)
710 struct cpsw_common *cpsw = priv->cpsw;
711 struct cpsw_slave *slave;
712 u32 shift, mask, val;
714 val = readl_relaxed(&cpsw->regs->ptype);
716 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}
724 static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
726 struct cpsw_common *cpsw = priv->cpsw;
727 struct cpsw_slave *slave;
728 u32 shift, mask, val;
730 val = readl_relaxed(&cpsw->regs->ptype);
732 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
733 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
734 mask = (1 << --fifo) << shift;
735 val = on ? val | mask : val & ~mask;
737 writel_relaxed(val, &cpsw->regs->ptype);
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device *phy = slave->phy;
	u32 mac_control = 0;
	u32 slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;
		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;
761 /* set speed_in input in case RMII mode is used in 100Mbps */
762 if (phy->speed == 100)
763 mac_control |= CPSW_SL_CTL_IFCTL_A;
764 /* in band mode only works in 10Mbps RGMII mode */
765 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
766 mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
		if (priv->rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (priv->tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
774 if (mac_control != slave->mac_control)
775 cpsw_sl_ctl_set(slave->mac_sl, mac_control);
777 /* enable forwarding */
778 cpsw_ale_control_set(cpsw->ale, slave_port,
779 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
790 /* disable forwarding */
791 cpsw_ale_control_set(cpsw->ale, slave_port,
792 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
794 cpsw_sl_wait_for_idle(slave->mac_sl, 100);
		cpsw_sl_ctl_reset(slave->mac_sl);
	}
	if (mac_control != slave->mac_control)
		phy_print_status(phy);

	slave->mac_control = mac_control;
}
static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}
static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}
static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	bool link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(cpsw);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}
static inline void cpsw_add_dual_emac_def_ale_entries(
		struct cpsw_priv *priv, struct cpsw_slave *slave,
		u32 slave_port)
{
	struct cpsw_common *cpsw = priv->cpsw;
869 u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
	if (cpsw->version == CPSW_VERSION_1)
		slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
	else
		slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
875 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
876 port_mask, port_mask, 0);
877 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
878 ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
879 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
880 HOST_PORT_NUM, ALE_VLAN |
881 ALE_SECURE, slave->port_vlan);
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;
	struct phy_device *phy;
890 struct cpsw_common *cpsw = priv->cpsw;
892 cpsw_sl_reset(slave->mac_sl, 100);
893 cpsw_sl_ctl_reset(slave->mac_sl);
895 /* setup priority mapping */
896 cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
897 RX_PRIORITY_MAPPING);
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
		/* Increase RX FIFO size to 5 for supporting fullduplex
		 * flow control mode
		 */
		slave_write(slave,
			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
		break;
	}
922 /* setup max packet size, and mac address */
923 cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
924 cpsw->rx_packet_max);
925 cpsw_set_slave_mac(slave, priv);
927 slave->mac_control = 0; /* no link yet */
929 slave_port = cpsw_get_slave_port(slave->slave_num);
	if (cpsw->data.dual_emac)
		cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
	else
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
	if (slave->data->phy_node) {
		phy = of_phy_connect(priv->ndev, slave->data->phy_node,
				     &cpsw_adjust_link, 0, slave->data->phy_if);
		if (!phy) {
			dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
				slave->data->phy_node,
				slave->slave_num);
			return;
		}
	} else {
		phy = phy_connect(priv->ndev, slave->data->phy_id,
				  &cpsw_adjust_link, slave->data->phy_if);
		if (IS_ERR(phy)) {
			dev_err(priv->dev,
				"phy \"%s\" not found on slave %d, err %ld\n",
				slave->data->phy_id, slave->slave_num,
				PTR_ERR(phy));
			return;
		}
	}

	slave->phy = phy;

	phy_attached_info(slave->phy);

	phy_start(slave->phy);
	/* Configure GMII_SEL register */
	if (!IS_ERR(slave->data->ifphy))
		phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
				 slave->data->phy_if);
	else
		cpsw_phy_sel(cpsw->dev, slave->phy->interface,
			     slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	const int vlan = cpsw->data.default_vlan;
	u32 reg;
	int i;
	int unreg_mcast_mask;

	reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
	      CPSW2_PORT_VLAN;
984 writel(vlan, &cpsw->host_port_regs->port_vlan);
986 for (i = 0; i < cpsw->data.slaves; i++)
987 slave_write(cpsw->slaves + i, vlan, reg);
	if (priv->ndev->flags & IFF_ALLMULTI)
		unreg_mcast_mask = ALE_ALL_PORTS;
	else
		unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
994 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
			  ALE_ALL_PORTS, ALE_ALL_PORTS,
			  unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
	u32 fifo_mode;
	u32 control_reg;
	struct cpsw_common *cpsw = priv->cpsw;
1005 /* soft reset the controller and initialize ale */
1006 soft_reset("cpsw", &cpsw->regs->soft_reset);
1007 cpsw_ale_start(cpsw->ale);
	/* switch to vlan aware mode */
	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
			     CPSW_ALE_VLAN_AWARE);
1012 control_reg = readl(&cpsw->regs->control);
1013 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
1014 writel(control_reg, &cpsw->regs->control);
1015 fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
1016 CPSW_FIFO_NORMAL_MODE;
1017 writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
1019 /* setup host port priority mapping */
1020 writel_relaxed(CPDMA_TX_PRIORITY_MAP,
1021 &cpsw->host_port_regs->cpdma_tx_pri_map);
1022 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
1024 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
1025 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1027 if (!cpsw->data.dual_emac) {
		cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
				   0, 0);
		cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				   ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
	}
}
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct sk_buff *skb;
	int ch_buf_num;
	int ch, i, ret;
1042 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
1043 ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
1044 for (i = 0; i < ch_buf_num; i++) {
			skb = __netdev_alloc_skb_ip_align(priv->ndev,
							  cpsw->rx_packet_max,
							  GFP_KERNEL);
			if (!skb) {
				cpsw_err(priv, ifup, "cannot allocate skb\n");
				return -ENOMEM;
			}
1053 skb_set_queue_mapping(skb, ch);
			ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
						skb->data, skb_tailroom(skb),
						0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit skb to channel %d rx, error %d\n",
					 ch, ret);
				kfree_skb(skb);
				return ret;
			}
			kmemleak_not_leak(skb);
		}
		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
{
	u32 slave_port;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (slave->phy)
		phy_stop(slave->phy);
	phy_disconnect(slave->phy);
	slave->phy = NULL;
	cpsw_ale_control_set(cpsw->ale, slave_port,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	cpsw_sl_reset(slave->mac_sl, 100);
	cpsw_sl_ctl_reset(slave->mac_sl);
}
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}
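/* Example mapping (assuming CPSW_FIFO_SHAPERS_NUM == 3): for num_tc = 3,
 * tc0 -> FIFO 3, tc1 -> FIFO 2 and the last tc2 -> FIFO 0, the unshaped
 * default queue.
 */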
static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for the highest FIFOs linearly,
	 * and FIFO bandwidth can be no more than the interface allows
	 */
1112 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1113 send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}
static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;
1170 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1171 tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1172 CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);
1177 val = slave_read(slave, tx_in_ctl_rg);
1178 if (cpsw_shp_is_off(priv)) {
1179 /* disable FIFOs rate limited queues */
1180 val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1182 /* set type of FIFO queues to normal priority mode */
1183 val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}
	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);
	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}
/* shaping for class A should be set first */
1209 static int cpsw_set_cbs(struct net_device *ndev,
1210 struct tc_cbs_qopt_offload *qopt)
1212 struct cpsw_priv *priv = netdev_priv(ndev);
1213 struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
	/* enable channels in backward order, as the highest FIFOs must be
	 * rate limited first and for compliance with the CPDMA rate limited
	 * channels, which are also used in backward order. FIFO0 cannot be
	 * rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}
	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;
1235 /* shapers can be set if link speed is known */
1236 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1237 if (slave->phy && slave->phy->link) {
1238 if (priv->shp_cfg_speed &&
1239 priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -EINVAL;
	}
	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}
1256 bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}
	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);

	return ret;
}
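/* Illustrative userspace setup for the CBS offload above (placeholder
 * values): a cbs qdisc attached under an mqprio root, e.g.
 *
 *	tc qdisc replace dev eth0 parent 100:4 cbs \
 *		idleslope 20000 sendslope -980000 hicredit 30 locredit -1470 \
 *		offload 1
 */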
static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}
static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;
1294 for (i = 0; i < 8; i++) {
1295 tc = netdev_get_prio_tc_map(priv->ndev, i);
1296 fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}
1300 tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1301 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
	slave_write(slave, tx_prio_map, tx_prio_rg);
}
static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct cpsw_priv *priv = arg;

	if (!vdev)
		return 0;

	cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
	return 0;
}
1317 /* restore resources after port reset */
1318 static void cpsw_restore(struct cpsw_priv *priv)
1320 /* restore vlan configurations */
1321 vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
1323 /* restore MQPRIO offload */
1324 for_each_slave(priv, cpsw_mqprio_resume, priv);
	/* restore CBS offload */
	for_each_slave(priv, cpsw_cbs_resume, priv);
}
1330 static int cpsw_ndo_open(struct net_device *ndev)
1332 struct cpsw_priv *priv = netdev_priv(ndev);
1333 struct cpsw_common *cpsw = priv->cpsw;
	int ret;
	u32 reg;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}
1343 netif_carrier_off(ndev);
1345 /* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of tx queues\n");
		goto err_cleanup;
	}

	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
	if (ret) {
		dev_err(priv->dev, "cannot set real number of rx queues\n");
		goto err_cleanup;
	}
1358 reg = cpsw->version;
1360 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
1361 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
1362 CPSW_RTL_VERSION(reg));
1364 /* Initialize host and slave ports */
1365 if (!cpsw->usage_count)
1366 cpsw_init_host_port(priv);
1367 for_each_slave(priv, cpsw_slave_open, priv);
1369 /* Add default VLAN */
	if (!cpsw->data.dual_emac)
		cpsw_add_default_vlan(priv);
	else
		cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
1376 /* initialize shared resources for every ndev */
1377 if (!cpsw->usage_count) {
1378 /* disable priority elevation */
1379 writel_relaxed(0, &cpsw->regs->ptype);
		/* enable statistics collection on all ports */
1382 writel_relaxed(0x7, &cpsw->regs->stat_port_en);
1384 /* Enable internal fifo flow control */
1385 writel(0x7, &cpsw->regs->flow_control);
1387 napi_enable(&cpsw->napi_rx);
1388 napi_enable(&cpsw->napi_tx);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}

		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
		ret = cpsw_fill_rx_channels(priv);
		if (ret < 0)
			goto err_cleanup;

		if (cpts_register(cpsw->cpts))
			dev_err(priv->dev, "error registering cpts device\n");
	}

	cpsw_restore(priv);
1411 /* Enable Interrupt pacing if configured */
1412 if (cpsw->coal_intvl != 0) {
1413 struct ethtool_coalesce coal;
		coal.rx_coalesce_usecs = cpsw->coal_intvl;
		cpsw_set_coalesce(ndev, &coal);
	}
1419 cpdma_ctlr_start(cpsw->dma);
1420 cpsw_intr_enable(cpsw);
	cpsw->usage_count++;

	return 0;

err_cleanup:
	cpdma_ctlr_stop(cpsw->dma);
1427 for_each_slave(priv, cpsw_slave_stop, cpsw);
1428 pm_runtime_put_sync(cpsw->dev);
	netif_carrier_off(priv->ndev);
	return ret;
}
1433 static int cpsw_ndo_stop(struct net_device *ndev)
1435 struct cpsw_priv *priv = netdev_priv(ndev);
1436 struct cpsw_common *cpsw = priv->cpsw;
1438 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
1439 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
1440 netif_tx_stop_all_queues(priv->ndev);
1441 netif_carrier_off(priv->ndev);
1443 if (cpsw->usage_count <= 1) {
1444 napi_disable(&cpsw->napi_rx);
1445 napi_disable(&cpsw->napi_tx);
1446 cpts_unregister(cpsw->cpts);
1447 cpsw_intr_disable(cpsw);
1448 cpdma_ctlr_stop(cpsw->dma);
		cpsw_ale_stop(cpsw->ale);
	}
1451 for_each_slave(priv, cpsw_slave_stop, cpsw);
1453 if (cpsw_need_resplit(cpsw))
1454 cpsw_split_res(cpsw);
	cpsw->usage_count--;
	pm_runtime_put_sync(cpsw->dev);
	return 0;
}
1461 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1462 struct net_device *ndev)
1464 struct cpsw_priv *priv = netdev_priv(ndev);
1465 struct cpsw_common *cpsw = priv->cpsw;
1466 struct cpts *cpts = cpsw->cpts;
1467 struct netdev_queue *txq;
	struct cpdma_chan *txch;
	int ret, q_idx;
1471 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
1472 cpsw_err(priv, tx_err, "packet pad failed\n");
1473 ndev->stats.tx_dropped++;
		return NET_XMIT_DROP;
	}
1477 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
1478 priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
1479 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1481 q_idx = skb_get_queue_mapping(skb);
1482 if (q_idx >= cpsw->tx_ch_num)
1483 q_idx = q_idx % cpsw->tx_ch_num;
1485 txch = cpsw->txv[q_idx].ch;
1486 txq = netdev_get_tx_queue(ndev, q_idx);
1487 skb_tx_timestamp(skb);
1488 ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
1489 priv->emac_port + cpsw->data.dual_emac);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}
	/* If there is no more tx desc left free then we need to
	 * tell the kernel to stop sending us tx frames.
	 */
1498 if (unlikely(!cpdma_check_free_tx_desc(txch))) {
1499 netif_tx_stop_queue(txq);
1501 /* Barrier, so that stop_queue visible to other cpus */
1502 smp_mb__after_atomic();
		if (cpdma_check_free_tx_desc(txch))
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
fail:
1510 ndev->stats.tx_dropped++;
1511 netif_tx_stop_queue(txq);
1513 /* Barrier, so that stop_queue visible to other cpus */
1514 smp_mb__after_atomic();
1516 if (cpdma_check_free_tx_desc(txch))
1517 netif_tx_wake_queue(txq);
	return NETDEV_TX_BUSY;
}
1522 #if IS_ENABLED(CONFIG_TI_CPTS)
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}
1535 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
1536 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
1538 if (priv->tx_ts_enabled)
1539 ts_en |= CPSW_V1_TS_TX_EN;
1541 if (priv->rx_ts_enabled)
1542 ts_en |= CPSW_V1_TS_RX_EN;
1544 slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_common *cpsw = priv->cpsw;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1556 ctrl = slave_read(slave, CPSW2_CONTROL);
1557 switch (cpsw->version) {
1558 case CPSW_VERSION_2:
1559 ctrl &= ~CTRL_V2_ALL_TS_MASK;
1561 if (priv->tx_ts_enabled)
1562 ctrl |= CTRL_V2_TX_TS_BITS;
		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
1569 ctrl &= ~CTRL_V3_ALL_TS_MASK;
1571 if (priv->tx_ts_enabled)
1572 ctrl |= CTRL_V3_TX_TS_BITS;
		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
1581 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
1582 slave_write(slave, ctrl, CPSW2_CONTROL);
1583 writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
1587 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
1589 struct cpsw_priv *priv = netdev_priv(dev);
1590 struct hwtstamp_config cfg;
1591 struct cpsw_common *cpsw = priv->cpsw;
	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* reserved for future extensions */
	if (cfg.flags)
		return -EINVAL;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;
1608 switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
		return -ERANGE;
1615 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1616 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1617 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1618 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
1621 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1622 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1623 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1624 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1625 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1626 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1627 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1628 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1629 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1630 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
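/* Minimal userspace sketch of driving this handler through the standard
 * SIOCSHWTSTAMP ABI (error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *
 * Unsupported rx filters are downgraded to the closest supported class, so
 * cfg must be re-read after the call.
 */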
1654 static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1656 struct cpsw_common *cpsw = ndev_to_cpsw(dev);
1657 struct cpsw_priv *priv = netdev_priv(dev);
1658 struct hwtstamp_config cfg;
	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1667 cfg.rx_filter = priv->rx_ts_enabled;
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_TI_CPTS */
1683 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1685 struct cpsw_priv *priv = netdev_priv(dev);
1686 struct cpsw_common *cpsw = priv->cpsw;
1687 int slave_no = cpsw_slave_index(cpsw, priv);
	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return cpsw_hwtstamp_set(dev, req);
	case SIOCGHWTSTAMP:
		return cpsw_hwtstamp_get(dev, req);
	}

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;
	return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1711 ndev->stats.tx_errors++;
1712 cpsw_intr_disable(cpsw);
1713 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
1714 cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}
1718 cpsw_intr_enable(cpsw);
1719 netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct sockaddr *addr = (struct sockaddr *)p;
	struct cpsw_common *cpsw = priv->cpsw;
	int flags = 0;
	u16 vid = 0;
	int ret;

	if (!is_valid_ether_addr(addr->sa_data))
1733 return -EADDRNOTAVAIL;
	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}
	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	}

	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
			   flags, vid);
	cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
			   flags, vid);
1751 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
1752 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1753 for_each_slave(priv, cpsw_set_slave_mac, priv);
	pm_runtime_put(cpsw->dev);

	return 0;
}
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
					  unsigned short vid)
{
	int ret;
	int unreg_mcast_mask = 0;
	int mcast_mask;
	u32 port_mask;
	struct cpsw_common *cpsw = priv->cpsw;
1769 if (cpsw->data.dual_emac) {
1770 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
		mcast_mask = ALE_PORT_HOST;
		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = mcast_mask;
	} else {
		port_mask = ALE_ALL_PORTS;
		mcast_mask = port_mask;

		if (priv->ndev->flags & IFF_ALLMULTI)
			unreg_mcast_mask = ALE_ALL_PORTS;
		else
			unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
	}
	ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
				unreg_mcast_mask);
	if (ret != 0)
		return ret;

	ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
				 HOST_PORT_NUM, ALE_VLAN, vid);
	if (ret != 0)
		goto clean_vlan;

	ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
				 mcast_mask, ALE_VLAN, vid, 0);
	if (ret != 0)
		goto clean_vlan_ucast;
	return 0;
clean_vlan_ucast:
	cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
			   HOST_PORT_NUM, ALE_VLAN, vid);
clean_vlan:
	cpsw_ale_del_vlan(cpsw->ale, vid, 0);
	return ret;
}
1809 static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}
1825 if (cpsw->data.dual_emac) {
		/* In dual EMAC, reserved VLAN id should not be used for
		 * creating VLAN interfaces as this can break the dual
		 * EMAC port separation
		 */
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan) {
				ret = -EINVAL;
				goto err;
			}
		}
	}
1840 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
	ret = cpsw_add_vlan_ale_entry(priv, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
1847 static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
				     __be16 proto, u16 vid)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	if (vid == cpsw->data.default_vlan)
		return 0;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (vid == cpsw->slaves[i].port_vlan)
				goto err;
		}
	}
1872 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
1873 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
1874 ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
1875 HOST_PORT_NUM, ALE_VLAN, vid);
	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
				  0, ALE_VLAN, vid);
	ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
	pm_runtime_put(cpsw->dev);
	return ret;
}
static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int ret, i;
	u32 min_rate;
	u32 ch_rate;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;
1897 ch_rate = rate * 1000;
1898 min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}
	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;
1922 /* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}
	cpsw_split_res(cpsw);
	return ret;
}
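/* ndo_set_tx_maxrate is reached via the per-queue sysfs attribute, e.g.
 * (illustrative rate, in Mbps):
 *
 *	echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */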
1935 static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
1937 struct tc_mqprio_qopt_offload *mqprio = type_data;
1938 struct cpsw_priv *priv = netdev_priv(ndev);
1939 struct cpsw_common *cpsw = priv->cpsw;
1940 int fifo, num_tc, count, offset;
1941 struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;
	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;
	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}
	if (mqprio->qopt.hw) {
		for (i = 0; i < 8; i++) {
1960 tc = mqprio->qopt.prio_tc_map[i];
1961 fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}
1973 if (!mqprio->qopt.hw) {
1974 /* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}
1979 priv->mqprio_hw = mqprio->qopt.hw;
1981 offset = cpsw->version == CPSW_VERSION_1 ?
1982 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1984 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1985 slave_write(slave, tx_prio_map, offset);
	pm_runtime_put_sync(cpsw->dev);

	return 0;
}
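/* Illustrative userspace setup for the MQPRIO offload above (queue layout
 * is a placeholder):
 *
 *	tc qdisc add dev eth0 root handle 100: mqprio num_tc 3 \
 *		map 2 2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 \
 *		queues 1@0 1@1 2@2 hw 1
 */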
static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}
2007 #ifdef CONFIG_NET_POLL_CONTROLLER
2008 static void cpsw_ndo_poll_controller(struct net_device *ndev)
2010 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2012 cpsw_intr_disable(cpsw);
2013 cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2014 cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
	cpsw_intr_enable(cpsw);
}
#endif
2019 static const struct net_device_ops cpsw_netdev_ops = {
2020 .ndo_open = cpsw_ndo_open,
2021 .ndo_stop = cpsw_ndo_stop,
2022 .ndo_start_xmit = cpsw_ndo_start_xmit,
2023 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
2024 .ndo_do_ioctl = cpsw_ndo_ioctl,
2025 .ndo_validate_addr = eth_validate_addr,
2026 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
2027 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
2028 .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
2029 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cpsw_ndo_poll_controller,
#endif
2032 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
2033 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
2034 .ndo_setup_tc = cpsw_ndo_setup_tc,
2037 static void cpsw_get_drvinfo(struct net_device *ndev,
2038 struct ethtool_drvinfo *info)
2040 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2041 struct platform_device *pdev = to_platform_device(cpsw->dev);
2043 strlcpy(info->driver, "cpsw", sizeof(info->driver));
2044 strlcpy(info->version, "1.0", sizeof(info->version));
2045 strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}
2067 static const struct ethtool_ops cpsw_ethtool_ops = {
2068 .get_drvinfo = cpsw_get_drvinfo,
2069 .get_msglevel = cpsw_get_msglevel,
2070 .set_msglevel = cpsw_set_msglevel,
2071 .get_link = ethtool_op_get_link,
2072 .get_ts_info = cpsw_get_ts_info,
2073 .get_coalesce = cpsw_get_coalesce,
2074 .set_coalesce = cpsw_set_coalesce,
2075 .get_sset_count = cpsw_get_sset_count,
2076 .get_strings = cpsw_get_strings,
2077 .get_ethtool_stats = cpsw_get_ethtool_stats,
2078 .get_pauseparam = cpsw_get_pauseparam,
2079 .set_pauseparam = cpsw_set_pauseparam,
2080 .get_wol = cpsw_get_wol,
2081 .set_wol = cpsw_set_wol,
2082 .get_regs_len = cpsw_get_regs_len,
2083 .get_regs = cpsw_get_regs,
2084 .begin = cpsw_ethtool_op_begin,
2085 .complete = cpsw_ethtool_op_complete,
2086 .get_channels = cpsw_get_channels,
2087 .set_channels = cpsw_set_channels,
2088 .get_link_ksettings = cpsw_get_link_ksettings,
2089 .set_link_ksettings = cpsw_set_link_ksettings,
2090 .get_eee = cpsw_get_eee,
2091 .set_eee = cpsw_set_eee,
2092 .nway_reset = cpsw_nway_reset,
2093 .get_ringparam = cpsw_get_ringparam,
2094 .set_ringparam = cpsw_set_ringparam,
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;
2108 if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
2112 data->slaves = prop;
2114 if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
2118 data->active_slave = prop;
	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;
2127 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
2131 data->channels = prop;
2133 if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
2137 data->ale_entries = prop;
2139 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
2143 data->bd_ram_size = prop;
2145 if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
2149 data->mac_control = prop;
2151 if (of_property_read_bool(node, "dual_emac"))
2152 data->dual_emac = 1;
	/*
	 * Populate all the child nodes here...
	 */
2157 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");
2162 for_each_available_child_of_node(node, slave_node) {
2163 struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (!of_node_name_eq(slave_node, "slave"))
			continue;
		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			return ret;
		}
		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
2189 } else if (of_phy_is_fixed_link(slave_node)) {
2190 /* In the case of a fixed PHY, the DT node associated
2191 * to the PHY is the Ethernet MAC DT node.
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
2203 struct platform_device *mdio;
2205 if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
2209 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2210 phyid = be32_to_cpup(parp+1);
2211 mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
2217 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2218 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
2227 if (slave_data->phy_if < 0) {
2228 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (!IS_ERR(mac_addr)) {
			ether_addr_copy(slave_data->mac_addr, mac_addr);
		} else {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
2243 if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}
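/* Sketch of the device-tree layout this parser expects (values are board
 * specific placeholders; see the ti,cpsw binding for details):
 *
 *	mac: ethernet@4a100000 {
 *		compatible = "ti,cpsw";
 *		slaves = <2>;
 *		active_slave = <0>;
 *		cpdma_channels = <8>;
 *		ale_entries = <1024>;
 *		bd_ram_size = <0x2000>;
 *		mac_control = <0x20>;
 *		dual_emac;
 *
 *		cpsw_emac0: slave@4a100200 {
 *			phy-handle = <&ethphy0>;
 *			phy-mode = "rgmii-txid";
 *			dual_emac_res_vlan = <1>;
 *		};
 *	};
 */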
2263 static void cpsw_remove_dt(struct platform_device *pdev)
2265 struct net_device *ndev = platform_get_drvdata(pdev);
2266 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2267 struct cpsw_platform_data *data = &cpsw->data;
2268 struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
2273 struct cpsw_slave_data *slave_data = &data->slave_data[i];
		if (!of_node_name_eq(slave_node, "slave"))
			continue;
2278 if (of_phy_is_fixed_link(slave_node))
2279 of_phy_deregister_fixed_link(slave_node);
		of_node_put(slave_data->phy_node);
		i++;
		if (i == data->slaves)
			break;
	}

	of_platform_depopulate(&pdev->dev);
}
2291 static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2293 struct cpsw_common *cpsw = priv->cpsw;
2294 struct cpsw_platform_data *data = &cpsw->data;
2295 struct net_device *ndev;
	struct cpsw_priv *priv_sl2;
	int ret = 0;

	ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}
2306 priv_sl2 = netdev_priv(ndev);
2307 priv_sl2->cpsw = cpsw;
2308 priv_sl2->ndev = ndev;
2309 priv_sl2->dev = &ndev->dev;
2310 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2312 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
		       ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
2324 priv_sl2->emac_port = 1;
2325 cpsw->slaves[1].ndev = ndev;
2326 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2328 ndev->netdev_ops = &cpsw_netdev_ops;
2329 ndev->ethtool_ops = &cpsw_ethtool_ops;
2331 /* register the network device */
2332 SET_NETDEV_DEV(ndev, cpsw->dev);
	ret = register_netdev(ndev);
	if (ret)
		dev_err(cpsw->dev, "cpsw: error registering net device\n");

	return ret;
}
2340 static const struct of_device_id cpsw_of_mtable[] = {
2341 { .compatible = "ti,cpsw"},
2342 { .compatible = "ti,am335x-cpsw"},
2343 { .compatible = "ti,am4372-cpsw"},
2344 { .compatible = "ti,dra7-cpsw"},
2347 MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
2349 static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};
static int cpsw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;
	struct cpsw_platform_data *data;
	struct net_device *ndev;
	struct cpsw_priv *priv;
	void __iomem *ss_regs;
	struct resource *res, *ss_res;
	struct gpio_descs *mode;
	const struct soc_device_attribute *soc;
	struct cpsw_common *cpsw;
	int ret = 0, ch;
	int irq;

	cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
	if (!cpsw)
		return -ENOMEM;

	cpsw->dev = dev;
	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
	if (IS_ERR(mode)) {
		ret = PTR_ERR(mode);
		dev_err(dev, "gpio request failed, ret %d\n", ret);
		return ret;
	}

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "fck is not found %d\n", ret);
		return ret;
	}
2388 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2390 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2391 ss_regs = devm_ioremap_resource(dev, ss_res);
2392 if (IS_ERR(ss_regs))
2393 return PTR_ERR(ss_regs);
2394 cpsw->regs = ss_regs;
2396 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2397 cpsw->wr_regs = devm_ioremap_resource(dev, res);
2398 if (IS_ERR(cpsw->wr_regs))
2399 return PTR_ERR(cpsw->wr_regs);
	irq = platform_get_irq(pdev, 1);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[0] = irq;

	irq = platform_get_irq(pdev, 2);
	if (irq < 0)
		return irq;
	cpsw->irqs_table[1] = irq;
	/*
	 * This may be required here for child devices.
	 */
	pm_runtime_enable(dev);

	/* Need to enable clocks with runtime PM api to access module
	 * registers
	 */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		goto clean_runtime_disable_ret;
	}
	ret = cpsw_probe_dt(&cpsw->data, pdev);
	if (ret)
		goto clean_dt_ret;

	soc = soc_device_match(cpsw_soc_devices);
	if (soc)
		cpsw->quirk_irq = 1;

	data = &cpsw->data;
2436 cpsw->slaves = devm_kcalloc(dev,
2437 data->slaves, sizeof(struct cpsw_slave),
	if (!cpsw->slaves) {
		ret = -ENOMEM;
		goto clean_dt_ret;
	}
2444 cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
2445 cpsw->descs_pool_size = descs_pool_size;
	ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
			       ss_res->start + CPSW2_BD_OFFSET,
			       descs_pool_size);
	if (ret)
		goto clean_dt_ret;
2453 ch = cpsw->quirk_irq ? 0 : 7;
2454 cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
2455 if (IS_ERR(cpsw->txv[0].ch)) {
2456 dev_err(dev, "error initializing tx dma channel\n");
		ret = PTR_ERR(cpsw->txv[0].ch);
		goto clean_cpts;
	}
2461 cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
2462 if (IS_ERR(cpsw->rxv[0].ch)) {
2463 dev_err(dev, "error initializing rx dma channel\n");
		ret = PTR_ERR(cpsw->rxv[0].ch);
		goto clean_cpts;
	}
2467 cpsw_split_res(cpsw);
2470 ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
				       CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(dev, "error allocating net_device\n");
		ret = -ENOMEM;
		goto clean_cpts;
	}
2477 platform_set_drvdata(pdev, ndev);
2478 priv = netdev_priv(ndev);
2482 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
2483 priv->emac_port = 0;
	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
		dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
	} else {
		eth_random_addr(priv->mac_addr);
		dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
	}
2493 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2495 cpsw->slaves[0].ndev = ndev;
2497 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
2499 ndev->netdev_ops = &cpsw_netdev_ops;
2500 ndev->ethtool_ops = &cpsw_ethtool_ops;
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
		       CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
			  CPSW_POLL_WEIGHT);
2508 /* register the network device */
2509 SET_NETDEV_DEV(ndev, dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "error registering net device\n");
		goto clean_cpts;
	}
2517 if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}
2525 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
2526 * MISC IRQs which are always kept disabled with this driver so
2527 * we will not request them.
2529 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */
2532 ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
2533 0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}
2540 ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
2541 0, dev_name(&pdev->dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_unregister_netdev_ret;
	}
2547 cpsw_notice(priv, probe,
2548 "initialized device (regs %pa, irq %d, pool size %d)\n",
2549 &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
	pm_runtime_put(&pdev->dev);

	return 0;
2555 clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_cpts:
	cpts_release(cpsw->cpts);
2559 cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
2562 pm_runtime_put_sync(&pdev->dev);
2563 clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
	return ret;
}
static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}
2580 if (cpsw->data.dual_emac)
2581 unregister_netdev(cpsw->slaves[1].ndev);
2582 unregister_netdev(ndev);
2584 cpts_release(cpsw->cpts);
2585 cpdma_ctlr_destroy(cpsw->dma);
2586 cpsw_remove_dt(pdev);
2587 pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
2592 #ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}
2610 /* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
2616 static int cpsw_resume(struct device *dev)
2618 struct net_device *ndev = dev_get_drvdata(dev);
2619 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2621 /* Select default pin state */
2622 pinctrl_pm_select_default_state(dev);
	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
	rtnl_unlock();

	return 0;
}
#endif
2643 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};
2655 module_platform_driver(cpsw_driver);
2657 MODULE_LICENSE("GPL");
2658 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
2659 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
2660 MODULE_DESCRIPTION("TI CPSW Ethernet driver");