// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"
#define CPTS_N_ETX_TS 4

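/* Resolve the slave (external port) index for a given netdev instance.
 * This hook is assigned by the platform-specific probe code, since the
 * legacy dual-EMAC driver and the switchdev variant map netdevs to slave
 * ports differently.
 */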
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

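/* cpsw_tx_handler - CPDMA TX completion callback.
 * The token is either an skb or an xdp_frame (tagged via
 * cpsw_xdpf_to_handle()); free it accordingly, wake the TX queue if it was
 * stopped on descriptor exhaustion and account the transmitted bytes.
 */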
void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp *xmeta;
	struct xdp_frame *xdpf;
	struct net_device *ndev;
	struct netdev_queue *txq;
	struct sk_buff *skb;
	int ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}

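/* Hard IRQ handlers: mask the corresponding interrupt at the CPSW wrapper
 * (WR) level, issue an EOI to the CPDMA and defer the real work to NAPI.
 * On platforms flagged with quirk_irq the IRQ line itself is masked as
 * well and re-enabled from the NAPI poll handlers.
 */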
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->misc_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
	cpts_misc_interrupt(cpsw->cpts);
	writel(0x10, &cpsw->wr_regs->misc_en);

	return IRQ_HANDLED;
}

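/* Multi-channel NAPI TX poll: walk the channels that have pending
 * completions and give each at most its pre-computed share of the NAPI
 * budget (txv[ch].budget, set up by cpsw_split_res()).
 */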
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx, cur_budget, ch;
	u32 ch_map;
	struct cpsw_vector *txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx, cur_budget, ch;
	u32 ch_map;
	struct cpsw_vector *rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

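/* When RX VLAN encapsulation is enabled the switch prepends a 4-byte info
 * word to every received frame. Parse it to recover the VLAN id and
 * priority, strip the word (and the 802.1Q tag for VLAN-tagged frames)
 * and, unless the ALE is configured to untag this VLAN on the host port,
 * store the tag in the skb with __vlan_hwaccel_put_tag().
 */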
void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	struct cpsw_common *cpsw = priv->cpsw;
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Untag P0 packets if set for vlan */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

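/* Sum of the link speeds of all slave ports that currently have link; used
 * to decide whether the TX budget/rate split has to be recomputed.
 */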
static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

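/* Re-split the NAPI budget and CPDMA channel weights across the TX
 * channels. Rate-limited channels get a budget proportional to their rate
 * relative to the interface speed; the remainder is shared equally among
 * the non-limited channels. Rough example, assuming CPSW_POLL_WEIGHT is
 * 64: on a 1000 Mbit/s link with three TX channels, one of them limited to
 * 100 Mbit/s, the limited channel gets 100000 * 64 / 1000000 = 6 of the
 * budget and the other two get (64 - 6) / 2 = 29 each.
 */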
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!txv[i].budget)
				txv[i].budget++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
		     int ale_ageout, phys_addr_t desc_mem_phys,
		     int descs_pool_size)
{
	u32 slave_offset, sliver_offset, slave_size;
	struct cpsw_ale_params ale_params;
	struct cpsw_platform_data *data;
	struct cpdma_params dma_params;
	struct device *dev = cpsw->dev;
	struct device_node *cpts_node;
	void __iomem *cpts_regs;
	int ret = 0, i;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	cpsw->version = readl(&cpsw->regs->id_ver);

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset = CPSW1_SLAVE_OFFSET;
		slave_size = CPSW1_SLAVE_SIZE;
		sliver_offset = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset = CPSW2_SLAVE_OFFSET;
		slave_size = CPSW2_SLAVE_SIZE;
		sliver_offset = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys = desc_mem_phys;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		return -ENODEV;
	}

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		void __iomem *regs = cpsw->regs;

		slave->slave_num = i;
		slave->data = &cpsw->data.slave_data[i];
		slave->regs = regs + slave_offset;
		slave->port_vlan = slave->data->dual_emac_res_vlan;
		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
		if (IS_ERR(slave->mac_sl))
			return PTR_ERR(slave->mac_sl);

		slave_offset += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	ale_params.dev = dev;
	ale_params.ale_ageout = ale_ageout;
	ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
	ale_params.dev_id = "cpsw";

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(cpsw->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		return PTR_ERR(cpsw->ale);
	}

	dma_params.dev = dev;
	dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan = data->channels;
	dma_params.has_soft_reset = true;
	dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size = data->bd_ram_size;
	dma_params.desc_align = 16;
	dma_params.has_ext_regs = true;
	dma_params.desc_hw_addr = dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
	dma_params.descs_pool_size = descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		return -ENOMEM;
	}

	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
	if (!cpts_node)
		cpts_node = cpsw->dev->of_node;

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
				 CPTS_N_ETX_TS);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		cpdma_ctlr_destroy(cpsw->dma);
	}
	of_node_put(cpts_node);

	return ret;
}

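/* Hardware timestamping (CPTS) setup. CPSW v1 uses the per-slave TS_CTL
 * and TS_SEQ_LTYPE registers, while v2/v3 use the per-slave CONTROL and
 * TS_SEQ_MTYPE registers plus the ts_ltype/vlan_ltype registers in the
 * switch core.
 */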
#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/

int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
	struct phy_device *phy;

	if (!netif_running(dev))
		return -EINVAL;

	phy = cpsw->slaves[slave_no].phy;

	if (!phy_has_hwtstamp(phy)) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return cpsw_hwtstamp_set(dev, req);
		case SIOCGHWTSTAMP:
			return cpsw_hwtstamp_get(dev, req);
		}
	}

	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}

int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}

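/* Map a traffic class to a port FIFO. The last tc always uses FIFO 0,
 * which cannot be rate limited; the remaining classes are mapped to the
 * shapeable FIFOs in reverse order (tc0 gets the highest FIFO).
 */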
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for highest fifos linearly
	 * and fifo bw no more than the interface can allow
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * 8;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}

/*
 * shaping for class A should be set first
 */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as highest FIFOs must be rate
	 * limited first and for compliance with CPDMA rate limited channels
	 * that are also used in backward order. FIFO0 cannot be rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

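/* mqprio offload: build the 8-entry priority-to-FIFO map expected by the
 * per-port TX_PRI_MAP register (4 bits per priority) from the mqprio
 * prio_tc_map and program the queue/tc layout into the netdev.
 */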
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		      void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

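/* Pre-fill every RX channel with page_pool pages. Each page carries a
 * struct cpsw_meta_xdp at CPSW_XMETA_OFFSET so the completion handler can
 * recover the owning netdev and channel; the DMA address handed to the
 * CPDMA is offset by CPSW_HEADROOM_NA to leave XDP/skb headroom in front
 * of the packet data.
 */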
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params = {};
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* using the same page pool is allowed as there are no rx
		 * handlers running simultaneously for both ndevs
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}

static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!priv->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(priv->xdp_prog, prog);

	xdp_attachment_setup(&priv->xdpi, bpf);

	return 0;
}

int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return cpsw_xdp_prog_setup(priv, bpf);

	default:
		return -EINVAL;
	}
}

int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
		      struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom)
			return -EINVAL;

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret)
		priv->ndev->stats.tx_dropped++;

	return ret;
}

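/* Run the attached XDP program on a received buffer and act on its
 * verdict: PASS hands the buffer to the normal stack path, TX converts it
 * to an xdp_frame and queues it on TX channel 0, REDIRECT goes through
 * xdp_do_redirect() with an immediate flush, and everything else drops the
 * packet and recycles the page back to the channel's page_pool.
 */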
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port, int *len)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
			xdp_return_frame_rx_napi(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware share a common queue, so the incoming
		 * device might change per packet.
		 */
		xdp_do_flush_map();
		break;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		goto drop;
	}

	ndev->stats.rx_bytes += *len;
	ndev->stats.rx_packets++;
out:
	return ret;
drop:
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}