// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>
#include <lantiq_soc.h>
#define XRX200_DMA_DATA_LEN     0x600
#define XRX200_DMA_RX           0
#define XRX200_DMA_TX           1

/* cpu port mac */
#define PMAC_RX_IPG             0x0024
#define PMAC_RX_IPG_MASK        0xf

#define PMAC_HD_CTL             0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD         BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG         BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC          BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS          BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC          BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2         BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH        BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST         BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST         BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC        BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC          BIT(10)
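
/* Each DMA direction is driven through one xrx200_chan: it bundles the
 * NAPI context, the ltq_dma_channel descriptor ring state and a shadow
 * array of the sk_buffs backing the ring slots. tx_free tracks the next
 * descriptor to reclaim on the TX side.
 */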
struct xrx200_chan {
        int tx_free;

        struct napi_struct napi;
        struct ltq_dma_channel dma;
        struct sk_buff *skb[LTQ_DESC_NUM];

        struct xrx200_priv *priv;
};
struct xrx200_priv {
        struct clk *clk;

        struct xrx200_chan chan_tx;
        struct xrx200_chan chan_rx;

        struct net_device *net_dev;
        struct device *dev;

        __iomem void *pmac_reg;
};
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
        return __raw_readl(priv->pmac_reg + offset);
}
static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
        __raw_writel(val, priv->pmac_reg + offset);
}
static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
                             u32 offset)
{
        u32 val = xrx200_pmac_r32(priv, offset);

        val &= ~(clear);
        val |= set;
        xrx200_pmac_w32(priv, val, offset);
}
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
        int i;

        for (i = 0; i < LTQ_DESC_NUM; i++) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
                        break;

                /* hand the descriptor straight back to the hardware */
                desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
                            XRX200_DMA_DATA_LEN;
                ch->dma.desc++;
                ch->dma.desc %= LTQ_DESC_NUM;
        }
}
static int xrx200_open(struct net_device *net_dev)
{
        struct xrx200_priv *priv = netdev_priv(net_dev);

        napi_enable(&priv->chan_tx.napi);
        ltq_dma_open(&priv->chan_tx.dma);
        ltq_dma_enable_irq(&priv->chan_tx.dma);

        napi_enable(&priv->chan_rx.napi);
        ltq_dma_open(&priv->chan_rx.dma);
        /* The boot loader does not always deactivate the receiving of frames
         * on the ports and then some packets queue up in the PPE buffers.
         * They already passed the PMAC so they do not have the tags
         * configured here. Read these packets here and drop them.
         * The HW should have written them into memory after 10us.
         */
        usleep_range(20, 40);
        xrx200_flush_dma(&priv->chan_rx);
        ltq_dma_enable_irq(&priv->chan_rx.dma);

        netif_wake_queue(net_dev);

        return 0;
}
static int xrx200_close(struct net_device *net_dev)
{
        struct xrx200_priv *priv = netdev_priv(net_dev);

        netif_stop_queue(net_dev);

        napi_disable(&priv->chan_rx.napi);
        ltq_dma_close(&priv->chan_rx.dma);

        napi_disable(&priv->chan_tx.napi);
        ltq_dma_close(&priv->chan_tx.dma);

        return 0;
}
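
/* Allocate a fresh IP-aligned receive buffer for the current descriptor,
 * map it for the device and re-arm the descriptor. Note that the "skip"
 * path still hands the descriptor back to the hardware on allocation or
 * mapping failure, so the ring keeps cycling; the caller accounts the
 * dropped frame.
 */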
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
        dma_addr_t mapping;
        int ret = 0;

        ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
                                                          XRX200_DMA_DATA_LEN);
        if (!ch->skb[ch->dma.desc]) {
                ret = -ENOMEM;
                goto skip;
        }

        mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
                                 XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
                dev_kfree_skb_any(ch->skb[ch->dma.desc]);
                ret = -ENOMEM;
                goto skip;
        }

        ch->dma.desc_base[ch->dma.desc].addr = mapping;
        /* Make sure the address is written before we give it to HW */
        wmb();
skip:
        ch->dma.desc_base[ch->dma.desc].ctl =
                LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
                XRX200_DMA_DATA_LEN;

        return ret;
}
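
/* Pass one completed RX descriptor up the stack. A replacement buffer is
 * allocated first; if that fails, the old skb is put back into the shadow
 * ring and the frame is counted as dropped instead of being received.
 */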
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
        struct xrx200_priv *priv = ch->priv;
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        struct sk_buff *skb = ch->skb[ch->dma.desc];
        int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
        struct net_device *net_dev = priv->net_dev;
        int ret;

        ret = xrx200_alloc_skb(ch);

        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;

        if (ret) {
                ch->skb[ch->dma.desc] = skb;
                net_dev->stats.rx_dropped++;
                netdev_err(net_dev, "failed to allocate new rx buffer\n");
                return ret;
        }

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, net_dev);
        netif_receive_skb(skb);
        net_dev->stats.rx_packets++;
        net_dev->stats.rx_bytes += len - ETH_FCS_LEN;

        return 0;
}
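
/* NAPI RX poll: consume completed descriptors up to the budget. When the
 * ring is drained before the budget is exhausted, completion re-enables
 * the channel interrupt that xrx200_dma_irq disabled.
 */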
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
        struct xrx200_chan *ch = container_of(napi,
                                struct xrx200_chan, napi);
        int rx = 0;
        int ret;

        while (rx < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
                        ret = xrx200_hw_receive(ch);
                        if (ret)
                                break;
                        rx++;
                } else {
                        break;
                }
        }

        if (rx < budget) {
                if (napi_complete_done(&ch->napi, rx))
                        ltq_dma_enable_irq(&ch->dma);
        }

        return rx;
}
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
        struct xrx200_chan *ch = container_of(napi,
                                struct xrx200_chan, napi);
        struct net_device *net_dev = ch->priv->net_dev;
        int pkts = 0;
        int bytes = 0;

        netif_tx_lock(net_dev);
        while (pkts < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

                if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
                        struct sk_buff *skb = ch->skb[ch->tx_free];

                        pkts++;
                        bytes += skb->len;
                        ch->skb[ch->tx_free] = NULL;
                        consume_skb(skb);
                        memset(&ch->dma.desc_base[ch->tx_free], 0,
                               sizeof(struct ltq_dma_desc));
                        ch->tx_free++;
                        ch->tx_free %= LTQ_DESC_NUM;
                } else {
                        break;
                }
        }

        net_dev->stats.tx_packets += pkts;
        net_dev->stats.tx_bytes += bytes;
        netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

        netif_tx_unlock(net_dev);
        if (netif_queue_stopped(net_dev))
                netif_wake_queue(net_dev);

        if (pkts < budget) {
                if (napi_complete_done(&ch->napi, pkts))
                        ltq_dma_enable_irq(&ch->dma);
        }

        return pkts;
}
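
/* TX path: each frame occupies exactly one descriptor (SOP and EOP set).
 * The DMA engine needs to start on a 16 byte aligned address, so the
 * mapping is rounded down and the remainder is programmed into the
 * descriptor as the TX byte offset.
 */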
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
                                     struct net_device *net_dev)
{
        struct xrx200_priv *priv = netdev_priv(net_dev);
        struct xrx200_chan *ch = &priv->chan_tx;
        struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
        u32 byte_offset;
        dma_addr_t mapping;
        int len;

        skb->dev = net_dev;
        if (skb_put_padto(skb, ETH_ZLEN)) {
                net_dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        len = skb->len;

        if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
                netdev_err(net_dev, "tx ring full\n");
                netif_stop_queue(net_dev);
                return NETDEV_TX_BUSY;
        }

        ch->skb[ch->dma.desc] = skb;

        mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, mapping)))
                goto err_drop;

        /* dma needs to start on a 16 byte aligned address */
        byte_offset = mapping % 16;

        desc->addr = mapping - byte_offset;
        /* Make sure the address is written before we give it to HW */
        wmb();
        desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
                LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
        ch->dma.desc++;
        ch->dma.desc %= LTQ_DESC_NUM;
        if (ch->dma.desc == ch->tx_free)
                netif_stop_queue(net_dev);

        netdev_sent_queue(net_dev, len);

        return NETDEV_TX_OK;

err_drop:
        dev_kfree_skb(skb);
        net_dev->stats.tx_dropped++;
        net_dev->stats.tx_errors++;
        return NETDEV_TX_OK;
}
static const struct net_device_ops xrx200_netdev_ops = {
        .ndo_open               = xrx200_open,
        .ndo_stop               = xrx200_close,
        .ndo_start_xmit         = xrx200_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};
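
/* Shared handler for the RX and TX DMA interrupts: disable the channel
 * interrupt, schedule NAPI and acknowledge the IRQ; the poll functions
 * re-enable the interrupt once the ring has been drained.
 */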
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
        struct xrx200_chan *ch = ptr;

        if (napi_schedule_prep(&ch->napi)) {
                __napi_schedule(&ch->napi);
                ltq_dma_disable_irq(&ch->dma);
        }

        ltq_dma_ack_irq(&ch->dma);

        return IRQ_HANDLED;
}
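
/* Set up both DMA channels: pre-fill the RX ring with mapped buffers,
 * allocate the TX ring and request the two interrupts. The unwind labels
 * release everything in reverse order if a later step fails.
 */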
static int xrx200_dma_init(struct xrx200_priv *priv)
{
        struct xrx200_chan *ch_rx = &priv->chan_rx;
        struct xrx200_chan *ch_tx = &priv->chan_tx;
        int ret = 0;
        int i;

        ltq_dma_init_port(DMA_PORT_ETOP);

        ch_rx->dma.nr = XRX200_DMA_RX;
        ch_rx->dma.dev = priv->dev;
        ch_rx->priv = priv;

        ltq_dma_alloc_rx(&ch_rx->dma);
        for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
             ch_rx->dma.desc++) {
                ret = xrx200_alloc_skb(ch_rx);
                if (ret)
                        goto rx_free;
        }
        ch_rx->dma.desc = 0;
        ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
                               "xrx200_net_rx", &priv->chan_rx);
        if (ret) {
                dev_err(priv->dev, "failed to request RX irq %d\n",
                        ch_rx->dma.irq);
                goto rx_ring_free;
        }

        ch_tx->dma.nr = XRX200_DMA_TX;
        ch_tx->dma.dev = priv->dev;
        ch_tx->priv = priv;

        ltq_dma_alloc_tx(&ch_tx->dma);
        ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
                               "xrx200_net_tx", &priv->chan_tx);
        if (ret) {
                dev_err(priv->dev, "failed to request TX irq %d\n",
                        ch_tx->dma.irq);
                goto tx_free;
        }

        return ret;

tx_free:
        ltq_dma_free(&ch_tx->dma);

rx_ring_free:
        /* free the allocated RX ring */
        for (i = 0; i < LTQ_DESC_NUM; i++) {
                if (priv->chan_rx.skb[i])
                        dev_kfree_skb_any(priv->chan_rx.skb[i]);
        }

rx_free:
        ltq_dma_free(&ch_rx->dma);
        return ret;
}
static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
        int i;

        ltq_dma_free(&priv->chan_tx.dma);
        ltq_dma_free(&priv->chan_rx.dma);

        /* free the allocated RX ring */
        for (i = 0; i < LTQ_DESC_NUM; i++)
                dev_kfree_skb_any(priv->chan_rx.skb[i]);
}
static int xrx200_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct resource *res;
        struct xrx200_priv *priv;
        struct net_device *net_dev;
        int err;

        /* alloc the network device */
        net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
        if (!net_dev)
                return -ENOMEM;

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;
        priv->dev = dev;

        net_dev->netdev_ops = &xrx200_netdev_ops;
        SET_NETDEV_DEV(net_dev, dev);
        net_dev->min_mtu = ETH_ZLEN;
        net_dev->max_mtu = XRX200_DMA_DATA_LEN;

        /* load the memory ranges */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "failed to get resources\n");
                return -ENOENT;
        }

        priv->pmac_reg = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->pmac_reg))
                return PTR_ERR(priv->pmac_reg);

        priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
        if (priv->chan_rx.dma.irq < 0)
                return -ENOENT;
        priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
        if (priv->chan_tx.dma.irq < 0)
                return -ENOENT;

        /* get the clock */
        priv->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(dev, "failed to get clock\n");
                return PTR_ERR(priv->clk);
        }

        err = of_get_mac_address(np, net_dev->dev_addr);
        if (err)
                eth_hw_addr_random(net_dev);

        /* bring up the dma engine and IP core */
        err = xrx200_dma_init(priv);
        if (err)
                return err;

        /* enable clock gate */
        err = clk_prepare_enable(priv->clk);
        if (err)
                goto err_uninit_dma;

        /* set IPG to 12 */
        xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

        /* enable status header, enable CRC */
        xrx200_pmac_mask(priv, 0,
                         PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
                         PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
                         PMAC_HD_CTL);

        /* setup NAPI */
        netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
        netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);

        platform_set_drvdata(pdev, priv);

        err = register_netdev(net_dev);
        if (err)
                goto err_unprepare_clk;

        return 0;

err_unprepare_clk:
        clk_disable_unprepare(priv->clk);

err_uninit_dma:
        xrx200_hw_cleanup(priv);

        return err;
}
static int xrx200_remove(struct platform_device *pdev)
{
        struct xrx200_priv *priv = platform_get_drvdata(pdev);
        struct net_device *net_dev = priv->net_dev;

        /* free stack related instances */
        netif_stop_queue(net_dev);
        netif_napi_del(&priv->chan_tx.napi);
        netif_napi_del(&priv->chan_rx.napi);

        /* remove the actual device */
        unregister_netdev(net_dev);

        /* release the clock */
        clk_disable_unprepare(priv->clk);

        /* shut down hardware */
        xrx200_hw_cleanup(priv);

        return 0;
}
static const struct of_device_id xrx200_match[] = {
        { .compatible = "lantiq,xrx200-net" },
        {},
};
MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
        .probe = xrx200_probe,
        .remove = xrx200_remove,
        .driver = {
                .name = "lantiq,xrx200-net",
                .of_match_table = xrx200_match,
        },
};
module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");