1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Applied Micro X-Gene SoC Ethernet v2 Driver
5 * Copyright (c) 2017, Applied Micro Circuits Corporation
6 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
7 * Keyur Chudgar <kchudgar@apm.com>
/* Forward declaration: the ACPI match table is defined at the bottom of the
 * file but referenced earlier (driver/probe path).
 */
static const struct acpi_device_id xge_acpi_match[];
/* xge_get_resources - gather platform resources into @pdata: ENET port CSR
 * region, MAC address, phy-connection-type and IRQ.
 *
 * Returns 0 on success or a negative errno.
 * NOTE(review): this extract is a fragment; several lines (error returns,
 * some declarations, closing braces) are not visible here.
 */
static int xge_get_resources(struct xge_pdata *pdata)
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;

	/* ENET port CSR register region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		dev_err(dev, "Resource enet_csr not defined\n");

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");

	/* Use firmware-provided MAC address if present, else randomize */
	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
		dev_err(dev, "Unable to get phy-connection-type\n");

	pdata->resources.phy_mode = phy_mode;

	/* This hardware/driver only supports RGMII */
	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");

	ret = platform_get_irq(pdev, 0);
		dev_err(dev, "Unable to get irq\n");
	pdata->resources.irq = ret;
/* xge_refill_buffers - allocate, DMA-map and publish @nbuf RX skbs into the
 * RX descriptor ring starting at the current tail.
 * (fragment: some declarations/returns are not visible in this extract)
 */
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;	/* ring size is a power of two; mask for wrap */
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		/* Preserve the next-descriptor link fields in m1 while
		 * installing the high half of the buffer address.
		 */
		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   upper_32_bits(dma_addr)));

		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
		tail = (tail + 1) & slots;
/* xge_init_hw - reset the port and set the default RX buffer count.
 * (fragment: error handling and return not visible in this extract)
 */
static int xge_init_hw(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	ret = xge_port_reset(ndev);

	pdata->nbufs = NUM_BUFS;
/* xge_irq - interrupt handler: mask the device interrupt and hand the rest
 * of the work to NAPI. @data is the driver's pdata (see xge_request_irq).
 */
static irqreturn_t xge_irq(const int irq, void *data)
	struct xge_pdata *pdata = data;

	/* Disable further interrupts until the NAPI poll re-enables them */
	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
/* xge_request_irq - name and register the device interrupt line, passing
 * pdata as the handler cookie (must match xge_free_irq).
 */
static int xge_request_irq(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
/* xge_free_irq - release the IRQ registered in xge_request_irq (same
 * pdata cookie).
 */
static void xge_free_irq(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
/* is_tx_slot_available - a TX slot is free when the descriptor's E (empty)
 * bit is set and its packet size field holds the SLOT_EMPTY marker.
 * (fragment: return statements not visible in this extract)
 */
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
174 static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
176 struct xge_pdata *pdata = netdev_priv(ndev);
177 struct device *dev = &pdata->pdev->dev;
178 struct xge_desc_ring *tx_ring;
179 struct xge_raw_desc *raw_desc;
180 static dma_addr_t dma_addr;
181 u64 addr_lo, addr_hi;
186 tx_ring = pdata->tx_ring;
187 tail = tx_ring->tail;
188 len = skb_headlen(skb);
189 raw_desc = &tx_ring->raw_desc[tail];
191 if (!is_tx_slot_available(raw_desc)) {
192 netif_stop_queue(ndev);
193 return NETDEV_TX_BUSY;
196 /* Packet buffers should be 64B aligned */
197 pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
199 if (unlikely(!pkt_buf)) {
200 dev_kfree_skb_any(skb);
203 memcpy(pkt_buf, skb->data, len);
205 addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
206 addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
207 raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
208 SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
210 upper_32_bits(dma_addr)));
212 tx_ring->pkt_info[tail].skb = skb;
213 tx_ring->pkt_info[tail].dma_addr = dma_addr;
214 tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
218 raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
219 SET_BITS(PKT_SIZE, len) |
221 skb_tx_timestamp(skb);
222 xge_wr_csr(pdata, DMATXCTRL, 1);
224 tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
/* is_tx_hw_done - hardware has completed a TX slot when the E bit is set
 * and the packet size field has been cleared to zero.
 * (fragment: return statements not visible in this extract)
 */
static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
/* xge_txc_poll - reap completed TX descriptors: free the coherent packet
 * buffer and skb, update stats, mark the slot empty, acknowledge one
 * completion in DMATXSTATUS, and wake the queue if it was stopped.
 * (fragment: loop structure and some declarations not visible)
 */
static void xge_txc_poll(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	/* Nothing to do if hardware reports no completed packets */
	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))

		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		/* acknowledge one completed packet to hardware */
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
/* xge_rx_poll - process up to @budget received packets: unmap each buffer,
 * push good frames up via GRO, count errors, refill one RX buffer per
 * processed slot and acknowledge it to hardware.
 * (fragment: some declarations, break statements and the return are not
 * visible in this extract)
 */
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	/* Nothing pending if hardware reports a zero RX packet count */
	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		/* E set means the slot is still owned by hardware */
		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,

		/* D bit in m2 flags a receive error for this frame */
		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);

		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);

		/* replace the consumed buffer and ack one packet to HW */
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);

	rx_ring->head = head;
/* xge_delete_desc_ring - free one descriptor ring: the coherent descriptor
 * area, the pkt_info array, and (not visible here) the ring struct itself.
 */
static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
/* xge_free_buffers - unmap and free every skb still held in the RX ring's
 * pkt_info slots (used on teardown).
 * (fragment: skip-if-NULL check and some declarations not visible)
 */
static void xge_free_buffers(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
		dev_kfree_skb_any(skb);
/* xge_delete_desc_rings - tear down both rings: drop the TX ring, drain any
 * pending RX (one last poll) and free remaining RX buffers, then drop the
 * RX ring.
 */
static void xge_delete_desc_rings(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_delete_desc_ring(ndev, pdata->tx_ring);

	/* flush whatever hardware already delivered before freeing buffers */
	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
/* xge_create_desc_ring - allocate a ring struct, its coherent descriptor
 * area and its pkt_info bookkeeping array, then initialize the descriptors.
 *
 * Returns the new ring, or (per the visible error path) cleans up via
 * xge_delete_desc_ring on failure — fragment: error labels/returns are
 * not visible in this extract.
 */
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
	if (!ring->desc_addr)

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),

	xge_setup_desc(ring);

	xge_delete_desc_ring(ndev, ring);
/* xge_create_desc_rings - create the TX and RX rings, program their base
 * addresses into hardware, and pre-fill the RX ring with buffers.
 * (fragment: error checks between steps are not visible in this extract)
 */
static int xge_create_desc_rings(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;

	/* TX ring */
	ring = xge_create_desc_ring(ndev);
	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* RX ring */
	ring = xge_create_desc_ring(ndev);
	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);

	/* error path: undo everything built so far */
	xge_delete_desc_rings(ndev);
/* xge_open - .ndo_open: build rings, enable NAPI, hook the IRQ, unmask
 * interrupts, start RX DMA, start the PHY and MAC, then open the queue.
 * (fragment: error checks/returns not visible in this extract)
 */
static int xge_open(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	ret = xge_create_desc_rings(ndev);

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);
/* xge_close - .ndo_stop: inverse of xge_open — stop queue, MAC and PHY,
 * mask interrupts, disable NAPI and free the rings.
 * (fragment: xge_free_irq call and return not visible in this extract)
 */
static int xge_close(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);

	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);
/* xge_napi - NAPI poll callback: process RX up to @budget; when the budget
 * is not exhausted, complete NAPI and re-enable device interrupts
 * (paired with the disable in xge_irq).
 * (fragment: the TX-completion call and return are not visible)
 */
static int xge_napi(struct napi_struct *napi, const int budget)
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;

	pdata = netdev_priv(ndev);

	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
/* xge_set_mac_addr - .ndo_set_mac_address: validate/store via the generic
 * eth_mac_addr helper, then program the new station address into the MAC.
 * (fragment: the error check and return are not visible)
 */
static int xge_set_mac_addr(struct net_device *ndev, void *addr)
	struct xge_pdata *pdata = netdev_priv(ndev);

	ret = eth_mac_addr(ndev, addr);

	xge_mac_set_station_addr(pdata);
/* is_tx_pending - a TX descriptor is still owned by hardware (packet not
 * yet sent) while its E (empty) bit is clear.
 * (fragment: return statements not visible)
 */
static bool is_tx_pending(struct xge_raw_desc *raw_desc)
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
/* xge_free_pending_skb - walk the whole TX ring and release the coherent
 * buffer and skb of every descriptor hardware never completed (used from
 * the TX-timeout recovery path after DMA is stopped).
 */
static void xge_free_pending_skb(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		/* skip slots hardware already completed / never used */
		if (!is_tx_pending(raw_desc))

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
/* xge_timeout - .ndo_tx_timeout: recover from a stalled TX path by stopping
 * the queue/IRQ/NAPI, halting TX DMA, discarding pending skbs, clearing TX
 * status, rebuilding the TX descriptor ring, and restarting everything.
 * (fragment: locking around this sequence, if any, is not visible)
 */
static void xge_timeout(struct net_device *ndev)
	struct xge_pdata *pdata = netdev_priv(ndev);

	if (!netif_running(ndev))

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	/* halt TX DMA before touching the ring */
	xge_wr_csr(pdata, DMATXCTRL, 0);

	xge_free_pending_skb(ndev);
	/* clear all outstanding TX completion counts */
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);
/* xge_get_stats64 - .ndo_get_stats64: accumulate the driver's software
 * counters into @storage (which the core pre-zeroes).
 */
static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
/* Net device operations implemented by this driver. */
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
/* xge_probe - platform driver probe: allocate the netdev, wire up ops and
 * resources, set the DMA mask, initialize hardware and MDIO, register NAPI
 * and finally register the netdev.
 * (fragment: error-unwind labels between steps are not visible — NOTE
 * (review): verify the error path frees the netdev and undoes MDIO setup)
 */
static int xge_probe(struct platform_device *pdev)
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;

	ndev = alloc_etherdev(sizeof(*pdata));

	pdata = netdev_priv(ndev);

	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |

	ret = xge_get_resources(pdata);

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
		netdev_err(ndev, "No usable DMA configuration\n");

	ret = xge_init_hw(ndev);

	ret = xge_mdio_config(ndev);

	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
		netdev_err(ndev, "Failed to register netdev\n");
/* xge_remove - platform driver remove: close the interface if running,
 * tear down MDIO and unregister the netdev.
 * (fragment: rtnl locking, free_netdev and return are not visible)
 */
static int xge_remove(struct platform_device *pdev)
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);

	if (netif_running(ndev))

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
/* xge_shutdown - platform shutdown hook; retrieves pdata and (per the
 * non-visible remainder) delegates to the remove path.
 */
static void xge_shutdown(struct platform_device *pdev)
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
/* ACPI IDs this driver binds to (entries not visible in this extract). */
static const struct acpi_device_id xge_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
/* Platform driver registration and module metadata. */
static struct platform_driver xge_driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
	.remove = xge_remove,
	.shutdown = xge_shutdown,
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");