net: axienet: Preparatory changes for dmaengine support
author Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@amd.com>
Wed, 15 Nov 2023 18:56:52 +0000 (00:26 +0530)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 21 Nov 2023 01:52:22 +0000 (17:52 -0800)
The axiethernet driver has inbuilt DMA programming. In order to add
dmaengine support and make its integration seamless, the current inbuilt
AXI DMA programming code is placed under a use_dmaengine check.

It also performs minor code reordering to minimize conditional
use_dmaengine checks; there is no functional change. The "dmas"
property is used to identify whether the driver should use the
dmaengine framework or the inbuilt AXI DMA programming.
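
A minimal sketch of that probe-time selection, assuming the flag is set
from the "dmas" property as described above (the hunk that sets the
field is not shown in this excerpt):

	/* In axienet_probe(): a "dmas" property in the device tree means
	 * an external DMA engine is described, so defer to the dmaengine
	 * framework; otherwise keep the inbuilt AXI DMA programming.
	 */
	if (of_find_property(pdev->dev.of_node, "dmas", NULL))
		lp->use_dmaengine = 1;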

Signed-off-by: Sarath Babu Naidu Gaddam <sarath.babu.naidu.gaddam@amd.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Link: https://lore.kernel.org/r/1700074613-1977070-3-git-send-email-radhey.shyam.pandey@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c

drivers/net/ethernet/xilinx/xilinx_axienet.h
index 575ff9d..3ead0ba 100644
@@ -435,6 +435,7 @@ struct axidma_bd {
  * @coalesce_usec_rx:  IRQ coalesce delay for RX
  * @coalesce_count_tx: Store the irq coalesce on TX side.
  * @coalesce_usec_tx:  IRQ coalesce delay for TX
+ * @use_dmaengine: flag indicating whether the dmaengine framework is used.
  */
 struct axienet_local {
        struct net_device *ndev;
@@ -499,6 +500,7 @@ struct axienet_local {
        u32 coalesce_usec_rx;
        u32 coalesce_count_tx;
        u32 coalesce_usec_tx;
+       u8  use_dmaengine;
 };
 
 /**
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 82d0d44..188b03e 100644
@@ -589,10 +589,6 @@ static int axienet_device_reset(struct net_device *ndev)
        struct axienet_local *lp = netdev_priv(ndev);
        int ret;
 
-       ret = __axienet_device_reset(lp);
-       if (ret)
-               return ret;
-
        lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
        lp->options |= XAE_OPTION_VLAN;
        lp->options &= (~XAE_OPTION_JUMBO);
@@ -606,11 +602,17 @@ static int axienet_device_reset(struct net_device *ndev)
                        lp->options |= XAE_OPTION_JUMBO;
        }
 
-       ret = axienet_dma_bd_init(ndev);
-       if (ret) {
-               netdev_err(ndev, "%s: descriptor allocation failed\n",
-                          __func__);
-               return ret;
+       if (!lp->use_dmaengine) {
+               ret = __axienet_device_reset(lp);
+               if (ret)
+                       return ret;
+
+               ret = axienet_dma_bd_init(ndev);
+               if (ret) {
+                       netdev_err(ndev, "%s: descriptor allocation failed\n",
+                                  __func__);
+                       return ret;
+               }
        }
 
        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -1125,41 +1127,21 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
 static void axienet_dma_err_handler(struct work_struct *work);
 
 /**
- * axienet_open - Driver open routine.
- * @ndev:      Pointer to net_device structure
+ * axienet_init_legacy_dma - init the legacy DMA path.
+ * @ndev:       Pointer to net_device structure
  *
  * Return: 0, on success.
- *         non-zero error value on failure
+ *          non-zero error value on failure
+ *
+ * This is the legacy DMA initialization code. It also registers the
+ * interrupt service routines and enables the interrupt lines and ISR handling.
  *
- * This is the driver open routine. It calls phylink_start to start the
- * PHY device.
- * It also allocates interrupt service routines, enables the interrupt lines
- * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
- * descriptors are initialized.
  */
-static int axienet_open(struct net_device *ndev)
+static int axienet_init_legacy_dma(struct net_device *ndev)
 {
        int ret;
        struct axienet_local *lp = netdev_priv(ndev);
 
-       dev_dbg(&ndev->dev, "axienet_open()\n");
-
-       /* When we do an Axi Ethernet reset, it resets the complete core
-        * including the MDIO. MDIO must be disabled before resetting.
-        * Hold MDIO bus lock to avoid MDIO accesses during the reset.
-        */
-       axienet_lock_mii(lp);
-       ret = axienet_device_reset(ndev);
-       axienet_unlock_mii(lp);
-
-       ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
-       if (ret) {
-               dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
-               return ret;
-       }
-
-       phylink_start(lp->phylink);
-
        /* Enable worker thread for Axi DMA error handling */
        INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
 
@@ -1193,13 +1175,61 @@ err_rx_irq:
 err_tx_irq:
        napi_disable(&lp->napi_tx);
        napi_disable(&lp->napi_rx);
-       phylink_stop(lp->phylink);
-       phylink_disconnect_phy(lp->phylink);
        cancel_work_sync(&lp->dma_err_task);
        dev_err(lp->dev, "request_irq() failed\n");
        return ret;
 }
 
+/**
+ * axienet_open - Driver open routine.
+ * @ndev:      Pointer to net_device structure
+ *
+ * Return: 0, on success.
+ *         non-zero error value on failure
+ *
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
+ * It also allocates interrupt service routines, enables the interrupt lines
+ * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
+ * descriptors are initialized.
+ */
+static int axienet_open(struct net_device *ndev)
+{
+       int ret;
+       struct axienet_local *lp = netdev_priv(ndev);
+
+       dev_dbg(&ndev->dev, "%s\n", __func__);
+
+       /* When we do an Axi Ethernet reset, it resets the complete core
+        * including the MDIO. MDIO must be disabled before resetting.
+        * Hold MDIO bus lock to avoid MDIO accesses during the reset.
+        */
+       axienet_lock_mii(lp);
+       ret = axienet_device_reset(ndev);
+       axienet_unlock_mii(lp);
+
+       ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+       if (ret) {
+               dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+               return ret;
+       }
+
+       phylink_start(lp->phylink);
+
+       if (!lp->use_dmaengine) {
+               ret = axienet_init_legacy_dma(ndev);
+               if (ret)
+                       goto err_phy;
+       }
+
+       return 0;
+
+err_phy:
+       phylink_stop(lp->phylink);
+       phylink_disconnect_phy(lp->phylink);
+       return ret;
+}
+
 /**
  * axienet_stop - Driver stop routine.
  * @ndev:      Pointer to net_device structure
@@ -1216,8 +1246,10 @@ static int axienet_stop(struct net_device *ndev)
 
        dev_dbg(&ndev->dev, "axienet_close()\n");
 
-       napi_disable(&lp->napi_tx);
-       napi_disable(&lp->napi_rx);
+       if (!lp->use_dmaengine) {
+               napi_disable(&lp->napi_tx);
+               napi_disable(&lp->napi_rx);
+       }
 
        phylink_stop(lp->phylink);
        phylink_disconnect_phy(lp->phylink);
@@ -1225,18 +1257,18 @@ static int axienet_stop(struct net_device *ndev)
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 
-       axienet_dma_stop(lp);
+       if (!lp->use_dmaengine) {
+               axienet_dma_stop(lp);
+               cancel_work_sync(&lp->dma_err_task);
+               free_irq(lp->tx_irq, ndev);
+               free_irq(lp->rx_irq, ndev);
+               axienet_dma_bd_release(ndev);
+       }
 
        axienet_iow(lp, XAE_IE_OFFSET, 0);
 
-       cancel_work_sync(&lp->dma_err_task);
-
        if (lp->eth_irq > 0)
                free_irq(lp->eth_irq, ndev);
-       free_irq(lp->tx_irq, ndev);
-       free_irq(lp->rx_irq, ndev);
-
-       axienet_dma_bd_release(ndev);
        return 0;
 }
 
@@ -1412,14 +1444,16 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
        data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
        data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
        data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
-       data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
-       data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
-       data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
-       data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
-       data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
-       data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
-       data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
-       data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+       if (!lp->use_dmaengine) {
+               data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+               data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+               data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+               data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+               data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+               data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+               data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+               data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+       }
 }
 
 static void
@@ -1880,9 +1914,6 @@ static int axienet_probe(struct platform_device *pdev)
        u64_stats_init(&lp->rx_stat_sync);
        u64_stats_init(&lp->tx_stat_sync);
 
-       netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
-       netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
-
        lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
        if (!lp->axi_clk) {
                /* For backward compatibility, if named AXI clock is not present,
@@ -2008,80 +2039,85 @@ static int axienet_probe(struct platform_device *pdev)
                goto cleanup_clk;
        }
 
-       /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-       np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
-       if (np) {
-               struct resource dmares;
+       if (!of_find_property(pdev->dev.of_node, "dmas", NULL)) {
+               /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
+               np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
 
-               ret = of_address_to_resource(np, 0, &dmares);
-               if (ret) {
-                       dev_err(&pdev->dev,
-                               "unable to get DMA resource\n");
+               if (np) {
+                       struct resource dmares;
+
+                       ret = of_address_to_resource(np, 0, &dmares);
+                       if (ret) {
+                               dev_err(&pdev->dev,
+                                       "unable to get DMA resource\n");
+                               of_node_put(np);
+                               goto cleanup_clk;
+                       }
+                       lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+                                                            &dmares);
+                       lp->rx_irq = irq_of_parse_and_map(np, 1);
+                       lp->tx_irq = irq_of_parse_and_map(np, 0);
                        of_node_put(np);
+                       lp->eth_irq = platform_get_irq_optional(pdev, 0);
+               } else {
+                       /* Check for these resources directly on the Ethernet node. */
+                       lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+                       lp->rx_irq = platform_get_irq(pdev, 1);
+                       lp->tx_irq = platform_get_irq(pdev, 0);
+                       lp->eth_irq = platform_get_irq_optional(pdev, 2);
+               }
+               if (IS_ERR(lp->dma_regs)) {
+                       dev_err(&pdev->dev, "could not map DMA regs\n");
+                       ret = PTR_ERR(lp->dma_regs);
+                       goto cleanup_clk;
+               }
+               if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
+                       dev_err(&pdev->dev, "could not determine irqs\n");
+                       ret = -ENOMEM;
                        goto cleanup_clk;
                }
-               lp->dma_regs = devm_ioremap_resource(&pdev->dev,
-                                                    &dmares);
-               lp->rx_irq = irq_of_parse_and_map(np, 1);
-               lp->tx_irq = irq_of_parse_and_map(np, 0);
-               of_node_put(np);
-               lp->eth_irq = platform_get_irq_optional(pdev, 0);
-       } else {
-               /* Check for these resources directly on the Ethernet node. */
-               lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
-               lp->rx_irq = platform_get_irq(pdev, 1);
-               lp->tx_irq = platform_get_irq(pdev, 0);
-               lp->eth_irq = platform_get_irq_optional(pdev, 2);
-       }
-       if (IS_ERR(lp->dma_regs)) {
-               dev_err(&pdev->dev, "could not map DMA regs\n");
-               ret = PTR_ERR(lp->dma_regs);
-               goto cleanup_clk;
-       }
-       if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
-               dev_err(&pdev->dev, "could not determine irqs\n");
-               ret = -ENOMEM;
-               goto cleanup_clk;
-       }
 
-       /* Reset core now that clocks are enabled, prior to accessing MDIO */
-       ret = __axienet_device_reset(lp);
-       if (ret)
-               goto cleanup_clk;
+               /* Reset core now that clocks are enabled, prior to accessing MDIO */
+               ret = __axienet_device_reset(lp);
+               if (ret)
+                       goto cleanup_clk;
+
+               /* Autodetect the need for 64-bit DMA pointers.
+                * When the IP is configured for a bus width bigger than 32 bits,
+                * writing the MSB registers is mandatory, even if they are all 0.
+                * We can detect this case by writing all 1's to one such register
+                * and see if that sticks: when the IP is configured for 32 bits
+                * only, those registers are RES0.
+                * Those MSB registers were introduced in IP v7.1, which we check first.
+                */
+               if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+                       void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
 
-       /* Autodetect the need for 64-bit DMA pointers.
-        * When the IP is configured for a bus width bigger than 32 bits,
-        * writing the MSB registers is mandatory, even if they are all 0.
-        * We can detect this case by writing all 1's to one such register
-        * and see if that sticks: when the IP is configured for 32 bits
-        * only, those registers are RES0.
-        * Those MSB registers were introduced in IP v7.1, which we check first.
-        */
-       if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
-               void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
-
-               iowrite32(0x0, desc);
-               if (ioread32(desc) == 0) {      /* sanity check */
-                       iowrite32(0xffffffff, desc);
-                       if (ioread32(desc) > 0) {
-                               lp->features |= XAE_FEATURE_DMA_64BIT;
-                               addr_width = 64;
-                               dev_info(&pdev->dev,
-                                        "autodetected 64-bit DMA range\n");
-                       }
                        iowrite32(0x0, desc);
+                       if (ioread32(desc) == 0) {      /* sanity check */
+                               iowrite32(0xffffffff, desc);
+                               if (ioread32(desc) > 0) {
+                                       lp->features |= XAE_FEATURE_DMA_64BIT;
+                                       addr_width = 64;
+                                       dev_info(&pdev->dev,
+                                                "autodetected 64-bit DMA range\n");
+                               }
+                               iowrite32(0x0, desc);
+                       }
+               }
+               if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+                       dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+                       ret = -EINVAL;
+                       goto cleanup_clk;
                }
-       }
-       if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
-               dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
-               ret = -EINVAL;
-               goto cleanup_clk;
-       }
 
-       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
-       if (ret) {
-               dev_err(&pdev->dev, "No suitable DMA available\n");
-               goto cleanup_clk;
+               ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+               if (ret) {
+                       dev_err(&pdev->dev, "No suitable DMA available\n");
+                       goto cleanup_clk;
+               }
+               netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+               netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
        }
 
        /* Check for Ethernet core IRQ (optional) */
@@ -2099,8 +2135,8 @@ static int axienet_probe(struct platform_device *pdev)
        }
 
        lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
-       lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
        lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+       lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
        lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
 
        ret = axienet_mdio_setup(lp);