net: stmmac: Use interrupt mode INTM=1 for per channel irq
Author:     Swee Leong Ching <leong.ching.swee@intel.com>
AuthorDate: Fri, 5 Jan 2024 07:09:25 +0000 (15:09 +0800)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Sun, 7 Jan 2024 16:33:50 +0000 (16:33 +0000)
Enable per-DMA-channel interrupts using shared peripheral interrupts
(SPIs), so that only the per-channel TX and RX interrupts (TI/RI) are
handled by the TX/RX ISRs, without invoking the common interrupt ISR.

Signed-off-by: Teoh Ji Sheng <ji.sheng.teoh@intel.com>
Signed-off-by: Swee Leong Ching <leong.ching.swee@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
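
For context, a minimal illustrative sketch (not part of this patch) of what a
dedicated per-channel RX handler can look like once INTM=1 routes TI/RI onto
per-channel interrupt lines; all names here (example_*, EXAMPLE_*) are
hypothetical placeholders, not the driver's actual symbols:

	/* Hypothetical per-channel RX interrupt handler.  With INTM=1 the
	 * channel's RI bit is signalled on the channel's own SPI line, so
	 * the handler services only that channel and never enters the
	 * common (shared) interrupt path.
	 */
	static irqreturn_t example_rx_chan_isr(int irq, void *data)
	{
		struct example_chan *ch = data;	/* per-channel private data */
		u32 status = readl(ch->base + EXAMPLE_DMA_CH_STATUS);

		if (status & EXAMPLE_RI) {
			/* Ack RI and hand the real work to NAPI. */
			writel(EXAMPLE_RI, ch->base + EXAMPLE_DMA_CH_STATUS);
			napi_schedule(&ch->napi);
		}

		return IRQ_HANDLED;
	}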
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c

diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 207ff17..04bf731 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
 /* DMA Registers */
 #define XGMAC_DMA_MODE                 0x00003000
 #define XGMAC_SWR                      BIT(0)
+#define XGMAC_DMA_MODE_INTM_MASK       GENMASK(13, 12)
+#define XGMAC_DMA_MODE_INTM_SHIFT      12
+#define XGMAC_DMA_MODE_INTM_MODE1      0x1
 #define XGMAC_DMA_SYSBUS_MODE          0x00003004
 #define XGMAC_WR_OSR_LMT               GENMASK(29, 24)
 #define XGMAC_WR_OSR_LMT_SHIFT         24
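
The new defines describe the INTM field in bits 13:12 of the DMA_Mode
register, with value 1 selecting the per-channel interrupt mode.  As an
illustrative alternative (not part of this patch), the same read-modify-write
could be expressed with FIELD_PREP from <linux/bitfield.h>:

	/* Illustrative only: program INTM = 1 with the bitfield helper
	 * instead of an explicit shift; the resulting write is identical.
	 */
	u32 value = readl(ioaddr + XGMAC_DMA_MODE);

	value &= ~XGMAC_DMA_MODE_INTM_MASK;
	value |= FIELD_PREP(XGMAC_DMA_MODE_INTM_MASK, XGMAC_DMA_MODE_INTM_MODE1);
	writel(value, ioaddr + XGMAC_DMA_MODE);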
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 3cde695..dcb9f09 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -31,6 +31,13 @@ static void dwxgmac2_dma_init(void __iomem *ioaddr,
                value |= XGMAC_EAME;
 
        writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+
+       if (dma_cfg->multi_irq_en) {
+               value = readl(ioaddr + XGMAC_DMA_MODE);
+               value &= ~XGMAC_DMA_MODE_INTM_MASK;
+               value |= (XGMAC_DMA_MODE_INTM_MODE1 << XGMAC_DMA_MODE_INTM_SHIFT);
+               writel(value, ioaddr + XGMAC_DMA_MODE);
+       }
 }
 
 static void dwxgmac2_dma_init_chan(struct stmmac_priv *priv,
@@ -365,19 +372,18 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
        }
 
        /* TX/RX NORMAL interrupts */
-       if (likely(intr_status & XGMAC_NIS)) {
-               if (likely(intr_status & XGMAC_RI)) {
-                       u64_stats_update_begin(&rxq_stats->syncp);
-                       rxq_stats->rx_normal_irq_n++;
-                       u64_stats_update_end(&rxq_stats->syncp);
-                       ret |= handle_rx;
-               }
-               if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
-                       u64_stats_update_begin(&txq_stats->syncp);
-                       txq_stats->tx_normal_irq_n++;
-                       u64_stats_update_end(&txq_stats->syncp);
-                       ret |= handle_tx;
-               }
+       if (likely(intr_status & XGMAC_RI)) {
+               u64_stats_update_begin(&rxq_stats->syncp);
+               rxq_stats->rx_normal_irq_n++;
+               u64_stats_update_end(&rxq_stats->syncp);
+               ret |= handle_rx;
+       }
+
+       if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+               u64_stats_update_begin(&txq_stats->syncp);
+               txq_stats->tx_normal_irq_n++;
+               u64_stats_update_end(&txq_stats->syncp);
+               ret |= handle_tx;
        }
 
        /* Clear interrupts */