rtase: Implement the interrupt routine and rtase_poll
author: Justin Lai <justinlai0215@realtek.com>
Wed, 4 Sep 2024 03:21:05 +0000 (11:21 +0800)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 6 Sep 2024 05:02:38 +0000 (22:02 -0700)
1. Implement rtase_interrupt to handle txQ0/rxQ0, txQ4~txQ7 interrupts,
and implement rtase_q_interrupt to handle txQ1/rxQ1, txQ2/rxQ2 and
txQ3/rxQ3 interrupts.
2. Implement rtase_poll to call ring_handler to process the tx or
rx packet of each ring. If the returned value is budget, it means that
there is still work of a certain ring that has not yet been completed.

Signed-off-by: Justin Lai <justinlai0215@realtek.com>
Link: https://patch.msgid.link/20240904032114.247117-5-justinlai0215@realtek.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/realtek/rtase/rtase_main.c

index 7f683c2..6e7f30b 100644 (file)
@@ -571,6 +571,75 @@ static void rtase_hw_start(const struct net_device *dev)
        rtase_enable_hw_interrupt(tp);
 }
 
+/*  the interrupt handler services RXQ0/TXQ0 and TXQ4~TXQ7 interrupt status
+ *  (interrupt vector 0, which uses 32-bit IMR/ISR registers)
+ */
+static irqreturn_t rtase_interrupt(int irq, void *dev_instance)
+{
+       const struct rtase_private *tp;
+       struct rtase_int_vector *ivec;
+       u32 status;
+
+       ivec = dev_instance;
+       tp = ivec->tp;
+       status = rtase_r32(tp, ivec->isr_addr);
+
+       /* mask this vector's interrupts (IMR = 0), then acknowledge the
+        * pending status bits; the RTASE_FOVW bit is intentionally NOT
+        * acknowledged here -- NOTE(review): presumably it is cleared by a
+        * dedicated overflow-recovery path, confirm against the rest of the
+        * driver
+        */
+       rtase_w32(tp, ivec->imr_addr, 0x0);
+       rtase_w32(tp, ivec->isr_addr, status & ~RTASE_FOVW);
+
+       /* defer the actual TX/RX processing to NAPI context; interrupts for
+        * this vector are re-enabled in rtase_poll() once the work is done
+        */
+       if (napi_schedule_prep(&ivec->napi))
+               __napi_schedule(&ivec->napi);
+
+       return IRQ_HANDLED;
+}
+
+/*  the interrupt handler services RXQ1&TXQ1 or RXQ2&TXQ2 or RXQ3&TXQ3
+ *  interrupt status according to the interrupt vector; these per-queue
+ *  vectors use 16-bit IMR/ISR registers (unlike vector 0's 32-bit ones)
+ */
+static irqreturn_t rtase_q_interrupt(int irq, void *dev_instance)
+{
+       const struct rtase_private *tp;
+       struct rtase_int_vector *ivec;
+       u16 status;
+
+       ivec = dev_instance;
+       tp = ivec->tp;
+       status = rtase_r16(tp, ivec->isr_addr);
+
+       /* mask this vector's interrupts (IMR = 0), then acknowledge all
+        * pending status bits by writing them back to the ISR
+        */
+       rtase_w16(tp, ivec->imr_addr, 0x0);
+       rtase_w16(tp, ivec->isr_addr, status);
+
+       /* defer the actual TX/RX processing to NAPI context; interrupts for
+        * this vector are re-enabled in rtase_poll() once the work is done
+        */
+       if (napi_schedule_prep(&ivec->napi))
+               __napi_schedule(&ivec->napi);
+
+       return IRQ_HANDLED;
+}
+
+/*  NAPI poll callback: process the TX/RX rings attached to this interrupt
+ *  vector, up to @budget units of work in total
+ *
+ *  Returns @budget when work remains (NAPI will poll again), otherwise the
+ *  amount of work done after completing NAPI and re-enabling the vector's
+ *  interrupts.
+ */
+static int rtase_poll(struct napi_struct *napi, int budget)
+{
+       const struct rtase_int_vector *ivec;
+       const struct rtase_private *tp;
+       struct rtase_ring *ring;
+       int total_workdone = 0;
+
+       ivec = container_of(napi, struct rtase_int_vector, napi);
+       tp = ivec->tp;
+
+       /* each ring on this vector contributes its own work count; note that
+        * every ring_handler call is passed the full @budget, so the summed
+        * total may exceed @budget
+        */
+       list_for_each_entry(ring, &ivec->ring_list, ring_entry)
+               total_workdone += ring->ring_handler(ring, budget);
+
+       /* budget exhausted: stay in polling mode, interrupts remain masked */
+       if (total_workdone >= budget)
+               return budget;
+
+       /* all work done: leave polling mode and unmask this vector's
+        * interrupts; vector 0 has a 32-bit IMR, the others 16-bit
+        */
+       if (napi_complete_done(napi, total_workdone)) {
+               if (!ivec->index)
+                       rtase_w32(tp, ivec->imr_addr, ivec->imr);
+               else
+                       rtase_w16(tp, ivec->imr_addr, ivec->imr);
+       }
+
+       return total_workdone;
+}
+
 static int rtase_open(struct net_device *dev)
 {
        struct rtase_private *tp = netdev_priv(dev);