macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
+{
+ struct macb_queue *queue = dev_id;
+ struct macb *bp = queue->bp;
+ u32 status;
+
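+ /* The WoL IRQ is requested with IRQF_SHARED, so bail out early if no status bit is pending for this queue */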
+ status = queue_readl(queue, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
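+ /* WoL hit: mask the WoL interrupt, clear its status and report a wakeup event */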
+ if (status & MACB_BIT(WOL)) {
+ queue_writel(queue, IDR, MACB_BIT(WOL));
+ macb_writel(bp, WOL, 0);
+ netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(WOL));
+ pm_wakeup_event(&bp->pdev->dev, 0);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
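+ /* GEM and MACB use different WoL registers and IER bits, so each variant gets its own handler */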
if (macb_is_gem(bp)) {
- devm_free_irq(dev, bp->queues[0].irq, bp->queues);
err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
dev_err(dev,
"Unable to request IRQ %d (error %d)\n",
bp->queues[0].irq, err);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
gem_writel(bp, WOL, MACB_BIT(MAG));
} else {
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
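+ /* Unmask the WoL interrupt on queue 0 and arm magic-packet matching */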
queue_writel(bp->queues, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
}