/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+ * unless we're doing some of the synchronous softirqs.
*/
-static bool ksoftirqd_running(void)
+#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
+static bool ksoftirqd_running(unsigned long pending)
{
struct task_struct *tsk = __this_cpu_read(ksoftirqd);

+	/* HI and TASKLET are the synchronous classes: run them now. */
+	if (pending & SOFTIRQ_NOW_MASK)
+		return false;
return tsk && (tsk->state == TASK_RUNNING);
}
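
/*
 * A minimal standalone sketch (illustration only, not part of the patch)
 * of the policy the new mask encodes. The enum mirrors the softirq
 * ordering in the kernel's <linux/interrupt.h>; defer_to_ksoftirqd() is a
 * hypothetical name for the decision the call sites below now make.
 */
#include <stdio.h>

enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ,
       BLOCK_SOFTIRQ, IRQ_POLL_SOFTIRQ, TASKLET_SOFTIRQ };

#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))

/* Return 1 when the pending work should be left to ksoftirqd. */
static int defer_to_ksoftirqd(unsigned long pending, int ksoftirqd_runnable)
{
	if (pending & SOFTIRQ_NOW_MASK)	/* a synchronous class is pending */
		return 0;		/* process softirqs right now */
	return ksoftirqd_runnable;	/* otherwise keep the fairness rule */
}

int main(void)
{
	/* Tasklet pending: handled inline even while ksoftirqd runs. */
	printf("%d\n", defer_to_ksoftirqd(1UL << TASKLET_SOFTIRQ, 1));	/* 0 */
	/* Only NET_RX pending: still deferred to ksoftirqd for fairness. */
	printf("%d\n", defer_to_ksoftirqd(1UL << NET_RX_SOFTIRQ, 1));	/* 1 */
	return 0;
}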

static void __local_bh_enable(unsigned int cnt)
{
lockdep_assert_irqs_disabled();
+	/*
+	 * If this subtraction will drop preempt_count() to zero, preemption
+	 * is being re-enabled: report it to the tracer before the drop.
+	 */
+	if (preempt_count() == cnt)
+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
-	preempt_count_sub(cnt);
+
+	/* Untraced subtract: the enable was already reported above. */
+	__preempt_count_sub(cnt);
}
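
/*
 * A minimal model of the ordering above (illustration only; the counter,
 * hook, and function names are hypothetical stand-ins). The point is that
 * the "preemption enabled" event is reported while the count is still
 * nonzero, and the subtraction itself is the plain, untraced variant so
 * the event is not reported a second time.
 */
#include <stdio.h>

static unsigned int model_count = 2;	/* arbitrary example offset */

static void model_trace_preempt_on(void)
{
	/* A tracer hooked here still observes a nonzero count. */
	printf("preempt on reported, count still %u\n", model_count);
}

static void model_bh_enable(unsigned int cnt)
{
	if (model_count == cnt)		/* this subtract reaches zero */
		model_trace_preempt_on();	/* report before dropping it */
	model_count -= cnt;		/* plain, untraced subtract */
}

int main(void)
{
	model_bh_enable(2);
	printf("count now %u\n", model_count);	/* 0 */
	return 0;
}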

/* In do_softirq(): */
pending = local_softirq_pending();
- if (pending && !ksoftirqd_running())
+ if (pending && !ksoftirqd_running(pending))
do_softirq_own_stack();
	local_irq_restore(flags);
}

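/*
 * A hedged sketch of the user-visible effect of forwarding the pending
 * mask here and in invoke_softirq() below. It assumes the classic
 * three-argument tasklet API of this patch's era; the "demo" names are
 * hypothetical. A tasklet scheduled from a hard interrupt handler now
 * runs at irq_exit() even while ksoftirqd is busy with deferred work,
 * rather than waiting for the thread to be scheduled.
 */
#include <linux/module.h>
#include <linux/interrupt.h>

static void demo_tasklet_fn(unsigned long data)
{
	/* Softirq context; with this patch it runs promptly at irq_exit(). */
	pr_info("demo tasklet ran\n");
}
static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

static int __init demo_init(void)
{
	tasklet_schedule(&demo_tasklet);	/* raises TASKLET_SOFTIRQ */
	return 0;
}
static void __exit demo_exit(void)
{
	tasklet_kill(&demo_tasklet);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
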
static inline void invoke_softirq(void)
{
- if (ksoftirqd_running())
+ if (ksoftirqd_running(local_softirq_pending()))
return;
if (!force_irqthreads) {