Merge tag 'pm-5.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0f1d3a3..9d71046 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -92,6 +92,13 @@ static bool ksoftirqd_running(unsigned long pending)
                !__kthread_should_park(tsk);
 }
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+DEFINE_PER_CPU(int, hardirqs_enabled);
+DEFINE_PER_CPU(int, hardirq_context);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+#endif
+
 /*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
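These per-CPU flags are the state behind lockdep's hardirq tracking. A minimal sketch of what a reader of that state might look like, assuming only the standard per-CPU accessors (the helper name below is hypothetical; lockdep's real checks read these variables in a similar way):

#include <linux/percpu.h>

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);

/*
 * Hypothetical helper: does lockdep currently consider this CPU to be
 * executing in hardirq context with hardirqs tracked as disabled?
 */
static inline bool lockdep_in_hardirq(void)
{
        return this_cpu_read(hardirq_context) &&
               !this_cpu_read(hardirqs_enabled);
}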
@@ -102,17 +109,11 @@ static bool ksoftirqd_running(unsigned long pending)
  * softirq and whether we just have bh disabled.
  */
 
+#ifdef CONFIG_TRACE_IRQFLAGS
 /*
- * This one is for softirq.c-internal use,
- * where hardirqs are disabled legitimately:
+ * This is for softirq.c-internal use, where hardirqs are disabled
+ * legitimately:
  */
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-DEFINE_PER_CPU(int, hardirqs_enabled);
-DEFINE_PER_CPU(int, hardirq_context);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
-
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
        unsigned long flags;
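__local_bh_disable_ip() is the instrumented slow path behind the usual bottom-half critical-section API. A hedged usage sketch of that API (local_bh_disable()/local_bh_enable() are the real calls; the protected counter is invented for illustration):

#include <linux/bottom_half.h>

static int shared_with_softirq;         /* hypothetical state also touched by a softirq */

static void update_shared(void)
{
        local_bh_disable();             /* softirqs stay off this CPU */
        shared_with_softirq++;          /* safe against local softirq handlers */
        local_bh_enable();              /* may immediately run pending softirqs */
}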
@@ -203,6 +204,50 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 }
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+static inline void invoke_softirq(void)
+{
+       if (ksoftirqd_running(local_softirq_pending()))
+               return;
+
+       if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+               /*
+                * We can safely execute softirqs on the current stack if
+                * it is the irq stack, because it should be nearly empty
+                * at this stage.
+                */
+               __do_softirq();
+#else
+               /*
+                * Otherwise, irq_exit() is called on the task stack, which
+                * can already be deep. So run the softirq on its own stack
+                * to prevent any overrun.
+                */
+               do_softirq_own_stack();
+#endif
+       } else {
+               wakeup_softirqd();
+       }
+}
+
+asmlinkage __visible void do_softirq(void)
+{
+       __u32 pending;
+       unsigned long flags;
+
+       if (in_interrupt())
+               return;
+
+       local_irq_save(flags);
+
+       pending = local_softirq_pending();
+
+       if (pending && !ksoftirqd_running(pending))
+               do_softirq_own_stack();
+
+       local_irq_restore(flags);
+}
+
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
  * but break the loop if need_resched() is set or after 2 ms.
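invoke_softirq() and do_softirq() only dispatch work that a producer has already marked pending. A hedged sketch of the producer side (open_softirq() and raise_softirq() are the real kernel calls; the handler is hypothetical, and reusing the TASKLET_SOFTIRQ slot is purely illustrative, since a real user must not clobber it):

#include <linux/interrupt.h>

/*
 * Hypothetical handler: called with irqs enabled and bottom halves
 * disabled, from one of the dispatch paths shown above.
 */
static void example_softirq_action(struct softirq_action *unused)
{
        /* drain a per-CPU queue, kick deferred work, ... */
}

static int __init example_init(void)
{
        open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
        raise_softirq(TASKLET_SOFTIRQ);  /* now pending; serviced on irq exit */
        return 0;
}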
@@ -270,10 +315,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
        current->flags &= ~PF_MEMALLOC;
 
        pending = local_softirq_pending();
-       account_irq_enter_time(current);
 
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();
+       account_softirq_enter(current);
 
 restart:
        /* Reset the pending bitmask before enabling irqs */
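After the restart: label, __do_softirq() clears the pending bitmask, re-enables interrupts, and walks the set bits. A simplified, hedged model of that dispatch loop (local_softirq_pending(), set_softirq_pending(), __ffs() and softirq_vec are the real symbols; tracing, the restart counter and the 2 ms budget are omitted):

__u32 pending = local_softirq_pending();

set_softirq_pending(0);          /* reset the bitmask before enabling irqs */
local_irq_enable();

while (pending) {
        unsigned int nr = __ffs(pending);          /* lowest pending softirq */

        softirq_vec[nr].action(&softirq_vec[nr]);  /* run its handler */
        pending &= pending - 1;                    /* clear that bit */
}

local_irq_disable();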
@@ -320,46 +365,24 @@ restart:
                wakeup_softirqd();
        }
 
+       account_softirq_exit(current);
        lockdep_softirq_end(in_hardirq);
-       account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
-asmlinkage __visible void do_softirq(void)
-{
-       __u32 pending;
-       unsigned long flags;
-
-       if (in_interrupt())
-               return;
-
-       local_irq_save(flags);
-
-       pending = local_softirq_pending();
-
-       if (pending && !ksoftirqd_running(pending))
-               do_softirq_own_stack();
-
-       local_irq_restore(flags);
-}
-
 /**
  * irq_enter_rcu - Enter an interrupt context with RCU watching
  */
 void irq_enter_rcu(void)
 {
-       if (is_idle_task(current) && !in_interrupt()) {
-               /*
-                * Prevent raise_softirq from needlessly waking up ksoftirqd
-                * here, as softirq will be serviced on return from interrupt.
-                */
-               local_bh_disable();
+       __irq_enter_raw();
+
+       if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
                tick_irq_enter();
-               _local_bh_enable();
-       }
-       __irq_enter();
+
+       account_hardirq_enter(current);
 }
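The irq_count() == HARDIRQ_OFFSET test replaces the old local_bh_disable() trick for keeping ksoftirqd quiet: it is only true in the first, non-nested hardirq. A hedged gloss on why, based on the standard preempt_count layout in <linux/preempt.h>:

/*
 * irq_count() is preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK).
 * __irq_enter_raw() has just added HARDIRQ_OFFSET, so equality with
 * HARDIRQ_OFFSET means exactly one hardirq level, with no softirq being
 * served, no bh-disabled section and no NMI underneath: the interrupt
 * landed directly on the idle task, the one case where the stopped tick
 * may need restarting via tick_irq_enter().
 */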
 
 /**
@@ -371,32 +394,6 @@ void irq_enter(void)
        irq_enter_rcu();
 }
 
-static inline void invoke_softirq(void)
-{
-       if (ksoftirqd_running(local_softirq_pending()))
-               return;
-
-       if (!force_irqthreads) {
-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
-               /*
-                * We can safely execute softirq on the current stack if
-                * it is the irq stack, because it should be near empty
-                * at this stage.
-                */
-               __do_softirq();
-#else
-               /*
-                * Otherwise, irq_exit() is called on the task stack that can
-                * be potentially deep already. So call softirq in its own stack
-                * to prevent from any overrun.
-                */
-               do_softirq_own_stack();
-#endif
-       } else {
-               wakeup_softirqd();
-       }
-}
-
 static inline void tick_irq_exit(void)
 {
 #ifdef CONFIG_NO_HZ_COMMON
@@ -417,7 +414,7 @@ static inline void __irq_exit_rcu(void)
 #else
        lockdep_assert_irqs_disabled();
 #endif
-       account_irq_exit_time(current);
+       account_hardirq_exit(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();
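The ordering in __irq_exit_rcu() is deliberate: hardirq time is accounted while still formally in hardirq context, HARDIRQ_OFFSET is dropped, and only then are pending softirqs dispatched, so that in_interrupt() is false by the time invoke_softirq() runs. A condensed, hedged restatement of that tail sequence (the identifiers are the real ones above; the comments are editorial):

account_hardirq_exit(current);          /* charge hardirq time first */
preempt_count_sub(HARDIRQ_OFFSET);      /* formally leave hardirq context */
if (!in_interrupt() && local_softirq_pending())
        invoke_softirq();               /* safe now: no longer nested */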