Merge tag 'sched-urgent-2020-12-27' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 27 Dec 2020 17:00:47 +0000 (09:00 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sun, 27 Dec 2020 17:00:47 +0000 (09:00 -0800)
Pull scheduler fix from Ingo Molnar:
 "Fix a context switch performance regression"

* tag 'sched-urgent-2020-12-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Optimize finish_lock_switch()
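
For context on the fix itself: the combined diff below drops the
BALANCE_PUSH / BALANCE_WORK bits in rq->balance_flags and instead makes
balance_push() a persistent, statically allocated callback_head
(balance_push_callback) that is installed on rq->balance_callback while a
CPU is being brought down. finish_lock_switch() then no longer needs the
extra balance_switch() flag test on every context switch; it simply runs
__balance_callbacks(), which is a single pointer check in the common case.

A rough, self-contained sketch of that "persistent sentinel callback"
pattern follows. All toy_* names are made up for illustration only and are
not the kernel's API; only the idea mirrors the change.

	/* Toy model: a statically allocated callback node doubles as the
	 * "push mode" marker, so the hot path tests one pointer instead of
	 * a separate flags word. */
	#include <stdio.h>

	struct toy_callback {
		struct toy_callback *next;
		void (*func)(struct toy_callback *);
	};

	struct toy_rq {
		/* NULL: nothing queued; &toy_push_callback: push mode;
		 * anything else: ordinary balance work. */
		struct toy_callback *balance_callback;
	};

	static struct toy_callback toy_push_callback;

	static void toy_push(struct toy_callback *cb)
	{
		/* The real balance_push() re-installs the sentinel here so
		 * it stays pending until balance_push_set(cpu, false). */
		printf("push pending tasks away from this CPU\n");
	}

	/* The persistent sentinel, analogous to balance_push_callback. */
	static struct toy_callback toy_push_callback = {
		.next = NULL,
		.func = toy_push,
	};

	/* Hot path, analogous to finish_lock_switch() calling
	 * __balance_callbacks(): splice the list off the runqueue, then run
	 * whatever was queued. */
	static void toy_finish_lock_switch(struct toy_rq *rq)
	{
		struct toy_callback *head = rq->balance_callback;

		if (head)
			rq->balance_callback = NULL;

		while (head) {
			struct toy_callback *next = head->next;

			head->next = NULL;
			head->func(head);
			head = next;
		}
	}

	int main(void)
	{
		struct toy_rq rq = { .balance_callback = &toy_push_callback };

		toy_finish_lock_switch(&rq);	/* runs the push work once */
		return 0;
	}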

kernel/sched/core.c

diff --combined kernel/sched/core.c
@@@ -3985,15 -3985,20 +3985,20 @@@ static void do_balance_callbacks(struc
        }
  }
  
+ static void balance_push(struct rq *rq);
+ struct callback_head balance_push_callback = {
+       .next = NULL,
+       .func = (void (*)(struct callback_head *))balance_push,
+ };
  static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
  {
        struct callback_head *head = rq->balance_callback;
  
        lockdep_assert_held(&rq->lock);
-       if (head) {
+       if (head)
                rq->balance_callback = NULL;
-               rq->balance_flags &= ~BALANCE_WORK;
-       }
  
        return head;
  }
@@@ -4014,21 -4019,6 +4019,6 @@@ static inline void balance_callbacks(st
        }
  }
  
- static void balance_push(struct rq *rq);
- static inline void balance_switch(struct rq *rq)
- {
-       if (likely(!rq->balance_flags))
-               return;
-       if (rq->balance_flags & BALANCE_PUSH) {
-               balance_push(rq);
-               return;
-       }
-       __balance_callbacks(rq);
- }
  #else
  
  static inline void __balance_callbacks(struct rq *rq)
@@@ -4044,10 -4034,6 +4034,6 @@@ static inline void balance_callbacks(st
  {
  }
  
- static inline void balance_switch(struct rq *rq)
- {
- }
  #endif
  
  static inline void
@@@ -4075,7 -4061,7 +4061,7 @@@ static inline void finish_lock_switch(s
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-       balance_switch(rq);
+       __balance_callbacks(rq);
        raw_spin_unlock_irq(&rq->lock);
  }
  
  # define finish_arch_post_lock_switch()       do { } while (0)
  #endif
  
 +static inline void kmap_local_sched_out(void)
 +{
 +#ifdef CONFIG_KMAP_LOCAL
 +      if (unlikely(current->kmap_ctrl.idx))
 +              __kmap_local_sched_out();
 +#endif
 +}
 +
 +static inline void kmap_local_sched_in(void)
 +{
 +#ifdef CONFIG_KMAP_LOCAL
 +      if (unlikely(current->kmap_ctrl.idx))
 +              __kmap_local_sched_in();
 +#endif
 +}
 +
  /**
   * prepare_task_switch - prepare to switch tasks
   * @rq: the runqueue preparing to switch
@@@ -4129,7 -4099,6 +4115,7 @@@ prepare_task_switch(struct rq *rq, stru
        perf_event_task_sched_out(prev, next);
        rseq_preempt(prev);
        fire_sched_out_preempt_notifiers(prev, next);
 +      kmap_local_sched_out();
        prepare_task(next);
        prepare_arch_switch(next);
  }
@@@ -4196,14 -4165,6 +4182,14 @@@ static struct rq *finish_task_switch(st
        finish_lock_switch(rq);
        finish_arch_post_lock_switch();
        kcov_finish_switch(current);
 +      /*
 +       * kmap_local_sched_out() is invoked with rq::lock held and
 +       * interrupts disabled. There is no requirement for that, but the
 +       * sched out code does not have an interrupt enabled section.
 +       * Restoring the maps on sched in does not require interrupts being
 +       * disabled either.
 +       */
 +      kmap_local_sched_in();
  
        fire_sched_in_preempt_notifiers(current);
        /*
@@@ -4844,7 -4805,6 +4830,7 @@@ static inline void schedule_debug(struc
                preempt_count_set(PREEMPT_DISABLED);
        }
        rcu_sleep_check();
 +      SCHED_WARN_ON(ct_state() == CONTEXT_USER);
  
        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  
@@@ -5186,7 -5146,7 +5172,7 @@@ void __sched schedule_idle(void
        } while (need_resched());
  }
  
 -#ifdef CONFIG_CONTEXT_TRACKING
 +#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
  asmlinkage __visible void __sched schedule_user(void)
  {
        /*
@@@ -7282,6 -7242,10 +7268,10 @@@ static void balance_push(struct rq *rq
  
        lockdep_assert_held(&rq->lock);
        SCHED_WARN_ON(rq->cpu != smp_processor_id());
+       /*
+        * Ensure the thing is persistent until balance_push_set(.on = false);
+        */
+       rq->balance_callback = &balance_push_callback;
  
        /*
         * Both the cpu-hotplug and stop task are in this case and are
@@@ -7331,9 -7295,9 +7321,9 @@@ static void balance_push_set(int cpu, b
  
        rq_lock_irqsave(rq, &rf);
        if (on)
-               rq->balance_flags |= BALANCE_PUSH;
+               rq->balance_callback = &balance_push_callback;
        else
-               rq->balance_flags &= ~BALANCE_PUSH;
+               rq->balance_callback = NULL;
        rq_unlock_irqrestore(rq, &rf);
  }