Merge tag 'sched-core-2022-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2a05096..a247f8d 100644
 #include <linux/topology.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/cond_resched.h>
+#include <linux/sched/cputime.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/init.h>
 #include <linux/sched/isolation.h>
 #include <linux/sched/loadavg.h>
 #include <linux/sched/mm.h>
@@ -610,10 +613,10 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
                swap(rq1, rq2);
 
        raw_spin_rq_lock(rq1);
-       if (__rq_lockp(rq1) == __rq_lockp(rq2))
-               return;
+       if (__rq_lockp(rq1) != __rq_lockp(rq2))
+               raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
 
-       raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+       double_rq_clock_clear_update(rq1, rq2);
 }
 #endif
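
The double_rq_lock() rework above keeps the existing ordering swap but inverts the early return: both runqueue locks are now taken up front (skipping the second acquire when the two rqs share a lock, as under core scheduling), and double_rq_clock_clear_update(), added elsewhere in this series, is called on the pair so that updating both rq clocks back to back does not trip the double clock-update warning. Below is a minimal userspace sketch of the same address-ordered double-lock pattern; pthread mutexes stand in for the rq locks, and struct rq here is an invented stand-in, not the kernel's.

/* Sketch only: lock two "runqueues" in a fixed (address) order so that
 * concurrent callers cannot deadlock on A-B / B-A acquisition. */
#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int nr_running;
};

static void double_lock(struct rq *rq1, struct rq *rq2)
{
	/* Always take the lower address first; mirrors the swap() above. */
	if (rq1 > rq2) {
		struct rq *tmp = rq1; rq1 = rq2; rq2 = tmp;
	}
	pthread_mutex_lock(&rq1->lock);
	if (rq1 != rq2)		/* shared/same lock: acquire only once */
		pthread_mutex_lock(&rq2->lock);
}

static void double_unlock(struct rq *rq1, struct rq *rq2)
{
	if (rq1 != rq2)
		pthread_mutex_unlock(&rq2->lock);
	pthread_mutex_unlock(&rq1->lock);
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	double_lock(&a, &b);
	a.nr_running++;		/* e.g. migrate one task from b to a */
	b.nr_running--;
	double_unlock(&a, &b);
	printf("a=%d b=%d\n", a.nr_running, b.nr_running);
	return 0;
}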
 
@@ -2190,7 +2193,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->sched_class == rq->curr->sched_class)
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-       else if (p->sched_class > rq->curr->sched_class)
+       else if (sched_class_above(p->sched_class, rq->curr->sched_class))
                resched_curr(rq);
 
        /*
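
check_preempt_curr() no longer compares sched_class pointers with a raw '>': this series reverses the in-memory order of the scheduling classes so that higher-priority classes sit at lower addresses, and sched_class_above() wraps the comparison, which with that layout can reduce to a plain pointer test. A standalone sketch of the idea, with made-up names and an explicit array standing in for the linker-ordered class objects:

#include <stdio.h>

struct class_model { const char *name; };

/* Highest-priority class at the lowest address, like the reversed layout. */
static const struct class_model classes[] = {
	{ "stop" }, { "dl" }, { "rt" }, { "fair" }, { "idle" },
};

/* Lower address == more important class (illustrative macro, not the kernel's). */
#define class_above(a, b)	((a) < (b))

int main(void)
{
	const struct class_model *rt   = &classes[2];
	const struct class_model *fair = &classes[3];

	if (class_above(rt, fair))
		printf("%s preempts %s\n", rt->name, fair->name);
	return 0;
}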
@@ -2408,7 +2411,7 @@ static int migration_cpu_stop(void *data)
         * __migrate_task() such that we will not miss enforcing cpus_ptr
         * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
         */
-       flush_smp_call_function_from_idle();
+       flush_smp_call_function_queue();
 
        raw_spin_lock(&p->pi_lock);
        rq_lock(rq, &rf);
@@ -5689,7 +5692,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
         * higher scheduling class, because otherwise those lose the
         * opportunity to pull in more work from other CPUs.
         */
-       if (likely(prev->sched_class <= &fair_sched_class &&
+       if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) &&
                   rq->nr_running == rq->cfs.h_nr_running)) {
 
                p = pick_next_task_fair(rq, prev, rf);
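
The fair-class fast path in __pick_next_task() is the same optimization as before, rewritten for the reversed class order: if prev's class is not above fair and every runnable task on the rq is a CFS task, the scheduler can call pick_next_task_fair() directly instead of walking the class list. A small model of that check follows; the field names are invented and do not reflect the kernel's rq layout.

#include <stdbool.h>
#include <stdio.h>

struct rq_model {
	int nr_running;		/* all runnable tasks on this runqueue */
	int cfs_h_nr_running;	/* runnable tasks belonging to CFS */
	int prev_class_prio;	/* smaller == higher priority, like the address order */
};

#define FAIR_PRIO 3	/* stop=0, dl=1, rt=2, fair=3, idle=4 in this model */

static bool can_use_fair_fast_path(const struct rq_model *rq)
{
	/* "not above fair" means prev is in the fair or idle class. */
	return rq->prev_class_prio >= FAIR_PRIO &&
	       rq->nr_running == rq->cfs_h_nr_running;
}

int main(void)
{
	struct rq_model rq = { .nr_running = 4, .cfs_h_nr_running = 4,
			       .prev_class_prio = FAIR_PRIO };
	printf("fast path: %s\n", can_use_fair_fast_path(&rq) ? "yes" : "no");
	return 0;
}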
@@ -9469,11 +9472,11 @@ void __init sched_init(void)
        int i;
 
        /* Make sure the linker didn't screw up */
-       BUG_ON(&idle_sched_class + 1 != &fair_sched_class ||
-              &fair_sched_class + 1 != &rt_sched_class ||
-              &rt_sched_class + 1   != &dl_sched_class);
+       BUG_ON(&idle_sched_class != &fair_sched_class + 1 ||
+              &fair_sched_class != &rt_sched_class + 1 ||
+              &rt_sched_class   != &dl_sched_class + 1);
 #ifdef CONFIG_SMP
-       BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
+       BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 
        wait_bit_init();
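
The sched_init() sanity check flips accordingly: the linker is still expected to emit the sched_class instances back to back, but now with the highest-priority class at the lowest address, so for example &idle_sched_class must equal &fair_sched_class + 1. A sketch of the same adjacency check, using an array so the pointers are guaranteed contiguous (the kernel relies on linker-section ordering instead); all names here are illustrative:

#include <assert.h>
#include <stdio.h>

struct class_model { const char *name; };

/* Highest-priority class first, mirroring the reversed layout. */
static const struct class_model classes[] = {
	{ "stop" }, { "dl" }, { "rt" }, { "fair" }, { "idle" },
};

static const struct class_model *stop_class = &classes[0];
static const struct class_model *dl_class   = &classes[1];
static const struct class_model *rt_class   = &classes[2];
static const struct class_model *fair_class = &classes[3];
static const struct class_model *idle_class = &classes[4];

int main(void)
{
	/* Same shape as the new BUG_ON(): each class sits one slot above
	 * (at a higher address than) the next more-important one. */
	assert(idle_class == fair_class + 1);
	assert(fair_class == rt_class + 1);
	assert(rt_class   == dl_class + 1);
	assert(dl_class   == stop_class + 1);
	printf("layout ok: %s..%s\n", stop_class->name, idle_class->name);
	return 0;
}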