sched: Provide rt_mutex specific scheduler helpers
author     Peter Zijlstra <peterz@infradead.org>
           Fri, 8 Sep 2023 16:22:51 +0000 (18:22 +0200)
committer  Peter Zijlstra <peterz@infradead.org>
           Wed, 20 Sep 2023 07:31:12 +0000 (09:31 +0200)
With PREEMPT_RT there is an rt_mutex recursion problem where
sched_submit_work() can use an rtlock (aka spinlock_t). More
specifically, what happens is:

  mutex_lock() /* really rt_mutex */
    ...
      __rt_mutex_slowlock_locked()
        task_blocks_on_rt_mutex()
          // enqueue current task as waiter
          // do PI chain walk
        rt_mutex_slowlock_block()
          schedule()
            sched_submit_work()
              ...
              spin_lock() /* really rtlock */
                ...
                  __rt_mutex_slowlock_locked()
                    task_blocks_on_rt_mutex()
                      // enqueue current task as waiter *AGAIN*
                      // *CONFUSION*

Fix this by making rt_mutex do the sched_submit_work() early, before
it enqueues itself as a waiter -- before it even knows *if* it will
wait.
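
As an illustration of the intended calling convention (the actual
rtmutex call sites are changed by a later patch in this series; the
sketch below is simplified and not part of this patch), the rt_mutex
slow path ends up bracketed roughly like so:

  static __sched int rt_mutex_slowlock(struct rt_mutex_base *lock,
                                       struct ww_acquire_ctx *ww_ctx,
                                       unsigned int state)
  {
          unsigned long flags;
          int ret;

          /* Submit work *before* we can possibly enqueue as a waiter. */
          rt_mutex_pre_schedule();

          raw_spin_lock_irqsave(&lock->wait_lock, flags);
          /* Enqueues the waiter; blocks via rt_mutex_schedule(). */
          ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
          raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

          /* Pairs with rt_mutex_pre_schedule(); runs sched_update_worker(). */
          rt_mutex_post_schedule();

          return ret;
  }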

[[ basically Thomas' patch but with different naming and a few asserts
   added ]]

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230908162254.999499-5-bigeasy@linutronix.de
include/linux/sched.h
include/linux/sched/rt.h
kernel/sched/core.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac..67623ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -911,6 +911,9 @@ struct task_struct {
         * ->sched_remote_wakeup gets used, so it can be in this word.
         */
        unsigned                        sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+       unsigned                        sched_rt_mutex:1;
+#endif
 
        /* Bit to tell LSMs we're in execve(): */
        unsigned                        in_execve:1;
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 994c256..b2b9e6e 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
 /*
  * Must hold either p->pi_lock or task_rq(p)->lock.
  */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1ea7ba5..58d0346 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6723,9 +6723,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
        static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
        unsigned int task_flags;
 
-       if (task_is_running(tsk))
-               return;
-
        /*
         * Establish LD_WAIT_CONFIG context to ensure none of the code called
         * will use a blocking primitive -- which would lead to recursion.
@@ -6783,7 +6780,12 @@ asmlinkage __visible void __sched schedule(void)
 {
        struct task_struct *tsk = current;
 
-       sched_submit_work(tsk);
+#ifdef CONFIG_RT_MUTEXES
+       lockdep_assert(!tsk->sched_rt_mutex);
+#endif
+
+       if (!task_is_running(tsk))
+               sched_submit_work(tsk);
        __schedule_loop(SM_NONE);
        sched_update_worker(tsk);
 }
@@ -7044,6 +7046,32 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
 
 #ifdef CONFIG_RT_MUTEXES
 
+/*
+ * Would be more useful with typeof()/auto_type but they don't mix with
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
+ * name such that if someone were to implement this function we get to compare
+ * notes.
+ */
+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
+
+void rt_mutex_pre_schedule(void)
+{
+       lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
+       sched_submit_work(current);
+}
+
+void rt_mutex_schedule(void)
+{
+       lockdep_assert(current->sched_rt_mutex);
+       __schedule_loop(SM_NONE);
+}
+
+void rt_mutex_post_schedule(void)
+{
+       sched_update_worker(current);
+       lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
+}
+
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
 {
        if (pi_task)