sched/deadline: Implement "runtime overrun signal" support
authorJuri Lelli <juri.lelli@gmail.com>
Tue, 12 Dec 2017 11:10:24 +0000 (12:10 +0100)
committerIngo Molnar <mingo@kernel.org>
Wed, 10 Jan 2018 10:30:31 +0000 (11:30 +0100)
This patch adds the possibility of getting the delivery of a SIGXCPU
signal whenever there is a runtime overrun. The request is done through
the sched_flags field within the sched_attr structure.

Forward port of https://lkml.org/lkml/2009/10/16/170

Tested-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Claudio Scordino <claudio@evidence.eu.com>
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Link: http://lkml.kernel.org/r/1513077024-25461-1-git-send-email-claudio@evidence.eu.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/sched.h
include/uapi/linux/sched.h
kernel/sched/core.c
kernel/sched/deadline.c
kernel/time/posix-cpu-timers.c

index d258826..274a449 100644 (file)
@@ -472,11 +472,15 @@ struct sched_dl_entity {
         * has not been executed yet. This flag is useful to avoid race
         * conditions between the inactive timer handler and the wakeup
         * code.
+        *
+        * @dl_overrun tells if the task asked to be informed about runtime
+        * overruns.
         */
        unsigned int                    dl_throttled      : 1;
        unsigned int                    dl_boosted        : 1;
        unsigned int                    dl_yielded        : 1;
        unsigned int                    dl_non_contending : 1;
+       unsigned int                    dl_overrun        : 1;
 
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
index 30a9e51..22627f8 100644 (file)
  */
 #define SCHED_FLAG_RESET_ON_FORK       0x01
 #define SCHED_FLAG_RECLAIM             0x02
+#define SCHED_FLAG_DL_OVERRUN          0x04
+
+#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK       | \
+                        SCHED_FLAG_RECLAIM             | \
+                        SCHED_FLAG_DL_OVERRUN)
 
 #endif /* _UAPI_LINUX_SCHED_H */
index a794f81..e28391b 100644 (file)
@@ -4085,8 +4085,7 @@ recheck:
                        return -EINVAL;
        }
 
-       if (attr->sched_flags &
-               ~(SCHED_FLAG_RESET_ON_FORK | SCHED_FLAG_RECLAIM))
+       if (attr->sched_flags & ~SCHED_FLAG_ALL)
                return -EINVAL;
 
        /*
index 2473736..4c666db 100644 (file)
@@ -1155,6 +1155,12 @@ static void update_curr_dl(struct rq *rq)
 throttle:
        if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
                dl_se->dl_throttled = 1;
+
+               /* If requested, inform the user about runtime overruns. */
+               if (dl_runtime_exceeded(dl_se) &&
+                   (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
+                       dl_se->dl_overrun = 1;
+
                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
@@ -2566,6 +2572,7 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_throttled = 0;
        dl_se->dl_yielded = 0;
        dl_se->dl_non_contending = 0;
+       dl_se->dl_overrun = 0;
 }
 
 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
index 1f27887..cf50ea3 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/tick.h>
 #include <linux/workqueue.h>
 #include <linux/compat.h>
+#include <linux/sched/deadline.h>
 
 #include "posix-timers.h"
 
@@ -791,6 +792,14 @@ check_timers_list(struct list_head *timers,
        return 0;
 }
 
+/*
+ * Deliver the runtime-overrun notification, if one is pending.
+ *
+ * update_curr_dl() sets dl.dl_overrun when a SCHED_DEADLINE task that
+ * requested SCHED_FLAG_DL_OVERRUN exceeds its runtime. Here (timer-tick
+ * context) we consume that flag and send SIGXCPU to the task, so at most
+ * one signal is generated per overrun event.
+ */
+static inline void check_dl_overrun(struct task_struct *tsk)
+{
+       if (tsk->dl.dl_overrun) {
+               tsk->dl.dl_overrun = 0;
+               __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+       }
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them off
  * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
@@ -804,6 +813,9 @@ static void check_thread_timers(struct task_struct *tsk,
        u64 expires;
        unsigned long soft;
 
+       if (dl_task(tsk))
+               check_dl_overrun(tsk);
+
        /*
         * If cputime_expires is zero, then there are no active
         * per thread CPU timers.
@@ -906,6 +918,9 @@ static void check_process_timers(struct task_struct *tsk,
        struct task_cputime cputime;
        unsigned long soft;
 
+       if (dl_task(tsk))
+               check_dl_overrun(tsk);
+
        /*
         * If cputimer is not running, then there are no active
         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
@@ -1111,6 +1126,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                        return 1;
        }
 
+       if (dl_task(tsk) && tsk->dl.dl_overrun)
+               return 1;
+
        return 0;
 }