posix-timers: Unify overrun/requeue_pending handling
author: Thomas Gleixner <tglx@linutronix.de>
Tue, 30 May 2017 21:15:42 +0000 (23:15 +0200)
committer: Thomas Gleixner <tglx@linutronix.de>
Sun, 4 Jun 2017 13:40:24 +0000 (15:40 +0200)
hrtimer-based posix-timers and posix-cpu-timers handle the update of the
rearming- and overflow-related status fields differently.

Move that update to the common rearming code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Link: http://lkml.kernel.org/r/20170530211656.484936964@linutronix.de
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c

index a77a792..1683e50 100644 (file)
@@ -527,6 +527,7 @@ static void cpu_timer_fire(struct k_itimer *timer)
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
+               ++timer->it_requeue_pending;
        }
 }
 
@@ -997,12 +998,12 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state))
-                       goto out;
+                       return;
 
                /* Protect timer list r/w in arm_timer() */
                sighand = lock_task_sighand(p, &flags);
                if (!sighand)
-                       goto out;
+                       return;
        } else {
                /*
                 * Protect arm_timer() and timer sampling in case of call to
@@ -1015,11 +1016,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         * We can't even collect a sample any more.
                         */
                        timer->it.cpu.expires = 0;
-                       goto out;
+                       return;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-                       unlock_task_sighand(p, &flags);
-                       /* Optimizations: if the process is dying, no need to rearm */
-                       goto out;
+                       /* If the process is dying, no need to rearm */
+                       goto unlock;
                }
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
@@ -1031,12 +1031,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         */
        WARN_ON_ONCE(!irqs_disabled());
        arm_timer(timer);
+unlock:
        unlock_task_sighand(p, &flags);
-
-out:
-       timer->it_overrun_last = timer->it_overrun;
-       timer->it_overrun = -1;
-       ++timer->it_requeue_pending;
 }
 
 /**
index dee6a0d..79a00e0 100644 (file)
@@ -291,10 +291,6 @@ static void schedule_next_timer(struct k_itimer *timr)
        timr->it_overrun += (unsigned int) hrtimer_forward(timer,
                                                timer->base->get_time(),
                                                timr->it.real.interval);
-
-       timr->it_overrun_last = timr->it_overrun;
-       timr->it_overrun = -1;
-       ++timr->it_requeue_pending;
        hrtimer_restart(timer);
 }
 
@@ -315,18 +311,23 @@ void do_schedule_next_timer(struct siginfo *info)
        unsigned long flags;
 
        timr = lock_timer(info->si_tid, &flags);
+       if (!timr)
+               return;
 
-       if (timr && timr->it_requeue_pending == info->si_sys_private) {
+       if (timr->it_requeue_pending == info->si_sys_private) {
                if (timr->it_clock < 0)
                        posix_cpu_timer_schedule(timr);
                else
                        schedule_next_timer(timr);
 
+               timr->it_overrun_last = timr->it_overrun;
+               timr->it_overrun = -1;
+               ++timr->it_requeue_pending;
+
                info->si_overrun += timr->it_overrun_last;
        }
 
-       if (timr)
-               unlock_timer(timr, flags);
+       unlock_timer(timr, flags);
 }
 
 int posix_timer_event(struct k_itimer *timr, int si_private)