Merge branch 'linus' into timers/core
author     Ingo Molnar <mingo@kernel.org>  Sun, 12 Jan 2014 13:12:44 +0000 (14:12 +0100)
committer  Ingo Molnar <mingo@kernel.org>  Sun, 12 Jan 2014 13:12:44 +0000 (14:12 +0100)
Pick up the latest fixes and refresh the branch.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/context_tracking.h
include/linux/context_tracking_state.h
include/linux/tick.h
include/linux/vtime.h
init/Kconfig
kernel/context_tracking.c
kernel/posix-cpu-timers.c
kernel/softirq.c
kernel/time/tick-broadcast.c
kernel/time/tick-internal.h
kernel/time/tick-sched.c

diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 1581587..37b81bd 100644
@@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev,
 
 static inline void user_enter(void)
 {
-       if (static_key_false(&context_tracking_enabled))
+       if (context_tracking_is_enabled())
                context_tracking_user_enter();
 
 }
 static inline void user_exit(void)
 {
-       if (static_key_false(&context_tracking_enabled))
+       if (context_tracking_is_enabled())
                context_tracking_user_exit();
 }
 
@@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void)
 {
        enum ctx_state prev_ctx;
 
-       if (!static_key_false(&context_tracking_enabled))
+       if (!context_tracking_is_enabled())
                return 0;
 
        prev_ctx = this_cpu_read(context_tracking.state);
@@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void)
 
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
-       if (static_key_false(&context_tracking_enabled)) {
+       if (context_tracking_is_enabled()) {
                if (prev_ctx == IN_USER)
                        context_tracking_user_enter();
        }
@@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
 static inline void context_tracking_task_switch(struct task_struct *prev,
                                                struct task_struct *next)
 {
-       if (static_key_false(&context_tracking_enabled))
+       if (context_tracking_is_enabled())
                __context_tracking_task_switch(prev, next);
 }
 #else
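
Aside: exception_enter()/exception_exit() above save and restore the
per-CPU context state across an exception. A compilable userspace
analogue of that save/restore pattern (the __thread "per-CPU" stand-in
and the printed strings are illustrative, not the kernel implementation):

#include <stdio.h>

enum ctx_state { IN_KERNEL = 0, IN_USER };

static __thread enum ctx_state state = IN_KERNEL;  /* per-"CPU" state */

static enum ctx_state exception_enter(void)
{
        enum ctx_state prev = state;    /* remember what we interrupted */
        state = IN_KERNEL;
        return prev;
}

static void exception_exit(enum ctx_state prev)
{
        state = prev;                   /* restore the interrupted state */
}

int main(void)
{
        state = IN_USER;                /* pretend we faulted from userspace */
        enum ctx_state prev = exception_enter();
        puts("in exception handler, tracked as IN_KERNEL");
        exception_exit(prev);
        printf("restored: %s\n", state == IN_USER ? "IN_USER" : "IN_KERNEL");
        return 0;
}
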
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 0f1979d..97a8122 100644
@@ -22,15 +22,20 @@ struct context_tracking {
 extern struct static_key context_tracking_enabled;
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
-static inline bool context_tracking_in_user(void)
+static inline bool context_tracking_is_enabled(void)
 {
-       return __this_cpu_read(context_tracking.state) == IN_USER;
+       return static_key_false(&context_tracking_enabled);
 }
 
-static inline bool context_tracking_active(void)
+static inline bool context_tracking_cpu_is_enabled(void)
 {
        return __this_cpu_read(context_tracking.active);
 }
+
+static inline bool context_tracking_in_user(void)
+{
+       return __this_cpu_read(context_tracking.state) == IN_USER;
+}
 #else
 static inline bool context_tracking_in_user(void) { return false; }
 static inline bool context_tracking_active(void) { return false; }
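
Aside: the renames above turn open-coded static_key_false() tests into
named predicates. A compilable userspace sketch of the accessor pattern,
using __builtin_expect as a stand-in for the kernel's jump-label static
key (the helper names mirror the patch; everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool context_tracking_enabled;   /* stand-in for the static key */

static inline bool context_tracking_is_enabled(void)
{
        /* Branch biased toward "disabled", like static_key_false() */
        return __builtin_expect(context_tracking_enabled, 0);
}

static void context_tracking_user_enter(void)
{
        puts("slow path: note transition to user mode");
}

static inline void user_enter(void)
{
        if (context_tracking_is_enabled())
                context_tracking_user_enter();
}

int main(void)
{
        user_enter();                   /* off: fast path, no output */
        context_tracking_enabled = true;
        user_enter();                   /* on: slow path runs */
        return 0;
}
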
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 5128d33..0175d86 100644
@@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 extern void tick_clock_notify(void);
 extern int tick_check_oneshot_change(int allow_nohz);
 extern struct tick_sched *tick_get_tick_sched(int cpu);
-extern void tick_check_idle(int cpu);
+extern void tick_check_idle(void);
 extern int tick_oneshot_mode_active(void);
 #  ifndef arch_needs_cpu
 #   define arch_needs_cpu(cpu) (0)
@@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void);
 # else
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 # endif
 
@@ -121,7 +121,7 @@ static inline void tick_init(void) { }
 static inline void tick_cancel_sched_timer(int cpu) { }
 static inline void tick_clock_notify(void) { }
 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
-static inline void tick_check_idle(int cpu) { }
+static inline void tick_check_idle(void) { }
 static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
@@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
-       if (!static_key_false(&context_tracking_enabled))
+       if (!context_tracking_is_enabled())
                return false;
 
        return tick_nohz_full_running;
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index f5b72b3..c5165fd 100644
@@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; }
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 static inline bool vtime_accounting_enabled(void)
 {
-       if (static_key_false(&context_tracking_enabled)) {
-               if (context_tracking_active())
+       if (context_tracking_is_enabled()) {
+               if (context_tracking_cpu_is_enabled())
                        return true;
        }
 
diff --git a/init/Kconfig b/init/Kconfig
index 4e5d96a..5236dc5 100644
@@ -532,7 +532,7 @@ config CONTEXT_TRACKING_FORCE
          dynticks subsystem by forcing the context tracking on all
          CPUs in the system.
 
-         Say Y only if you're working on the developpement of an
+         Say Y only if you're working on the development of an
          architecture backend for the context tracking.
 
          Say N otherwise, this option brings an overhead that you
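
Aside: for reference, forcing context tracking on every CPU while
developing an architecture backend would look like this as a .config
fragment (illustrative; CONTEXT_TRACKING itself is normally pulled in
via select):

        CONFIG_CONTEXT_TRACKING=y
        CONFIG_CONTEXT_TRACKING_FORCE=y
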
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e5f3917..6cb20d2 100644
@@ -53,10 +53,10 @@ void context_tracking_user_enter(void)
        /*
         * Repeat the user_enter() check here because some archs may be calling
         * this from asm and if no CPU needs context tracking, they shouldn't
-        * go further. Repeat the check here until they support the static key
-        * check.
+        * go further. Repeat the check here until they support the inline static
+        * key check.
         */
-       if (!static_key_false(&context_tracking_enabled))
+       if (!context_tracking_is_enabled())
                return;
 
        /*
@@ -160,7 +160,7 @@ void context_tracking_user_exit(void)
 {
        unsigned long flags;
 
-       if (!static_key_false(&context_tracking_enabled))
+       if (!context_tracking_is_enabled())
                return;
 
        if (in_interrupt())
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa..3b89464 100644
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
@@ -260,30 +261,53 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
        return 0;
 }
 
+static int posix_cpu_clock_get_task(struct task_struct *tsk,
+                                   const clockid_t which_clock,
+                                   struct timespec *tp)
+{
+       int err = -EINVAL;
+       unsigned long long rtn;
+
+       if (CPUCLOCK_PERTHREAD(which_clock)) {
+               if (same_thread_group(tsk, current))
+                       err = cpu_clock_sample(which_clock, tsk, &rtn);
+       } else {
+               unsigned long flags;
+               struct sighand_struct *sighand;
+
+               /*
+                * while_each_thread() is not yet entirely RCU safe,
+                * keep locking the group while sampling process
+                * clock for now.
+                */
+               sighand = lock_task_sighand(tsk, &flags);
+               if (!sighand)
+                       return err;
+
+               if (tsk == current || thread_group_leader(tsk))
+                       err = cpu_clock_sample_group(which_clock, tsk, &rtn);
+
+               unlock_task_sighand(tsk, &flags);
+       }
+
+       if (!err)
+               sample_to_timespec(which_clock, rtn, tp);
+
+       return err;
+}
+
 
 static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
        const pid_t pid = CPUCLOCK_PID(which_clock);
-       int error = -EINVAL;
-       unsigned long long rtn;
+       int err = -EINVAL;
 
        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
-               if (CPUCLOCK_PERTHREAD(which_clock)) {
-                       /*
-                        * Sampling just ourselves we can do with no locking.
-                        */
-                       error = cpu_clock_sample(which_clock,
-                                                current, &rtn);
-               } else {
-                       read_lock(&tasklist_lock);
-                       error = cpu_clock_sample_group(which_clock,
-                                                      current, &rtn);
-                       read_unlock(&tasklist_lock);
-               }
+               err = posix_cpu_clock_get_task(current, which_clock, tp);
        } else {
                /*
                 * Find the given PID, and validate that the caller
@@ -292,29 +316,12 @@ static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
-               if (p) {
-                       if (CPUCLOCK_PERTHREAD(which_clock)) {
-                               if (same_thread_group(p, current)) {
-                                       error = cpu_clock_sample(which_clock,
-                                                                p, &rtn);
-                               }
-                       } else {
-                               read_lock(&tasklist_lock);
-                               if (thread_group_leader(p) && p->sighand) {
-                                       error =
-                                           cpu_clock_sample_group(which_clock,
-                                                                  p, &rtn);
-                               }
-                               read_unlock(&tasklist_lock);
-                       }
-               }
+               if (p)
+                       err = posix_cpu_clock_get_task(p, which_clock, tp);
                rcu_read_unlock();
        }
 
-       if (error)
-               return error;
-       sample_to_timespec(which_clock, rtn, tp);
-       return 0;
+       return err;
 }
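
Aside: the consolidated posix_cpu_clock_get_task() above serves both the
per-thread and the process-wide CPU clocks that userspace reaches via
clock_gettime(). A minimal, compilable illustration (build with
-pthread; these are standard POSIX calls, not part of the patch):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        clockid_t thread_clock, process_clock;
        struct timespec ts;

        /* Per-thread clock: the CPUCLOCK_PERTHREAD path above. */
        pthread_getcpuclockid(pthread_self(), &thread_clock);
        clock_gettime(thread_clock, &ts);
        printf("thread  CPU time: %ld.%09lds\n", (long)ts.tv_sec, ts.tv_nsec);

        /* Process-wide clock: samples the whole thread group. */
        clock_getcpuclockid(0, &process_clock);
        clock_gettime(process_clock, &ts);
        printf("process CPU time: %ld.%09lds\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
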
 
 
@@ -371,36 +378,40 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
  */
 static int posix_cpu_timer_del(struct k_itimer *timer)
 {
-       struct task_struct *p = timer->it.cpu.task;
        int ret = 0;
+       unsigned long flags;
+       struct sighand_struct *sighand;
+       struct task_struct *p = timer->it.cpu.task;
 
-       if (likely(p != NULL)) {
-               read_lock(&tasklist_lock);
-               if (unlikely(p->sighand == NULL)) {
-                       /*
-                        * We raced with the reaping of the task.
-                        * The deletion should have cleared us off the list.
-                        */
-                       BUG_ON(!list_empty(&timer->it.cpu.entry));
-               } else {
-                       spin_lock(&p->sighand->siglock);
-                       if (timer->it.cpu.firing)
-                               ret = TIMER_RETRY;
-                       else
-                               list_del(&timer->it.cpu.entry);
-                       spin_unlock(&p->sighand->siglock);
-               }
-               read_unlock(&tasklist_lock);
+       WARN_ON_ONCE(p == NULL);
 
-               if (!ret)
-                       put_task_struct(p);
+       /*
+        * Protect against sighand release/switch in exit/exec and process/
+        * thread timer list entry concurrent read/writes.
+        */
+       sighand = lock_task_sighand(p, &flags);
+       if (unlikely(sighand == NULL)) {
+               /*
+                * We raced with the reaping of the task.
+                * The deletion should have cleared us off the list.
+                */
+               WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
+       } else {
+               if (timer->it.cpu.firing)
+                       ret = TIMER_RETRY;
+               else
+                       list_del(&timer->it.cpu.entry);
+
+               unlock_task_sighand(p, &flags);
        }
 
+       if (!ret)
+               put_task_struct(p);
+
        return ret;
 }
 
-static void cleanup_timers_list(struct list_head *head,
-                               unsigned long long curr)
+static void cleanup_timers_list(struct list_head *head)
 {
        struct cpu_timer_list *timer, *next;
 
@@ -414,16 +425,11 @@ static void cleanup_timers_list(struct list_head *head,
  * time for later timer_gettime calls to return.
  * This must be called with the siglock held.
  */
-static void cleanup_timers(struct list_head *head,
-                          cputime_t utime, cputime_t stime,
-                          unsigned long long sum_exec_runtime)
+static void cleanup_timers(struct list_head *head)
 {
-
-       cputime_t ptime = utime + stime;
-
-       cleanup_timers_list(head, cputime_to_expires(ptime));
-       cleanup_timers_list(++head, cputime_to_expires(utime));
-       cleanup_timers_list(++head, sum_exec_runtime);
+       cleanup_timers_list(head);
+       cleanup_timers_list(++head);
+       cleanup_timers_list(++head);
 }
 
 /*
@@ -433,41 +439,14 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
-       cputime_t utime, stime;
-
        add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                                                sizeof(unsigned long long));
-       task_cputime(tsk, &utime, &stime);
-       cleanup_timers(tsk->cpu_timers,
-                      utime, stime, tsk->se.sum_exec_runtime);
+       cleanup_timers(tsk->cpu_timers);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-       struct signal_struct *const sig = tsk->signal;
-       cputime_t utime, stime;
-
-       task_cputime(tsk, &utime, &stime);
-       cleanup_timers(tsk->signal->cpu_timers,
-                      utime + sig->utime, stime + sig->stime,
-                      tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
-}
-
-static void clear_dead_task(struct k_itimer *itimer, unsigned long long now)
-{
-       struct cpu_timer_list *timer = &itimer->it.cpu;
-
-       /*
-        * That's all for this thread or process.
-        * We leave our residual in expires to be reported.
-        */
-       put_task_struct(timer->task);
-       timer->task = NULL;
-       if (timer->expires < now) {
-               timer->expires = 0;
-       } else {
-               timer->expires -= now;
-       }
+       cleanup_timers(tsk->signal->cpu_timers);
 }
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
@@ -477,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later.  This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later.  This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -569,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
@@ -608,7 +587,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-       schedule_work(&nohz_kick_work);
+       if (context_tracking_is_enabled())
+               schedule_work(&nohz_kick_work);
 }
 
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -631,43 +611,39 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again.  (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
                               struct itimerspec *new, struct itimerspec *old)
 {
+       unsigned long flags;
+       struct sighand_struct *sighand;
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long old_expires, new_expires, old_incr, val;
        int ret;
 
-       if (unlikely(p == NULL)) {
-               /*
-                * Timer refers to a dead task's clock.
-                */
-               return -ESRCH;
-       }
+       WARN_ON_ONCE(p == NULL);
 
        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-       read_lock(&tasklist_lock);
        /*
-        * We need the tasklist_lock to protect against reaping that
-        * clears p->sighand.  If p has just been reaped, we can no
+        * Protect against sighand release/switch in exit/exec and p->cpu_timers
+        * and p->signal->cpu_timers read/write in arm_timer()
+        */
+       sighand = lock_task_sighand(p, &flags);
+       /*
+        * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
-       if (unlikely(p->sighand == NULL)) {
-               read_unlock(&tasklist_lock);
-               put_task_struct(p);
-               timer->it.cpu.task = NULL;
+       if (unlikely(sighand == NULL)) {
                return -ESRCH;
        }
 
        /*
         * Disarm any old timer after extracting its expiry time.
         */
-       BUG_ON(!irqs_disabled());
+       WARN_ON_ONCE(!irqs_disabled());
 
        ret = 0;
        old_incr = timer->it.cpu.incr;
-       spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
@@ -724,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
-               spin_unlock(&p->sighand->siglock);
-               read_unlock(&tasklist_lock);
+               unlock_task_sighand(p, &flags);
                goto out;
        }
 
-       if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+       if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
                new_expires += val;
        }
 
@@ -743,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                arm_timer(timer);
        }
 
-       spin_unlock(&p->sighand->siglock);
-       read_unlock(&tasklist_lock);
-
+       unlock_task_sighand(p, &flags);
        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
@@ -787,7 +760,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
        unsigned long long now;
        struct task_struct *p = timer->it.cpu.task;
-       int clear_dead;
+
+       WARN_ON_ONCE(p == NULL);
 
        /*
         * Easy part: convert the reload time.
@@ -800,52 +774,34 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                return;
        }
 
-       if (unlikely(p == NULL)) {
-               /*
-                * This task already died and the timer will never fire.
-                * In this case, expires is actually the dead value.
-                */
-       dead:
-               sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-                                  &itp->it_value);
-               return;
-       }
-
        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
-               clear_dead = p->exit_state;
        } else {
-               read_lock(&tasklist_lock);
-               if (unlikely(p->sighand == NULL)) {
+               struct sighand_struct *sighand;
+               unsigned long flags;
+
+               /*
+                * Protect against sighand release/switch in exit/exec and
+                * also make timer sampling safe if it ends up calling
+                * thread_group_cputime().
+                */
+               sighand = lock_task_sighand(p, &flags);
+               if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
-                       put_task_struct(p);
-                       timer->it.cpu.task = NULL;
                        timer->it.cpu.expires = 0;
-                       read_unlock(&tasklist_lock);
-                       goto dead;
+                       sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+                                          &itp->it_value);
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
-                       clear_dead = (unlikely(p->exit_state) &&
-                                     thread_group_empty(p));
+                       unlock_task_sighand(p, &flags);
                }
-               read_unlock(&tasklist_lock);
-       }
-
-       if (unlikely(clear_dead)) {
-               /*
-                * We've noticed that the thread is dead, but
-                * not yet reaped.  Take this opportunity to
-                * drop our task ref.
-                */
-               clear_dead_task(timer, now);
-               goto dead;
        }
 
        if (now < timer->it.cpu.expires) {
@@ -1059,14 +1015,12 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+       struct sighand_struct *sighand;
+       unsigned long flags;
        struct task_struct *p = timer->it.cpu.task;
        unsigned long long now;
 
-       if (unlikely(p == NULL))
-               /*
-                * The task was cleaned up already, no future firings.
-                */
-               goto out;
+       WARN_ON_ONCE(p == NULL);
 
        /*
         * Fetch the current sample and update the timer's expiry time.
@@ -1074,49 +1028,45 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
-               if (unlikely(p->exit_state)) {
-                       clear_dead_task(timer, now);
+               if (unlikely(p->exit_state))
+                       goto out;
+
+               /* Protect timer list r/w in arm_timer() */
+               sighand = lock_task_sighand(p, &flags);
+               if (!sighand)
                        goto out;
-               }
-               read_lock(&tasklist_lock); /* arm_timer needs it.  */
-               spin_lock(&p->sighand->siglock);
        } else {
-               read_lock(&tasklist_lock);
-               if (unlikely(p->sighand == NULL)) {
+               /*
+                * Protect arm_timer() and timer sampling in case of call to
+                * thread_group_cputime().
+                */
+               sighand = lock_task_sighand(p, &flags);
+               if (unlikely(sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
-                       put_task_struct(p);
-                       timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires = 0;
-                       goto out_unlock;
+                       goto out;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-                       /*
-                        * We've noticed that the thread is dead, but
-                        * not yet reaped.  Take this opportunity to
-                        * drop our task ref.
-                        */
-                       cpu_timer_sample_group(timer->it_clock, p, &now);
-                       clear_dead_task(timer, now);
-                       goto out_unlock;
+                       unlock_task_sighand(p, &flags);
+                       /* Optimizations: if the process is dying, no need to rearm */
+                       goto out;
                }
-               spin_lock(&p->sighand->siglock);
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
-               /* Leave the tasklist_lock locked for the call below.  */
+               /* Leave the sighand locked for the call below.  */
        }
 
        /*
         * Now re-arm for the new expiry time.
         */
-       BUG_ON(!irqs_disabled());
+       WARN_ON_ONCE(!irqs_disabled());
        arm_timer(timer);
-       spin_unlock(&p->sighand->siglock);
-
-out_unlock:
-       read_unlock(&tasklist_lock);
+       unlock_task_sighand(p, &flags);
 
+       /* Kick full dynticks CPUs in case they need to tick on the new timer */
+       posix_cpu_timer_kick_nohz();
 out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
@@ -1200,7 +1150,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
        struct k_itimer *timer, *next;
        unsigned long flags;
 
-       BUG_ON(!irqs_disabled());
+       WARN_ON_ONCE(!irqs_disabled());
 
        /*
         * The fast path checks that there are no expired thread or thread
@@ -1256,13 +1206,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
-
-       /*
-        * In case some timers were rescheduled after the queue got emptied,
-        * wake up full dynticks CPUs.
-        */
-       if (tsk->signal->cputimer.running)
-               posix_cpu_timer_kick_nohz();
 }
 
 /*
@@ -1274,7 +1217,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 {
        unsigned long long now;
 
-       BUG_ON(clock_idx == CPUCLOCK_SCHED);
+       WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);
 
        if (oldval) {
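
Aside: posix_cpu_timer_set() and posix_cpu_timer_del(), reworked above to
rely on the sighand lock instead of tasklist_lock, are what
timer_settime() and timer_delete() reach for CPU-time clocks. A minimal,
compilable userspace sketch (standard POSIX timer API; link with -lrt on
older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        timer_t tid;
        struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
                                .sigev_signo  = SIGALRM };
        struct itimerspec its = { .it_value = { .tv_sec = 1 } };

        if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid))
                return perror("timer_create"), 1;
        if (timer_settime(tid, 0, &its, NULL))  /* -> posix_cpu_timer_set() */
                return perror("timer_settime"), 1;

        /* ... burn CPU here; SIGALRM fires after 1s of CPU time ... */

        timer_delete(tid);                      /* -> posix_cpu_timer_del() */
        return 0;
}
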
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025cc..11348de 100644
@@ -311,8 +311,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-       int cpu = smp_processor_id();
-
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
@@ -320,7 +318,7 @@ void irq_enter(void)
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
-               tick_check_idle(cpu);
+               tick_check_idle();
                _local_bh_enable();
        }
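
Aside: irq_enter() no longer computes smp_processor_id() because
tick_check_idle() now implicitly acts on the current CPU, as do the
tick-sched.c helpers below. A userspace analogue of that refactoring,
with thread-local storage standing in for per-CPU data (names are
illustrative):

#include <stdio.h>

struct tick_sched { int idle_active; };

/* __thread plays the role of DEFINE_PER_CPU(struct tick_sched, ...) */
static __thread struct tick_sched tick_cpu_sched;

/* Old style: tick_check_idle(int cpu) indexed a per-CPU array.
 * New style: the callee touches *this* CPU's copy directly, as with
 * __get_cpu_var()/__this_cpu_write() in the patch. */
static void tick_check_idle(void)
{
        tick_cpu_sched.idle_active = 0;
}

int main(void)
{
        tick_cpu_sched.idle_active = 1;
        tick_check_idle();
        printf("idle_active=%d\n", tick_cpu_sched.idle_active);
        return 0;
}
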
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 9532690..43780ab 100644
@@ -538,10 +538,10 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
  * Called from irq_enter() when idle was interrupted to reenable the
  * per cpu device.
  */
-void tick_check_oneshot_broadcast(int cpu)
+void tick_check_oneshot_broadcast_this_cpu(void)
 {
-       if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
-               struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
+       if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
+               struct tick_device *td = &__get_cpu_var(tick_cpu_device);
 
                /*
                 * We might be in the middle of switching over from
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 18e71f7..e2bced5 100644
@@ -51,7 +51,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 extern int tick_broadcast_oneshot_active(void);
-extern void tick_check_oneshot_broadcast(int cpu);
+extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
@@ -62,7 +62,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
-static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index ea20f7d..52cee12 100644
@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-       int cpu = smp_processor_id();
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        unsigned long flags;
 
-       ts->idle_waketime = now;
+       __this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
        local_irq_save(flags);
        tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-       update_ts_time_stats(cpu, ts, now, NULL);
+       update_ts_time_stats(smp_processor_id(), ts, now, NULL);
        ts->idle_active = 0;
 
        sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
        ktime_t now = ktime_get();
 
@@ -754,7 +750,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
        ktime_t now, expires;
        int cpu = smp_processor_id();
 
-       now = tick_nohz_start_idle(cpu, ts);
+       now = tick_nohz_start_idle(ts);
 
        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;
@@ -911,8 +907,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-       int cpu = smp_processor_id();
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now;
 
        local_irq_disable();
@@ -925,7 +920,7 @@ void tick_nohz_idle_exit(void)
                now = ktime_get();
 
        if (ts->idle_active)
-               tick_nohz_stop_idle(cpu, now);
+               tick_nohz_stop_idle(ts, now);
 
        if (ts->tick_stopped) {
                tick_nohz_restart_sched_tick(ts, now);
@@ -1009,12 +1004,10 @@ static void tick_nohz_switch_to_nohz(void)
  * timer and do not touch the other magic bits which need to be done
  * when idle is left.
  */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
        /* Switch back to 2.6.27 behaviour */
-
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t delta;
 
        /*
@@ -1029,36 +1022,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-       struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+       struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
        ktime_t now;
 
        if (!ts->idle_active && !ts->tick_stopped)
                return;
        now = ktime_get();
        if (ts->idle_active)
-               tick_nohz_stop_idle(cpu, now);
+               tick_nohz_stop_idle(ts, now);
        if (ts->tick_stopped) {
                tick_nohz_update_jiffies(now);
-               tick_nohz_kick_tick(cpu, now);
+               tick_nohz_kick_tick(ts, now);
        }
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-       tick_check_oneshot_broadcast(cpu);
-       tick_check_nohz(cpu);
+       tick_check_oneshot_broadcast_this_cpu();
+       tick_check_nohz_this_cpu();
 }
 
 /*