Merge branch 'pm-cpufreq'
kernel/time/tick-sched.c
index 81632cd..0302829 100644
@@ -20,6 +20,7 @@
 #include <linux/sched/clock.h>
 #include <linux/sched/stat.h>
 #include <linux/sched/nohz.h>
+#include <linux/sched/loadavg.h>
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
@@ -44,7 +45,9 @@ struct tick_sched *tick_get_tick_sched(int cpu)
 
 #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
 /*
- * The time, when the last jiffy update happened. Protected by jiffies_lock.
+ * The time when the last jiffy update happened. Write access must hold
+ * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
+ * consistent view of jiffies and last_jiffies_update.
  */
 static ktime_t last_jiffies_update;
 
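
Readers pair with this scheme: tick_nohz_next_event() samples jiffies and last_jiffies_update inside a jiffies_seq read section and retries if a writer got in between. A minimal user-space sketch of that read loop follows, modeled with C11 atomics; the names mirror the kernel's, but this is illustrative code, not the kernel's seqcount_t implementation.

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic unsigned int jiffies_seq;    /* odd while a writer is active */
    static _Atomic uint64_t jiffies_64;         /* updated under jiffies_lock   */
    static _Atomic int64_t last_jiffies_update; /* updated under jiffies_lock   */

    /* Sample both values consistently, retrying across concurrent writers. */
    static void read_jiffies_pair(uint64_t *jif, int64_t *last)
    {
            unsigned int seq1, seq2;

            do {
                    /* Acquire pairs with the writer's final release store. */
                    seq1 = atomic_load_explicit(&jiffies_seq,
                                                memory_order_acquire);
                    *jif = atomic_load_explicit(&jiffies_64,
                                                memory_order_relaxed);
                    *last = atomic_load_explicit(&last_jiffies_update,
                                                 memory_order_relaxed);
                    /* Order the data loads before the sequence re-check. */
                    atomic_thread_fence(memory_order_acquire);
                    seq2 = atomic_load_explicit(&jiffies_seq,
                                                memory_order_relaxed);
                    /* Odd count: writer in progress. Changed count: retry. */
            } while ((seq1 & 1) || seq1 != seq2);
    }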
@@ -53,50 +56,97 @@ static ktime_t last_jiffies_update;
  */
 static void tick_do_update_jiffies64(ktime_t now)
 {
-       unsigned long ticks = 0;
-       ktime_t delta;
+       unsigned long ticks = 1;
+       ktime_t delta, nextp;
 
        /*
-        * Do a quick check without holding jiffies_lock:
-        * The READ_ONCE() pairs with two updates done later in this function.
+        * 64bit can do a quick check without holding jiffies_lock and
+        * without looking at the sequence count. The smp_load_acquire()
+        * pairs with the update done later in this function.
+        *
+        * 32bit cannot do that because the store of tick_next_period
+        * consists of two 32bit stores and the first store could move it
+        * to a random point in the future.
         */
-       delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
-       if (delta < tick_period)
-               return;
+       if (IS_ENABLED(CONFIG_64BIT)) {
+               if (ktime_before(now, smp_load_acquire(&tick_next_period)))
+                       return;
+       } else {
+               unsigned int seq;
 
-       /* Reevaluate with jiffies_lock held */
+               /*
+                * Avoid contention on jiffies_lock and protect the quick
+                * check with the sequence count.
+                */
+               do {
+                       seq = read_seqcount_begin(&jiffies_seq);
+                       nextp = tick_next_period;
+               } while (read_seqcount_retry(&jiffies_seq, seq));
+
+               if (ktime_before(now, nextp))
+                       return;
+       }
+
+       /* Quick check failed, i.e. update is required. */
        raw_spin_lock(&jiffies_lock);
+       /*
+        * Reevaluate with the lock held. Another CPU might have done the
+        * update already.
+        */
+       if (ktime_before(now, tick_next_period)) {
+               raw_spin_unlock(&jiffies_lock);
+               return;
+       }
+
        write_seqcount_begin(&jiffies_seq);
 
-       delta = ktime_sub(now, last_jiffies_update);
-       if (delta >= tick_period) {
+       delta = ktime_sub(now, tick_next_period);
+       if (unlikely(delta >= TICK_NSEC)) {
+               /* Slow path for long idle sleep times */
+               s64 incr = TICK_NSEC;
 
-               delta = ktime_sub(delta, tick_period);
-               /* Pairs with the lockless read in this function. */
-               WRITE_ONCE(last_jiffies_update,
-                          ktime_add(last_jiffies_update, tick_period));
+               ticks += ktime_divns(delta, incr);
 
-               /* Slow path for long timeouts */
-               if (unlikely(delta >= tick_period)) {
-                       s64 incr = ktime_to_ns(tick_period);
+               last_jiffies_update = ktime_add_ns(last_jiffies_update,
+                                                  incr * ticks);
+       } else {
+               last_jiffies_update = ktime_add_ns(last_jiffies_update,
+                                                  TICK_NSEC);
+       }
 
-                       ticks = ktime_divns(delta, incr);
+       /* Advance jiffies to complete the jiffies_seq protected job */
+       jiffies_64 += ticks;
 
-                       /* Pairs with the lockless read in this function. */
-                       WRITE_ONCE(last_jiffies_update,
-                                  ktime_add_ns(last_jiffies_update,
-                                               incr * ticks));
-               }
-               do_timer(++ticks);
+       /*
+        * Keep the tick_next_period variable up to date.
+        */
+       nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);
 
-               /* Keep the tick_next_period variable up to date */
-               tick_next_period = ktime_add(last_jiffies_update, tick_period);
+       if (IS_ENABLED(CONFIG_64BIT)) {
+               /*
+                * Pairs with smp_load_acquire() in the lockless quick
+                * check above and ensures that the update to jiffies_64 is
+                * not reordered vs. the store to tick_next_period, neither
+                * by the compiler nor by the CPU.
+                */
+               smp_store_release(&tick_next_period, nextp);
        } else {
-               write_seqcount_end(&jiffies_seq);
-               raw_spin_unlock(&jiffies_lock);
-               return;
+               /*
+                * A plain store is good enough on 32bit as the quick check
+                * above is protected by the sequence count.
+                */
+               tick_next_period = nextp;
        }
+
+       /*
+        * Release the sequence count. calc_global_load() below is not
+        * protected by it, but jiffies_lock needs to be held to prevent
+        * concurrent invocations.
+        */
        write_seqcount_end(&jiffies_seq);
+
+       calc_global_load();
+
        raw_spin_unlock(&jiffies_lock);
        update_wall_time();
 }
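
Taken together, the rewrite is: a lockless quick check against the published tick_next_period, a recheck under jiffies_lock, batched catch-up of all missed ticks with a single division, and a release store that republishes the deadline. A condensed user-space model of the 64-bit path is sketched below; the mutex stands in for jiffies_lock, PERIOD_NS for TICK_NSEC, and all names are illustrative rather than the kernel's.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define PERIOD_NS 1000000               /* stand-in for TICK_NSEC: 1 ms */

    static pthread_mutex_t period_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic int64_t next_period;     /* published deadline, ns       */
    static int64_t last_update;             /* protected by period_lock     */
    static uint64_t periods_elapsed;        /* protected by period_lock     */

    static void update_periods(int64_t now)
    {
            uint64_t ticks = 1;
            int64_t delta;

            /*
             * Lockless quick check: the acquire load pairs with the
             * release store below, so whoever sees the new deadline
             * also sees the counter updates that preceded it.
             */
            if (now < atomic_load_explicit(&next_period, memory_order_acquire))
                    return;

            pthread_mutex_lock(&period_lock);
            /* Reevaluate: another thread may have done the update already. */
            if (now < atomic_load_explicit(&next_period, memory_order_relaxed)) {
                    pthread_mutex_unlock(&period_lock);
                    return;
            }

            delta = now - atomic_load_explicit(&next_period, memory_order_relaxed);
            if (delta >= PERIOD_NS)
                    /* Batch all additional missed periods in one division. */
                    ticks += (uint64_t)(delta / PERIOD_NS);

            last_update += (int64_t)ticks * PERIOD_NS;
            periods_elapsed += ticks;

            /* Republish the deadline; pairs with the acquire load above. */
            atomic_store_explicit(&next_period, last_update + PERIOD_NS,
                                  memory_order_release);
            pthread_mutex_unlock(&period_lock);
    }

A thread waking 5.5 periods late accounts for all six elapsed periods in that one division instead of iterating per period, which is the point of the slow-path batching.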
@@ -243,10 +293,8 @@ static void nohz_full_kick_func(struct irq_work *work)
        /* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }
 
-static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
-       .func = nohz_full_kick_func,
-       .flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
-};
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
+       IRQ_WORK_INIT_HARD(nohz_full_kick_func);
 
 /*
  * Kick this CPU if it's full dynticks in order to force it to
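
The open-coded designated initializer is replaced by IRQ_WORK_INIT_HARD(), which keeps the flag encoding in one place. A plain-C sketch of that initializer-macro pattern follows; struct work_item, WORK_HARDIRQ and WORK_INIT_HARD() are illustrative stand-ins, not the kernel definitions.

    struct work_item {
            unsigned int flags;
            void (*func)(struct work_item *);
    };

    #define WORK_HARDIRQ    0x01    /* stand-in for IRQ_WORK_HARD_IRQ */

    /*
     * Callers initialize through the macro, so the struct layout and
     * the flag encoding can change without touching every static user.
     */
    #define WORK_INIT_HARD(_func) {         \
            .flags  = WORK_HARDIRQ,         \
            .func   = (_func),              \
    }

    static void kick_func(struct work_item *work)
    {
            (void)work;     /* empty, like nohz_full_kick_func() */
    }

    /* One macro call replaces the open-coded designated initializer. */
    static struct work_item kick_work = WORK_INIT_HARD(kick_func);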
@@ -661,7 +709,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
        hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 
        /* Forward the time to expire in the future */
-       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
 
        if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
                hrtimer_start_expires(&ts->sched_timer,
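
tick_period was a ktime_t that always held TICK_NSEC, so this and the remaining hunks pass the constant directly. For reference, here is a simplified user-space model of what hrtimer_forward() computes with such a fixed interval; it is an illustrative sketch, not the kernel implementation.

    #include <stdint.h>

    /*
     * Push *expires forward by whole intervals until it lies strictly
     * in the future of now, and return how many intervals were
     * skipped. With interval = TICK_NSEC this mirrors the periodic
     * tick rearm.
     */
    static uint64_t timer_forward(int64_t *expires, int64_t now,
                                  int64_t interval)
    {
            int64_t delta = now - *expires;
            uint64_t overruns;

            if (delta < 0)
                    return 0;       /* still in the future, nothing to do */

            /* Jump over all fully missed periods with one division. */
            overruns = (uint64_t)(delta / interval) + 1;
            *expires += (int64_t)overruns * interval;

            return overruns;
    }

Forwarding by whole intervals keeps a periodic timer phase-locked to its original baseline instead of drifting by the handler latency.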
@@ -1230,7 +1278,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
        if (unlikely(ts->tick_stopped))
                return;
 
-       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
        tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
 
@@ -1267,7 +1315,7 @@ static void tick_nohz_switch_to_nohz(void)
        next = tick_init_jiffy_update();
 
        hrtimer_set_expires(&ts->sched_timer, next);
-       hrtimer_forward_now(&ts->sched_timer, tick_period);
+       hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
        tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
        tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
@@ -1333,7 +1381,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
        if (unlikely(ts->tick_stopped))
                return HRTIMER_NORESTART;
 
-       hrtimer_forward(timer, now, tick_period);
+       hrtimer_forward(timer, now, TICK_NSEC);
 
        return HRTIMER_RESTART;
 }
@@ -1367,13 +1415,13 @@ void tick_setup_sched_timer(void)
 
        /* Offset the tick to avert jiffies_lock contention. */
        if (sched_skew_tick) {
-               u64 offset = ktime_to_ns(tick_period) >> 1;
+               u64 offset = TICK_NSEC >> 1;
                do_div(offset, num_possible_cpus());
                offset *= smp_processor_id();
                hrtimer_add_expires_ns(&ts->sched_timer, offset);
        }
 
-       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
        hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
        tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
 }
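
With the constant in place, the skew arithmetic reads directly: each CPU's sched_timer is offset by cpu * (TICK_NSEC / 2) / num_possible_cpus(), spreading expiry over half a tick so the CPUs do not all contend on jiffies_lock at the same instant. A small stand-alone program working the numbers, with illustrative values for TICK_NSEC and the CPU count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t tick_nsec = 1000000;     /* HZ=1000: 1 ms tick */
            const unsigned int num_cpus = 4;

            for (unsigned int cpu = 0; cpu < num_cpus; cpu++) {
                    uint64_t offset = tick_nsec >> 1;       /* half a tick  */

                    offset /= num_cpus;     /* per-CPU step                 */
                    offset *= cpu;          /* this CPU's skew              */
                    printf("cpu %u: skew %llu ns\n", cpu,
                           (unsigned long long)offset);
            }
            /* Prints 0, 125000, 250000 and 375000 ns for CPUs 0-3. */
            return 0;
    }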