sched/core: Give DCE a fighting chance
author	Peter Zijlstra <peterz@infradead.org>
	Wed, 5 Dec 2018 10:23:56 +0000 (11:23 +0100)
committer	Ingo Molnar <mingo@kernel.org>
	Sun, 27 Jan 2019 11:29:37 +0000 (12:29 +0100)
All that fancy new Energy-Aware scheduling foo is hidden behind a
static_key, which is awesome if you have the stuff enabled in your
config.

However, when you lack all the prerequisites it doesn't make any sense
to pretend we'll ever actually run this, so provide a little more of a
clue to the compiler so it can more aggressively delete the code.

   text    data     bss     dec     hex filename
  50297     976      96   51369    c8a9 defconfig-build/kernel/sched/fair.o
  49227     944      96   50267    c45b defconfig-build/kernel/sched/fair.o
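
Below is a minimal, self-contained sketch of the pattern this patch relies
on, outside the kernel tree; the names HAVE_FANCY_EAS, fancy_eas_active,
fancy_eas_enabled() and pick_cpu() are invented for illustration. When the
prerequisites are configured out, the helper folds to a compile-time
'false', the guarded block becomes provably unreachable, and dead code
elimination drops it, which is where the roughly 1K of text saved in
fair.o above comes from. It builds as-is; define HAVE_FANCY_EAS and
provide fancy_eas_active to exercise the runtime-check branch.

  /*
   * Minimal sketch of the pattern, not kernel code: HAVE_FANCY_EAS,
   * fancy_eas_active, fancy_eas_enabled() and pick_cpu() are invented
   * names.  With the feature configured out, fancy_eas_enabled() is a
   * compile-time 'false', so the compiler proves the guarded block
   * unreachable and deletes it, along with anything only it references.
   */
  #include <stdbool.h>
  #include <stdio.h>

  #ifdef HAVE_FANCY_EAS
  /* Feature built in: the check happens at runtime (a static_key in the kernel). */
  extern bool fancy_eas_active;

  static inline bool fancy_eas_enabled(void)
  {
          return fancy_eas_active;
  }
  #else
  /* Feature built out: a constant 'false' lets dead code elimination kick in. */
  static inline bool fancy_eas_enabled(void)
  {
          return false;
  }
  #endif

  static int pick_cpu(int prev_cpu)
  {
          if (fancy_eas_enabled()) {
                  /* Dropped entirely when HAVE_FANCY_EAS is not defined. */
                  return prev_cpu ^ 1;
          }
          return prev_cpu;
  }

  int main(void)
  {
          printf("picked CPU %d\n", pick_cpu(2));
          return 0;
  }

The kernel version of this is sched_energy_enabled() in sched.h below:
DECLARE_STATIC_KEY_FALSE() plus static_branch_unlikely() when EAS can be
built, and a stub returning false otherwise.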

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c

index 50aa2ab..b1374fb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6607,7 +6607,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        if (sd_flag & SD_BALANCE_WAKE) {
                record_wakee(p);
 
-               if (static_branch_unlikely(&sched_energy_present)) {
+               if (sched_energy_enabled()) {
                        new_cpu = find_energy_efficient_cpu(p, prev_cpu);
                        if (new_cpu >= 0)
                                return new_cpu;
@@ -8635,7 +8635,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
         */
        update_sd_lb_stats(env, &sds);
 
-       if (static_branch_unlikely(&sched_energy_present)) {
+       if (sched_energy_enabled()) {
                struct root_domain *rd = env->dst_rq->rd;
 
                if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
index d04530b..d27c1a5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2299,11 +2299,19 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
 #endif
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+
 #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
-#else
+
+DECLARE_STATIC_KEY_FALSE(sched_energy_present);
+
+static inline bool sched_energy_enabled(void)
+{
+       return static_branch_unlikely(&sched_energy_present);
+}
+
+#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
+
 #define perf_domain_span(pd) NULL
-#endif
+static inline bool sched_energy_enabled(void) { return false; }
 
-#ifdef CONFIG_SMP
-extern struct static_key_false sched_energy_present;
-#endif
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
index 50c3fc3..4ae9403 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -201,8 +201,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
        return 1;
 }
 
-DEFINE_STATIC_KEY_FALSE(sched_energy_present);
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+DEFINE_STATIC_KEY_FALSE(sched_energy_present);
 unsigned int sysctl_sched_energy_aware = 1;
 DEFINE_MUTEX(sched_energy_mutex);
 bool sched_energy_update;