/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <trace/events/power.h>
struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock; /* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	unsigned int iowait_boost_max;

	/* The fields below are only needed when sharing a policy: */
	unsigned long util_cfs;
	unsigned long util_dl;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_update_commit() call may not work on fast-switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For slow-switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
		return false;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
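/*
 * Illustrative example (added, not part of the original source): with
 * rate_limit_us defaulting to cpufreq_policy_transition_delay_us(), say
 * 1000 us, freq_update_delay_ns is 1,000,000 ns, so at most one frequency
 * change is committed per millisecond per policy unless need_freq_update
 * forces an immediate re-evaluation.
 */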
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
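/*
 * Worked example (illustrative only, assuming a frequency-invariant platform
 * with cpuinfo.max_freq = 2000000 kHz): for util = 512 and max = 1024 the raw
 * frequency is (2000000 + 2000000/4) * 512 / 1024 = 1250000 kHz, i.e. 1.25x
 * the 50% point, which cpufreq_driver_resolve_freq() then maps to the closest
 * driver-supported frequency >= 1250000 kHz within the policy limits.
 */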
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);

	sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
	sg_cpu->util_cfs = cpu_util_cfs(rq);
	sg_cpu->util_dl = cpu_util_dl(rq);
}

static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);

	if (rq->rt.rt_nr_running)
		return sg_cpu->max;

	/*
	 * Utilization required by DEADLINE must always be granted while, for
	 * FAIR, we use the blocked utilization of IDLE CPUs as a mechanism to
	 * gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set util_dl as min/guaranteed freq and
	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
	 * ready for such an interface. So, we only do the latter for now.
	 */
	return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
}
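/*
 * Example (added for illustration): with max = 1024, util_dl = 100 and
 * util_cfs = 300, the aggregate request is 400; if the sum exceeded 1024 it
 * would be clamped to the CPU capacity, and any runnable RT task makes the
 * policy ask for the maximum straight away.
 */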
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		if (sg_cpu->iowait_boost_pending)
			return;

		sg_cpu->iowait_boost_pending = true;

		if (sg_cpu->iowait_boost) {
			sg_cpu->iowait_boost <<= 1;
			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
		} else {
			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
		}
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC) {
			sg_cpu->iowait_boost = 0;
			sg_cpu->iowait_boost_pending = false;
		}
	}
}
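/*
 * Illustrative boost progression (not from the original source): with
 * policy->min = 400000 kHz and iowait_boost_max = 2000000 kHz, consecutive
 * wakeups from I/O wait ramp the boost as 400000 -> 800000 -> 1600000 ->
 * 2000000 (capped), and a tick-long idle gap resets it to 0.
 */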
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned int boost_util, boost_max;

	if (!sg_cpu->iowait_boost)
		return;
	if (sg_cpu->iowait_boost_pending) {
		sg_cpu->iowait_boost_pending = false;
	} else {
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}
	boost_util = sg_cpu->iowait_boost;
	boost_max = sg_cpu->iowait_boost_max;
	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
}
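/*
 * Note (added for clarity): the cross-multiplication above compares the
 * boosted ratio boost_util/boost_max against the current util/max without
 * doing an integer division; when the boost implies a higher frequency, the
 * (util, max) pair handed to get_next_freq() is replaced by the boost values.
 */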
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
		sg_policy->need_freq_update = true;
}
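/*
 * Example (added for illustration): when a new SCHED_DEADLINE task wakes up,
 * cpu_util_dl() exceeds the util_dl snapshot taken at the previous update, so
 * need_freq_update is set and the next callback bypasses rate_limit_us instead
 * of waiting out the remaining delay.
 */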
static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	util = sugov_aggregate_util(sg_cpu);
	sugov_iowait_boost(sg_cpu, &util, &max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (busy && next_f < sg_policy->next_freq &&
	    sg_policy->next_freq != UINT_MAX) {
		next_f = sg_policy->next_freq;

		/* Reset cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = 0;
	}

	sugov_update_commit(sg_policy, time, next_f);
}
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		sugov_get_util(j_sg_cpu);

		/*
		 * If the CFS CPU utilization was last updated before the
		 * previous frequency update and the time elapsed between the
		 * last update of the CPU utilization and the last frequency
		 * update is long enough, reset iowait_boost and util_cfs, as
		 * they are now probably stale. However, still consider the
		 * CPU contribution if it has some DEADLINE utilization
		 * (util_dl).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			j_sg_cpu->iowait_boost_pending = false;
			j_sg_cpu->util_cfs = 0;
		}

		j_max = j_sg_cpu->max;
		j_util = sugov_aggregate_util(j_sg_cpu);
		sugov_iowait_boost(j_sg_cpu, &j_util, &j_max);
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
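/*
 * Note (added for clarity): for a shared policy the request is driven by the
 * CPU with the highest util/max ratio; e.g. with two CPUs reporting 300/1024
 * and 600/1024, the 600/1024 pair is handed to get_next_freq(), so the common
 * frequency always covers the busiest CPU in the policy.
 */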
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);
		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;
	tunables->rate_limit_us = rate_limit_us;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};
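/*
 * Usage note (added, not in the original file): with per-policy tunables the
 * attribute typically appears as
 *   /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us
 * and writing e.g. "500" limits frequency re-evaluation to once every 500 us
 * for that policy; the exact location depends on have_governor_per_policy().
 */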
/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;
	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime = 1000000,
		.sched_deadline = 10000000,
		.sched_period = 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;

	/* Kthread is bound to all CPUs by default */
	if (!policy->dvfs_possible_from_any_cpu)
		kthread_bind_mask(thread, policy->related_cpus);

	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);
	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
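/*
 * Note (added for clarity): the slow-path worker created above runs as
 * SCHED_DEADLINE so that frequency changes are not starved by RT or CFS load;
 * the 1 ms / 10 ms runtime/period values are nominal placeholders, as the
 * comment in sugov_kthread_create() says, and none of this applies when the
 * driver supports fast switching.
 */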
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
	kfree(tunables);
}
static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);
	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;
		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}
	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);
stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);
free_sg_policy:
	sugov_policy_free(sg_policy);
disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);
	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);
	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}
static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.dynamic_switching = true,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif
static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);