// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
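
/*
 * Minimum IO-wait boost, expressed on the capacity scale (128 with the
 * default SCHED_CAPACITY_SCALE of 1024). This is the value an IO boost
 * starts from; see sugov_iowait_boost() below.
 */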
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool limits_changed;
	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;
	unsigned int cpu;

	bool iowait_boost_pending;
	unsigned int iowait_boost;
	u64 last_update;

	unsigned long bw_dl;
	unsigned long util;
	unsigned long max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_fast_switch() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on the slow switching platforms too to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
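
/*
 * Illustration (values are hypothetical): with rate_limit_us = 2000,
 * sugov_start() and rate_limit_us_store() set freq_update_delay_ns to
 * 2,000,000 ns, so this function allows at most one frequency update
 * every 2 ms per policy unless the limits have changed in the meantime.
 */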

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	if (sugov_update_next_freq(sg_policy, time, next_freq))
		cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}
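
/*
 * Note on the slow (deferred) path: work_in_progress prevents the IRQ work
 * from being queued twice for the same policy. The IRQ work handler,
 * sugov_irq_work(), only queues sugov_work() on the per-policy kthread,
 * which then performs the actual frequency change with
 * __cpufreq_driver_target() in process context.
 */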

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
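
/*
 * Worked example (illustrative numbers only): on a frequency-invariant
 * system with cpuinfo.max_freq = 2000000 kHz, util = 768 and max = 1024,
 * map_util_freq() gives 1.25 * 2000000 * 768 / 1024 = 1875000 kHz, and
 * cpufreq_driver_resolve_freq() then picks the lowest table frequency at
 * or above that value, clamped to the policy limits.
 */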

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *   cpu_util_{cfs,rt,dl,irq}()
 *   cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number, on the other hand, is not a measured metric but a
 * value computed based on the task model parameters and gives the minimal
 * utilization required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!uclamp_is_used() &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, can be
	 * because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_rq_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}
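
/*
 * Numerical sketch of the FREQUENCY_UTIL case (made-up values): with
 * util_cfs = 300, cpu_util_rt() = 50, cpu_util_irq() = 100, cpu_bw_dl() = 0
 * and max = 1024, the sum 350 is first scaled by (1024 - 100) / 1024 to
 * about 315, the IRQ contribution is added back for a total of 415, and
 * that is the utilization handed on for frequency selection.
 */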

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
					  FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
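
/*
 * With the default capacity scale, successive in-tick IO wakeups thus walk
 * the boost through 128, 256, 512 and finally 1024 (the full scale), while
 * a tick without a request halves it again in sugov_iowait_apply().
 */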

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks waiting on IO frequently, while
 * being more conservative about tasks doing only sporadic IO operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_cpu->sg_policy->limits_changed = true;
}
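
/*
 * Setting limits_changed here means the next sugov_should_update_freq()
 * call returns true regardless of freq_update_delay_ns, so a DEADLINE
 * bandwidth increase takes effect immediately rather than after the rate
 * limit expires.
 */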

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned int flags)
{
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned int next_f;

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;
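
	/*
	 * DL bandwidth is passed as the minimum performance hint and the
	 * effective utilization as the target, both translated with
	 * map_util_perf(), so the driver can set a performance level
	 * directly instead of going through a frequency table.
	 */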
	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), sg_cpu->max);

	sg_cpu->sg_policy->last_freq_update_time = time;
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;
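
	/*
	 * Pick the (util, max) pair of the CPU with the highest util/max
	 * ratio in the policy; the cross-multiplied comparison below avoids
	 * a division per CPU.
	 */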
	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time);
		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;

		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
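
/*
 * Unlike the single-CPU paths, the shared-policy update always runs under
 * sg_policy->update_lock: any CPU in the policy may call in here, and only
 * its own rq->lock is held, so the per-policy state is not otherwise
 * serialized.
 */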
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we could miss queueing the new update.
	 *
	 * Note: If a work was queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}
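
/*
 * rate_limit_us is the governor's only tunable. It is typically exposed
 * under /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/ when tunables
 * are per-policy, or under /sys/devices/system/cpu/cpufreq/schedutil/
 * otherwise (the exact layout depends on the platform's cpufreq setup).
 */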
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime = 1000000,
		.sched_deadline = 10000000,
		.sched_period = 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = 0;
	sg_policy->work_in_progress = false;
	sg_policy->limits_changed = false;
	sg_policy->cached_raw_freq = 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
	}

	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
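
/*
 * Called when the policy limits (policy->min/max) change. On slow-switching
 * platforms the new limits are applied right away under work_lock; in all
 * cases limits_changed is set so that the next scheduler-driven update
 * bypasses the rate limit and recomputes the frequency against the new
 * limits.
 */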
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif