1 // SPDX-License-Identifier: GPL-2.0
3 * CPUFreq governor based on scheduler-provided CPU utilization data.
5 * Copyright (C) 2016, Intel Corporation
6 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/sched/cpufreq.h>
14 #include <trace/events/power.h>
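/* With the default SCHED_CAPACITY_SCALE of 1024, IOWAIT_BOOST_MIN below is 128, i.e. 1/8 of full CPU capacity. */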
16 #define IOWAIT_BOOST_MIN (SCHED_CAPACITY_SCALE / 8)
18 struct sugov_tunables {
19 struct gov_attr_set attr_set;
20 unsigned int rate_limit_us;
24 struct cpufreq_policy *policy;
26 struct sugov_tunables *tunables;
27 struct list_head tunables_hook;
29 raw_spinlock_t update_lock; /* For shared policies */
30 u64 last_freq_update_time;
31 s64 freq_update_delay_ns;
32 unsigned int next_freq;
33 unsigned int cached_raw_freq;
35 /* The next fields are only needed if fast switch cannot be used: */
36 struct irq_work irq_work;
37 struct kthread_work work;
38 struct mutex work_lock;
39 struct kthread_worker worker;
40 struct task_struct *thread;
41 bool work_in_progress;
44 bool need_freq_update;
48 struct update_util_data update_util;
49 struct sugov_policy *sg_policy;
52 bool iowait_boost_pending;
53 unsigned int iowait_boost;
59 /* The field below is for single-CPU policies only: */
60 #ifdef CONFIG_NO_HZ_COMMON
61 unsigned long saved_idle_calls;
65 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
67 /************************ Governor internals ***********************/
69 static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
74 * Since cpufreq_update_util() is called with rq->lock held for
75 * the @target_cpu, our per-CPU data is fully serialized.
77 * However, drivers cannot in general deal with cross-CPU
78 * requests, so while get_next_freq() will work, the subsequent
79 * sugov_fast_switch() call may not on fast switching platforms.
81 * Hence stop here for remote requests if they aren't supported
82 * by the hardware, as calculating the frequency is pointless if
83 * we cannot in fact act on it.
85 * This is needed on the slow switching platforms too to prevent CPUs
86 * going offline from leaving stale IRQ work items behind.
88 if (!cpufreq_this_cpu_can_update(sg_policy->policy))
91 if (unlikely(sg_policy->limits_changed)) {
92 sg_policy->limits_changed = false;
93 sg_policy->need_freq_update = true;
97 delta_ns = time - sg_policy->last_freq_update_time;
99 return delta_ns >= sg_policy->freq_update_delay_ns;
102 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
103 unsigned int next_freq)
105 if (sg_policy->next_freq == next_freq &&
106 !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
109 sg_policy->next_freq = next_freq;
110 sg_policy->last_freq_update_time = time;
115 static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
116 unsigned int next_freq)
118 if (sugov_update_next_freq(sg_policy, time, next_freq))
119 cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
122 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
123 unsigned int next_freq)
125 if (!sugov_update_next_freq(sg_policy, time, next_freq))
128 if (!sg_policy->work_in_progress) {
129 sg_policy->work_in_progress = true;
130 irq_work_queue(&sg_policy->irq_work);
135 * get_next_freq - Compute a new frequency for a given cpufreq policy.
136 * @sg_policy: schedutil policy object to compute the new frequency for.
137 * @util: Current CPU utilization.
138 * @max: CPU capacity.
140 * If the utilization is frequency-invariant, choose the new frequency to be
141 * proportional to it, that is
143 * next_freq = C * max_freq * util / max
145 * Otherwise, approximate the would-be frequency-invariant utilization by
146 * util_raw * (curr_freq / max_freq) which leads to
148 * next_freq = C * curr_freq * util_raw / max
150 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
152 * The lowest driver-supported frequency which is equal or greater than the raw
153 * next_freq (as calculated above) is returned, subject to policy min/max and
154 * cpufreq driver limitations.
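*
* Illustrative example (hypothetical values, not from the source): with
* C = 1.25, max_freq = 2000000 kHz, util = 512 and max = 1024, the raw
* next_freq is 1.25 * 2000000 * 512 / 1024 = 1250000 kHz, which
* cpufreq_driver_resolve_freq() then maps to a driver-supported frequency.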
156 static unsigned int get_next_freq(struct sugov_policy *sg_policy,
157 unsigned long util, unsigned long max)
159 struct cpufreq_policy *policy = sg_policy->policy;
160 unsigned int freq = arch_scale_freq_invariant() ?
161 policy->cpuinfo.max_freq : policy->cur;
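	/*
	 * map_util_freq() applies the C = 1.25 margin described in the comment
	 * above; it effectively computes (freq + freq / 4) * util / max.
	 */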
163 freq = map_util_freq(util, freq, max);
165 if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update &&
166 !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
167 return sg_policy->next_freq;
169 sg_policy->need_freq_update = false;
170 sg_policy->cached_raw_freq = freq;
171 return cpufreq_driver_resolve_freq(policy, freq);
175 * This function computes an effective utilization for the given CPU, to be
176 * used for frequency selection given the linear relation: f = u * f_max.
178 * The scheduler tracks the following metrics:
180 * cpu_util_{cfs,rt,dl,irq}()
183 * Where the cfs,rt and dl util numbers are tracked with the same metric and
184 * synchronized windows and are thus directly comparable.
186 * The cfs, rt and dl utilizations are the running times measured with
187 * rq->clock_task, which excludes things like IRQ and steal-time. The latter
188 * are then accrued in the irq utilization.
190 * The DL bandwidth number OTOH is not a measured metric but a value computed
191 * based on the task model parameters and gives the minimal utilization
192 * required to meet deadlines.
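*
* For example (illustrative numbers): a DEADLINE task with a 5 ms runtime
* per 20 ms period contributes a bandwidth of 0.25, i.e. about 256 out of
* SCHED_CAPACITY_SCALE, regardless of how much it has recently been running.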
194 unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
195 unsigned long max, enum schedutil_type type,
196 struct task_struct *p)
198 unsigned long dl_util, util, irq;
199 struct rq *rq = cpu_rq(cpu);
201 if (!uclamp_is_used() &&
202 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
207 * Early check to see if IRQ/steal time saturates the CPU; this can happen
208 * because of inaccuracies in how we track these values -- see
209 * update_irq_load_avg().
211 irq = cpu_util_irq(rq);
212 if (unlikely(irq >= max))
216 * Because the time spent on RT/DL tasks is visible as 'lost' time to
217 * CFS tasks and we use the same metric to track the effective
218 * utilization (PELT windows are synchronized) we can directly add them
219 * to obtain the CPU's actual utilization.
221 * CFS and RT utilization can be boosted or capped, depending on
222 * utilization clamp constraints requested by currently RUNNABLE tasks.
224 * When there are no CFS RUNNABLE tasks, clamps are released and
225 * frequency will be gracefully reduced with the utilization decay.
227 util = util_cfs + cpu_util_rt(rq);
228 if (type == FREQUENCY_UTIL)
229 util = uclamp_rq_util_with(rq, util, p);
231 dl_util = cpu_util_dl(rq);
234 * For frequency selection we do not make cpu_util_dl() a permanent part
235 * of this sum because we want to use cpu_bw_dl() later on, but we need
236 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
237 * that we select f_max when there is no idle time.
239 * NOTE: numerical errors or stop class might cause us to not quite hit
240 * saturation when we should -- something for later.
242 if (util + dl_util >= max)
246 * OTOH, for energy computation we need the estimated running time, so
247 * include util_dl and ignore dl_bw.
249 if (type == ENERGY_UTIL)
253 * There is still idle time; further improve the number by using the
254 * irq metric. Because IRQ/steal time is hidden from the task clock we
255 * need to scale the task numbers:
258 * U' = irq + ((max - irq) / max) * U
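*
* Illustrative example: with irq = 256, max = 1024 and U = 512, the result
* is U' = 256 + (768 / 1024) * 512 = 256 + 384 = 640.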
261 util = scale_irq_capacity(util, irq, max);
265 * Bandwidth required by DEADLINE must always be granted while, for
266 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
267 * to gracefully reduce the frequency when no tasks show up for longer periods of time.
270 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
271 * bw_dl as requested freq. However, cpufreq is not yet ready for such
272 * an interface. So, we only do the latter for now.
274 if (type == FREQUENCY_UTIL)
275 util += cpu_bw_dl(rq);
277 return min(max, util);
280 static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
282 struct rq *rq = cpu_rq(sg_cpu->cpu);
283 unsigned long util = cpu_util_cfs(rq);
284 unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
287 sg_cpu->bw_dl = cpu_bw_dl(rq);
289 return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
293 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
294 * @sg_cpu: the sugov data for the CPU to boost
295 * @time: the update time from the caller
296 * @set_iowait_boost: true if an IO boost has been requested
298 * The IO wait boost of a task is disabled after a tick since the last update
299 * of a CPU. If a new IO wait boost is requested after more than a tick, then
300 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
301 * efficiency by ignoring sporadic wakeups from IO.
303 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
304 bool set_iowait_boost)
306 s64 delta_ns = time - sg_cpu->last_update;
308 /* Reset boost only if a tick has elapsed since last request */
309 if (delta_ns <= TICK_NSEC)
312 sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
313 sg_cpu->iowait_boost_pending = set_iowait_boost;
319 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
320 * @sg_cpu: the sugov data for the CPU to boost
321 * @time: the update time from the caller
322 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
324 * Each time a task wakes up after an IO operation, the CPU utilization can be
325 * boosted to a certain utilization which doubles at each "frequent and
326 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
327 * of the maximum OPP.
329 * To keep doubling, an IO boost has to be requested at least once per tick,
330 * otherwise we restart from the utilization of the minimum OPP.
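*
* For instance, with SCHED_CAPACITY_SCALE == 1024 the boost grows across
* successive in-tick IO wakeups as 128 -> 256 -> 512 -> 1024, i.e. from
* IOWAIT_BOOST_MIN to full capacity in three doublings.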
332 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
335 bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
337 /* Reset boost if the CPU appears to have been idle enough */
338 if (sg_cpu->iowait_boost &&
339 sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
342 /* Boost only tasks waking up after IO */
343 if (!set_iowait_boost)
346 /* Ensure boost doubles only one time at each request */
347 if (sg_cpu->iowait_boost_pending)
349 sg_cpu->iowait_boost_pending = true;
351 /* Double the boost at each request */
352 if (sg_cpu->iowait_boost) {
353 sg_cpu->iowait_boost =
354 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
358 /* First wakeup after IO: start with minimum boost */
359 sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
363 * sugov_iowait_apply() - Apply the IO boost to a CPU.
364 * @sg_cpu: the sugov data for the cpu to boost
365 * @time: the update time from the caller
366 * @util: the utilization to (eventually) boost
367 * @max: the maximum value the utilization can be boosted to
369 * A CPU running a task that has woken up after an IO operation can have its
370 * utilization boosted to speed up the completion of those IO operations.
371 * The IO boost value is increased each time a task wakes up from IO, in
372 * sugov_iowait_boost(), and it is decreased by this function each time an
373 * increase has not been requested (!iowait_boost_pending).
375 * A CPU that appears to have been idle for at least one tick also has its
376 * IO boost utilization reset.
378 * This mechanism is designed to boost tasks that frequently wait on IO,
379 * while being more conservative on tasks that do only sporadic IO operations.
381 static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
382 unsigned long util, unsigned long max)
386 /* No boost currently required */
387 if (!sg_cpu->iowait_boost)
390 /* Reset boost if the CPU appears to have been idle enough */
391 if (sugov_iowait_reset(sg_cpu, time, false))
394 if (!sg_cpu->iowait_boost_pending) {
396 * No boost pending; reduce the boost value.
398 sg_cpu->iowait_boost >>= 1;
399 if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
400 sg_cpu->iowait_boost = 0;
405 sg_cpu->iowait_boost_pending = false;
408 * @util is already in capacity scale; convert iowait_boost
409 * into the same scale so we can compare.
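*
* E.g. (illustrative): with iowait_boost = 512 and max = 1024 the boost is
* (512 * 1024) >> 10 = 512; on a CPU whose max capacity is 512 the same
* boost value would be scaled down to 256.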
411 boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
412 return max(boost, util);
415 #ifdef CONFIG_NO_HZ_COMMON
416 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
418 unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
419 bool ret = idle_calls == sg_cpu->saved_idle_calls;
421 sg_cpu->saved_idle_calls = idle_calls;
425 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
426 #endif /* CONFIG_NO_HZ_COMMON */
429 * Make sugov_should_update_freq() ignore the rate limit when DL
430 * has increased the utilization.
432 static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
434 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
435 sg_policy->limits_changed = true;
438 static void sugov_update_single(struct update_util_data *hook, u64 time,
441 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
442 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
443 unsigned long util, max;
446 unsigned int cached_freq = sg_policy->cached_raw_freq;
448 sugov_iowait_boost(sg_cpu, time, flags);
449 sg_cpu->last_update = time;
451 ignore_dl_rate_limit(sg_cpu, sg_policy);
453 if (!sugov_should_update_freq(sg_policy, time))
456 /* Limits may have changed, don't skip frequency update */
457 busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
459 util = sugov_get_util(sg_cpu);
461 util = sugov_iowait_apply(sg_cpu, time, util, max);
462 next_f = get_next_freq(sg_policy, util, max);
464 * Do not reduce the frequency if the CPU has not been idle
465 * recently, as the reduction is likely to be premature then.
467 if (busy && next_f < sg_policy->next_freq) {
468 next_f = sg_policy->next_freq;
470 /* Restore cached freq as next_freq has changed */
471 sg_policy->cached_raw_freq = cached_freq;
475 * This code runs under rq->lock for the target CPU, so it won't run
476 * concurrently on two different CPUs for the same target and it is not
477 * necessary to acquire the lock in the fast switch case.
479 if (sg_policy->policy->fast_switch_enabled) {
480 sugov_fast_switch(sg_policy, time, next_f);
482 raw_spin_lock(&sg_policy->update_lock);
483 sugov_deferred_update(sg_policy, time, next_f);
484 raw_spin_unlock(&sg_policy->update_lock);
488 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
490 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
491 struct cpufreq_policy *policy = sg_policy->policy;
492 unsigned long util = 0, max = 1;
495 for_each_cpu(j, policy->cpus) {
496 struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
497 unsigned long j_util, j_max;
499 j_util = sugov_get_util(j_sg_cpu);
500 j_max = j_sg_cpu->max;
501 j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
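		/*
		 * Cross-multiplying avoids a division: j_util / j_max exceeds
		 * util / max iff j_util * max > j_max * util, so the CPU with
		 * the highest relative utilization is selected.
		 */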
503 if (j_util * max > j_max * util) {
509 return get_next_freq(sg_policy, util, max);
513 sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
515 struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
516 struct sugov_policy *sg_policy = sg_cpu->sg_policy;
519 raw_spin_lock(&sg_policy->update_lock);
521 sugov_iowait_boost(sg_cpu, time, flags);
522 sg_cpu->last_update = time;
524 ignore_dl_rate_limit(sg_cpu, sg_policy);
526 if (sugov_should_update_freq(sg_policy, time)) {
527 next_f = sugov_next_freq_shared(sg_cpu, time);
529 if (sg_policy->policy->fast_switch_enabled)
530 sugov_fast_switch(sg_policy, time, next_f);
532 sugov_deferred_update(sg_policy, time, next_f);
535 raw_spin_unlock(&sg_policy->update_lock);
538 static void sugov_work(struct kthread_work *work)
540 struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
545 * Hold sg_policy->update_lock briefly to handle the case where
546 * sg_policy->next_freq is read here and then updated by
547 * sugov_deferred_update() just before work_in_progress is set to false
548 * here; without the lock we could miss queueing the new update.
550 * Note: If a work item was queued after the update_lock was released,
551 * sugov_work() will just be called again by the kthread_work code; the
552 * request will be processed before the sugov thread sleeps.
554 raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
555 freq = sg_policy->next_freq;
556 sg_policy->work_in_progress = false;
557 raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
559 mutex_lock(&sg_policy->work_lock);
560 __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
561 mutex_unlock(&sg_policy->work_lock);
564 static void sugov_irq_work(struct irq_work *irq_work)
566 struct sugov_policy *sg_policy;
568 sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
570 kthread_queue_work(&sg_policy->worker, &sg_policy->work);
573 /************************** sysfs interface ************************/
575 static struct sugov_tunables *global_tunables;
576 static DEFINE_MUTEX(global_tunables_lock);
578 static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
580 return container_of(attr_set, struct sugov_tunables, attr_set);
583 static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
585 struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
587 return sprintf(buf, "%u\n", tunables->rate_limit_us);
591 rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
593 struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
594 struct sugov_policy *sg_policy;
595 unsigned int rate_limit_us;
597 if (kstrtouint(buf, 10, &rate_limit_us))
600 tunables->rate_limit_us = rate_limit_us;
602 list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
603 sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
608 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
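/*
 * The tunable is exposed through sysfs; depending on whether the driver uses
 * per-policy governors it typically appears as
 * /sys/devices/system/cpu/cpufreq/policy<N>/schedutil/rate_limit_us or
 * /sys/devices/system/cpu/cpufreq/schedutil/rate_limit_us (in microseconds).
 */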
610 static struct attribute *sugov_attrs[] = {
614 ATTRIBUTE_GROUPS(sugov);
616 static struct kobj_type sugov_tunables_ktype = {
617 .default_groups = sugov_groups,
618 .sysfs_ops = &governor_sysfs_ops,
621 /********************** cpufreq governor interface *********************/
623 struct cpufreq_governor schedutil_gov;
625 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
627 struct sugov_policy *sg_policy;
629 sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
633 sg_policy->policy = policy;
634 raw_spin_lock_init(&sg_policy->update_lock);
638 static void sugov_policy_free(struct sugov_policy *sg_policy)
643 static int sugov_kthread_create(struct sugov_policy *sg_policy)
645 struct task_struct *thread;
646 struct sched_attr attr = {
647 .size = sizeof(struct sched_attr),
648 .sched_policy = SCHED_DEADLINE,
649 .sched_flags = SCHED_FLAG_SUGOV,
653 * Fake (unused) bandwidth; workaround to "fix"
654 * priority inheritance.
656 .sched_runtime = 1000000,
657 .sched_deadline = 10000000,
658 .sched_period = 10000000,
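	/*
	 * Note: the values above nominally describe 1 ms of runtime per 10 ms
	 * period; with SCHED_FLAG_SUGOV this bandwidth is not actually
	 * reserved, the DEADLINE class is only used so that frequency updates
	 * are not delayed behind RT/CFS tasks.
	 */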
660 struct cpufreq_policy *policy = sg_policy->policy;
663 /* kthread only required for slow path */
664 if (policy->fast_switch_enabled)
667 kthread_init_work(&sg_policy->work, sugov_work);
668 kthread_init_worker(&sg_policy->worker);
669 thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
671 cpumask_first(policy->related_cpus));
672 if (IS_ERR(thread)) {
673 pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
674 return PTR_ERR(thread);
677 ret = sched_setattr_nocheck(thread, &attr);
679 kthread_stop(thread);
680 pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
684 sg_policy->thread = thread;
685 kthread_bind_mask(thread, policy->related_cpus);
686 init_irq_work(&sg_policy->irq_work, sugov_irq_work);
687 mutex_init(&sg_policy->work_lock);
689 wake_up_process(thread);
694 static void sugov_kthread_stop(struct sugov_policy *sg_policy)
696 /* kthread only required for slow path */
697 if (sg_policy->policy->fast_switch_enabled)
700 kthread_flush_worker(&sg_policy->worker);
701 kthread_stop(sg_policy->thread);
702 mutex_destroy(&sg_policy->work_lock);
705 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
707 struct sugov_tunables *tunables;
709 tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
711 gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
712 if (!have_governor_per_policy())
713 global_tunables = tunables;
718 static void sugov_tunables_free(struct sugov_tunables *tunables)
720 if (!have_governor_per_policy())
721 global_tunables = NULL;
726 static int sugov_init(struct cpufreq_policy *policy)
728 struct sugov_policy *sg_policy;
729 struct sugov_tunables *tunables;
732 /* State should be equivalent to EXIT */
733 if (policy->governor_data)
736 cpufreq_enable_fast_switch(policy);
738 sg_policy = sugov_policy_alloc(policy);
741 goto disable_fast_switch;
744 ret = sugov_kthread_create(sg_policy);
748 mutex_lock(&global_tunables_lock);
750 if (global_tunables) {
751 if (WARN_ON(have_governor_per_policy())) {
755 policy->governor_data = sg_policy;
756 sg_policy->tunables = global_tunables;
758 gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
762 tunables = sugov_tunables_alloc(sg_policy);
768 tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
770 policy->governor_data = sg_policy;
771 sg_policy->tunables = tunables;
773 ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
774 get_governor_parent_kobj(policy), "%s",
780 mutex_unlock(&global_tunables_lock);
784 kobject_put(&tunables->attr_set.kobj);
785 policy->governor_data = NULL;
786 sugov_tunables_free(tunables);
789 sugov_kthread_stop(sg_policy);
790 mutex_unlock(&global_tunables_lock);
793 sugov_policy_free(sg_policy);
796 cpufreq_disable_fast_switch(policy);
798 pr_err("initialization failed (error %d)\n", ret);
802 static void sugov_exit(struct cpufreq_policy *policy)
804 struct sugov_policy *sg_policy = policy->governor_data;
805 struct sugov_tunables *tunables = sg_policy->tunables;
808 mutex_lock(&global_tunables_lock);
810 count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
811 policy->governor_data = NULL;
813 sugov_tunables_free(tunables);
815 mutex_unlock(&global_tunables_lock);
817 sugov_kthread_stop(sg_policy);
818 sugov_policy_free(sg_policy);
819 cpufreq_disable_fast_switch(policy);
822 static int sugov_start(struct cpufreq_policy *policy)
824 struct sugov_policy *sg_policy = policy->governor_data;
827 sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
828 sg_policy->last_freq_update_time = 0;
829 sg_policy->next_freq = 0;
830 sg_policy->work_in_progress = false;
831 sg_policy->limits_changed = false;
832 sg_policy->need_freq_update = false;
833 sg_policy->cached_raw_freq = 0;
835 for_each_cpu(cpu, policy->cpus) {
836 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
838 memset(sg_cpu, 0, sizeof(*sg_cpu));
840 sg_cpu->sg_policy = sg_policy;
843 for_each_cpu(cpu, policy->cpus) {
844 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
846 cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
847 policy_is_shared(policy) ?
848 sugov_update_shared :
849 sugov_update_single);
854 static void sugov_stop(struct cpufreq_policy *policy)
856 struct sugov_policy *sg_policy = policy->governor_data;
859 for_each_cpu(cpu, policy->cpus)
860 cpufreq_remove_update_util_hook(cpu);
864 if (!policy->fast_switch_enabled) {
865 irq_work_sync(&sg_policy->irq_work);
866 kthread_cancel_work_sync(&sg_policy->work);
870 static void sugov_limits(struct cpufreq_policy *policy)
872 struct sugov_policy *sg_policy = policy->governor_data;
874 if (!policy->fast_switch_enabled) {
875 mutex_lock(&sg_policy->work_lock);
876 cpufreq_policy_apply_limits(policy);
877 mutex_unlock(&sg_policy->work_lock);
880 sg_policy->limits_changed = true;
883 struct cpufreq_governor schedutil_gov = {
885 .owner = THIS_MODULE,
886 .dynamic_switching = true,
889 .start = sugov_start,
891 .limits = sugov_limits,
894 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
895 struct cpufreq_governor *cpufreq_default_governor(void)
897 return &schedutil_gov;
901 cpufreq_governor_init(schedutil_gov);
903 #ifdef CONFIG_ENERGY_MODEL
904 extern bool sched_energy_update;
905 extern struct mutex sched_energy_mutex;
907 static void rebuild_sd_workfn(struct work_struct *work)
909 mutex_lock(&sched_energy_mutex);
910 sched_energy_update = true;
911 rebuild_sched_domains();
912 sched_energy_update = false;
913 mutex_unlock(&sched_energy_mutex);
915 static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
918 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
919 * on governor changes to make sure the scheduler knows about it.
921 void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
922 struct cpufreq_governor *old_gov)
924 if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
926 * When called from the cpufreq_register_driver() path, the
927 * cpu_hotplug_lock is already held, so use a work item to
928 * avoid nested locking in rebuild_sched_domains().
930 schedule_work(&rebuild_sd_work);