kernel/sched/cpufreq_schedutil.c
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <trace/events/power.h>

struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;    /* For shared policies */
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct                  irq_work irq_work;
        struct                  kthread_work work;
        struct                  mutex work_lock;
        struct                  kthread_worker worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;
        unsigned int            cpu;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
        unsigned int            iowait_boost_max;
        u64                     last_update;

        /* The fields below are only needed when sharing a policy: */
        unsigned long           util_cfs;
        unsigned long           util_dl;
        unsigned long           max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_update_commit() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * For the slow switching platforms, the kthread is always scheduled on
         * the right set of CPUs and any CPU can find the next frequency and
         * schedule the kthread.
         */
        if (sg_policy->policy->fast_switch_enabled &&
            !cpufreq_can_do_remote_dvfs(sg_policy->policy))
                return false;

        if (sg_policy->work_in_progress)
                return false;

        if (unlikely(sg_policy->need_freq_update)) {
                sg_policy->need_freq_update = false;
                /*
                 * This happens when limits change, so forget the previous
                 * next_freq value and force an update.
                 */
                sg_policy->next_freq = UINT_MAX;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
                                unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;

        if (sg_policy->next_freq == next_freq)
                return;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        if (policy->fast_switch_enabled) {
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (!next_freq)
                        return;

                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
        } else {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = (freq + (freq >> 2)) * util / max;

        if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
                return sg_policy->next_freq;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}
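
/*
 * Worked example (for illustration only): with frequency-invariant
 * utilization, max_freq = 2000000 kHz and util / max = 0.5, the raw
 * frequency is (2000000 + 500000) * 0.5 = 1250000 kHz, i.e.
 * 1.25 * max_freq * util / max.  cpufreq_driver_resolve_freq() then maps
 * that value to the lowest supported frequency that is not below it.
 */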

static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);

        sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
        sg_cpu->util_cfs = cpu_util_cfs(rq);
        sg_cpu->util_dl  = cpu_util_dl(rq);
}

static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);

        if (rq->rt.rt_nr_running)
                return sg_cpu->max;

        /*
         * Utilization required by DEADLINE must always be granted while, for
         * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
         * gracefully reduce the frequency when no tasks show up for longer
         * periods of time.
         *
         * Ideally we would like to set util_dl as min/guaranteed freq and
         * util_cfs + util_dl as requested freq. However, cpufreq is not yet
         * ready for such an interface. So, we only do the latter for now.
         */
        return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
}

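/*
 * An update carrying SCHED_CPUFREQ_IOWAIT marks an iowait boost as pending
 * and either starts the boost at the policy's minimum frequency or doubles
 * the current one, capped at iowait_boost_max.  Updates without the flag
 * clear the boost if the CPU appears to have been idle for more than a tick.
 */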
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags)
{
        if (flags & SCHED_CPUFREQ_IOWAIT) {
                if (sg_cpu->iowait_boost_pending)
                        return;

                sg_cpu->iowait_boost_pending = true;

                if (sg_cpu->iowait_boost) {
                        sg_cpu->iowait_boost <<= 1;
                        if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
                                sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
                } else {
                        sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
                }
        } else if (sg_cpu->iowait_boost) {
                s64 delta_ns = time - sg_cpu->last_update;

                /* Clear iowait_boost if the CPU appears to have been idle. */
                if (delta_ns > TICK_NSEC) {
                        sg_cpu->iowait_boost = 0;
                        sg_cpu->iowait_boost_pending = false;
                }
        }
}

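/*
 * Consume the boost set up above: a pending boost is used as-is, an older
 * one is halved (and dropped once it falls below the policy minimum).  The
 * boost replaces the utilization/capacity pair passed in whenever it would
 * translate into a higher frequency.
 */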
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
                               unsigned long *max)
{
        unsigned int boost_util, boost_max;

        if (!sg_cpu->iowait_boost)
                return;

        if (sg_cpu->iowait_boost_pending) {
                sg_cpu->iowait_boost_pending = false;
        } else {
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        boost_util = sg_cpu->iowait_boost;
        boost_max = sg_cpu->iowait_boost_max;

        if (*util * boost_max < *max * boost_util) {
                *util = boost_util;
                *max = boost_max;
        }
}

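/*
 * A CPU is considered busy if it has not entered idle since the previous
 * sample of its idle-call counter; sugov_update_single() uses this to avoid
 * reducing the frequency on a CPU that is still busy.
 */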
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
        if (cpu_util_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->util_dl)
                sg_policy->need_freq_update = true;
}

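/*
 * Utilization update handler for single-CPU policies.  It runs with the
 * target CPU's rq->lock held, so no additional locking is needed here.
 */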
static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        busy = sugov_cpu_is_busy(sg_cpu);

        sugov_get_util(sg_cpu);
        max = sg_cpu->max;
        util = sugov_aggregate_util(sg_cpu);
        sugov_iowait_boost(sg_cpu, &util, &max);
        next_f = get_next_freq(sg_policy, util, max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (busy && next_f < sg_policy->next_freq &&
            sg_policy->next_freq != UINT_MAX) {
                next_f = sg_policy->next_freq;

                /* Reset cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = 0;
        }

        sugov_update_commit(sg_policy, time, next_f);
}

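/*
 * For a policy shared by several CPUs: evaluate each CPU in the policy,
 * discard iowait boost where the per-CPU data looks stale, and use the
 * largest utilization/capacity ratio to pick the next frequency.
 */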
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
                s64 delta_ns;

                sugov_get_util(j_sg_cpu);

                /*
                 * If the CPU utilization was last updated long enough ago
                 * (more than a tick before this update), its iowait boost is
                 * probably stale, so reset it.  The CPU's CFS and DEADLINE
                 * utilization is still taken into account below.
                 */
                delta_ns = time - j_sg_cpu->last_update;
                if (delta_ns > TICK_NSEC) {
                        j_sg_cpu->iowait_boost = 0;
                        j_sg_cpu->iowait_boost_pending = false;
                }

                j_max = j_sg_cpu->max;
                j_util = sugov_aggregate_util(j_sg_cpu);
                sugov_iowait_boost(j_sg_cpu, &j_util, &j_max);
                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

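/*
 * Utilization update handler for policies shared by multiple CPUs.  Per-CPU
 * data of all CPUs in the policy may be accessed here, so it is serialized
 * by update_lock.
 */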
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu, sg_policy);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);
                sugov_update_commit(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}

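/*
 * Slow path: when the driver cannot fast-switch, the actual frequency change
 * is performed from this kthread work, queued via sugov_irq_work() below.
 */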
static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
                                CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);

        sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
        &rate_limit_us.attr,
        NULL
};

static struct kobj_type sugov_tunables_ktype = {
        .default_attrs = sugov_attributes,
        .sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  =  1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;

        /* Kthread is bound to all CPUs by default */
        if (!policy->dvfs_possible_from_any_cpu)
                kthread_bind_mask(thread, policy->related_cpus);

        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time        = 0;
        sg_policy->next_freq                    = UINT_MAX;
        sg_policy->work_in_progress             = false;
        sg_policy->need_freq_update             = false;
        sg_policy->cached_raw_freq              = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu                     = cpu;
                sg_cpu->sg_policy               = sg_policy;
                sg_cpu->iowait_boost_max        = policy->cpuinfo.max_freq;
        }

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
                                             policy_is_shared(policy) ?
                                                        sugov_update_shared :
                                                        sugov_update_single);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_sched();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

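/*
 * Called when the policy limits (min/max) change.  In the slow path the new
 * limits are applied directly under work_lock; in both paths need_freq_update
 * is set so that the next utilization update bypasses the rate limit and
 * recomputes the frequency.
 */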
static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
        .name                   = "schedutil",
        .owner                  = THIS_MODULE,
        .dynamic_switching      = true,
        .init                   = sugov_init,
        .exit                   = sugov_exit,
        .start                  = sugov_start,
        .stop                   = sugov_stop,
        .limits                 = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
        return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);