// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)

#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
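/*
 * Illustrative sketch (not part of this file): a governor-style busy-time
 * calculation across one sampling interval, in the spirit of the ondemand
 * governor.  prev_idle/prev_wall are hypothetical locals saved from the
 * previous sample.
 *
 *	u64 idle, wall;
 *	unsigned int busy_pct;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	// busy% = 100 * (delta_wall - delta_idle) / delta_wall
 *	busy_pct = 100 * ((wall - prev_wall) - (idle - prev_idle)) /
 *		   (wall - prev_wall);
 */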
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - populate policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
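/*
 * Illustrative sketch (not part of this file): a minimal driver ->init()
 * callback built on cpufreq_generic_init().  The frequency table and the
 * 100000 ns transition latency are assumptions for the example.
 *
 *	static struct cpufreq_frequency_table example_freq_table[] = {
 *		{ .frequency = 500000 },
 *		{ .frequency = 1000000 },
 *		{ .frequency = CPUFREQ_TABLE_END },
 *	};
 *
 *	static int example_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, example_freq_table, 100000);
 *		return 0;
 *	}
 */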
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
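/*
 * Illustrative sketch (not part of this file): the canonical get/put pattern
 * for inspecting a policy without holding the reference longer than needed.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	unsigned int cur = 0;
 *
 *	if (policy) {
 *		cur = policy->cur;
 *		cpufreq_cpu_put(policy);
 *	}
 */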
/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}
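/*
 * Illustrative sketch (not part of this file): acquire/release bracket a
 * write-side update of an active policy, as cpufreq_update_policy() does.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		refresh_frequency_limits(policy);	// rwsem held for writing
 *		cpufreq_cpu_release(policy);
 *	}
 */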
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 * @ci: Frequency change information.
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy whose frequency is changing.
 * @freqs: contains details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 *
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
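/*
 * Illustrative sketch (not part of this file): how a driver's ->target()
 * implementation without the ASYNC_NOTIFICATION flag brackets the actual
 * hardware write with the transition notifications.  example_write_freq()
 * is a stand-in for the platform-specific frequency programming.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = example_write_freq(policy, target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */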
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
static unsigned int __resolve_freq(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		unsigned int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     relation);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), the above formula gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
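/*
 * Illustrative note (not part of this file): governors typically derive
 * their sampling period from this helper; the schedutil governor, for
 * example, initializes its rate limit from it.
 *
 *	unsigned int delay_us = cpufreq_policy_transition_delay_us(policy);
 *	// re-evaluate the frequency no more often than every delay_us
 */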
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}
/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)	\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
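/*
 * For reference, store_one(scaling_min_freq, min) above expands to roughly
 * the following (modulo line continuations):
 *
 *	static ssize_t store_scaling_min_freq(struct cpufreq_policy *policy,
 *					      const char *buf, size_t count)
 *	{
 *		unsigned long val;
 *		int ret;
 *
 *		ret = sscanf(buf, "%lu", &val);
 *		if (ret != 1)
 *			return -EINVAL;
 *
 *		ret = freq_qos_update_request(policy->min_freq_req, val);
 *		return ret >= 0 ? count : ret;
 *	}
 */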
/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}
/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!fattr->store)
		return -EIO;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to the cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {
		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * CPUFREQ_CREATE_POLICY notification is sent only after
		 * successfully adding max_freq_req request.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   policy->min);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on uninitialized request in case
		 * of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   policy->max);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * frequency table present with cpufreq core. In such cases CPU might be
	 * unstable if it has to run on that frequency for long duration of time
	 * and so its better to set it to a frequency which is specified in
	 * freq-table. This also makes cpufreq stats inconsistent as
	 * cpufreq-stats would fail to register because current frequency of CPU
	 * isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we go
	 * for the next freq which is >= policy->cur ('cur' must be set by now,
	 * otherwise we will end up setting freq to lowest of the table as 'cur'
	 * is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}
/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy(), or scheduling handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case platform wants some specific frequency to be configured
 * during suspend..
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (e.g. i2c, regulators) they use for
 * changing the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
				cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL if
 * none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or NULL
 * if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a list
 * of notifiers that run on cpufreq policy changes.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
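/*
 * Illustrative sketch (not part of this file): registering a transition
 * notifier that logs POSTCHANGE events.  example_transition_cb and
 * example_nb are hypothetical names.
 *
 *	static int example_transition_cb(struct notifier_block *nb,
 *					 unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("CPU%u: %u -> %u kHz\n", freqs->policy->cpu,
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */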
/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
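/*
 * Illustrative sketch (not part of this file): the schedutil-style fast
 * path, called from scheduler context after the caller has verified that
 * policy->fast_switch_enabled is set.
 *
 *	unsigned int freq;
 *
 *	freq = cpufreq_driver_fast_switch(policy, next_freq);
 *	if (!freq)
 *		return;		// switch failed, hardware state unchanged
 *	// freq is the frequency that was actually set
 */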
/**
 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
 * @cpu: Target CPU.
 * @min_perf: Minimum (required) performance level (units of @capacity).
 * @target_perf: Target (desired) performance level (units of @capacity).
 * @capacity: Capacity of the target CPU.
 *
 * Carry out a fast performance level switch of @cpu without sleeping.
 *
 * The driver's ->adjust_perf() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select a suitable performance level equal to or above
 * @min_perf and preferably equal to or below @target_perf.
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same CPU and that it will never be called in
 * parallel with either ->target() or ->target_index() or ->fast_switch() for
 * the same CPU.
 */
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity)
{
	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}

/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the
 * current driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	return !!cpufreq_driver->adjust_perf;
}
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = __resolve_freq(policy, target_freq, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases where
	 * exactly the same frequency is requested again, so we can save a few
	 * function calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
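
/*
 * Illustrative sketch (not from this file): a governor that simply pins the
 * policy to its maximum frequency would call the locked wrapper above:
 *
 *	cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 */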
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * The governor might not be initialized here if an ACPI _PPC change
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
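
/*
 * Illustrative sketch (hypothetical module, not from this file): a minimal
 * governor registers itself from its module init routine:
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.limits	= foo_limits,	(hypothetical ->limits callback)
 *	};
 *
 *	static int __init foo_governor_init(void)
 *	{
 *		return cpufreq_register_governor(&foo_governor);
 *	}
 */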
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
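
/*
 * Illustrative sketch (not from this file): callers use the helper above to
 * take a consistent snapshot of a CPU's policy:
 *
 *	struct cpufreq_policy policy;
 *
 *	if (!cpufreq_get_policy(&policy, cpu))
 *		pr_info("cpu%u: %u - %u kHz\n", cpu, policy.min, policy.max);
 */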
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy. That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us the final aggregated value here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make sure
	 * that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	policy->min = new_data.min;
	policy->max = new_data.max;
	trace_cpu_frequency_limits(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			sched_cpufreq_governor_change(policy, old_gov);
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
 *                               BOOST                               *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_lock();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	cpus_read_unlock();

	return 0;

err_reset_state:
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
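
/*
 * Illustrative sketch (not from this file): a driver whose frequency table
 * contains CPUFREQ_BOOST_FREQ entries can opt in to the generic software
 * boost handling from its ->init() callback:
 *
 *	ret = cpufreq_enable_boost_support();
 *	if (ret)
 *		return ret;
 */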
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);

	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);

	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structures, so make sure they are available before proceeding
	 * further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (unlikely(list_empty(&cpufreq_policy_list))) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
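
/*
 * Illustrative sketch (hypothetical driver, not from this file): a
 * table-based driver registers the minimal set of callbacks that the checks
 * in cpufreq_register_driver() require (->init, ->verify and exactly one of
 * ->setpolicy or ->target/->target_index):
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_cpufreq_init,		(hypothetical)
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,	(hypothetical)
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */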
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();

	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	if (!strlen(default_governor))
		strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
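
/*
 * Usage note (editorial, not from the original source): since the cpufreq
 * core is built in, the parameters declared below are typically set on the
 * kernel command line, e.g. "cpufreq.off=1" or
 * "cpufreq.default_governor=schedutil".
 */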
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);