// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];
/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}
/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}
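
/*
 * get_cpu_idle_time - accumulated idle time (in microseconds) of a CPU.
 * @cpu: CPU to query.
 * @wall: location to store the accumulated wall-clock time (in microseconds).
 * @io_busy: when set, iowait time is accounted as busy rather than idle.
 *
 * Falls back to the jiffies-based estimate above when fine-grained idle-time
 * accounting is unavailable.
 */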
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
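
/*
 * Illustrative sketch (not part of this file) of how a driver's ->init()
 * callback might use cpufreq_generic_init(); "foo_freq_table" and the 100 us
 * (100000 ns) transition latency are hypothetical:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, 100000);
 *		return 0;
 *	}
 */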
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated with CPU %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy. Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy whose frequency is changing.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}
/* Do post notifications when the transition may have failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
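
/*
 * cpufreq_freq_transition_begin() and cpufreq_freq_transition_end() bracket a
 * frequency transition: _begin() serializes transitions on the policy and
 * sends the PRECHANGE notification, while _end() sends the POSTCHANGE
 * notification (rolling it back if the transition failed), updates the
 * frequency scale and wakes up any waiters.
 */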
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    policy->cpuinfo.max_freq);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*
 * Fast frequency switching status count. Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers. Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		unsigned int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		/*
		 * For platforms that can change the frequency very fast (< 10
		 * us), this formula gives a decent transition delay. But for
		 * platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 10 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
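
/*
 * Worked example, assuming LATENCY_MULTIPLIER keeps its upstream value of
 * 1000: a transition_latency of 5000 ns gives latency = 5 us, so the default
 * transition delay is 5 * 1000 = 5000 us (5 ms); a 50 us latency would yield
 * 50 ms and is therefore capped at 10 ms.
 */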
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
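
/* Look up a governor by name; the caller must hold cpufreq_governor_mutex. */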
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}
static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}
/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sprintf(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = sscanf(buf, "%lu", &val);					\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
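
/*
 * For instance, store_one(scaling_max_freq, max) expands to a
 * store_scaling_max_freq() handler that parses the written value and forwards
 * it to policy->max_freq_req via freq_qos_update_request().
 */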
/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				 policy->governor->name);
	return -EINVAL;
}
/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}
/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sprintf(buf, "%u\n", limit);
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	if (!fattr->store)
		return -EIO;

	/*
	 * cpus_read_trylock() is used here to work around a circular lock
	 * dependency problem with respect to cpufreq_register_driver().
	 */
	if (!cpus_read_trylock())
		return -EBUSY;

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	cpus_read_unlock();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {

		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
			ret, cpumask_pr_args(policy->cpus));
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * CPUFREQ_CREATE_POLICY notification is sent only after
		 * successfully adding the max_freq_req request.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
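
/*
 * cpufreq_online - set up cpufreq management for @cpu.
 *
 * Reuse an existing inactive policy covering @cpu when there is one; otherwise
 * allocate a fresh policy, let the driver initialize it via ->init() (or
 * recover it via ->online()), register the QoS frequency requests and start
 * the governor.
 */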
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy. Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	if (!new_policy && cpufreq_driver->online) {
		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}

		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call the driver's ->init(). From then on the driver must be
		 * able to accept all calls to ->verify() and ->setpolicy()
		 * for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_exit_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j);
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req)
			goto out_destroy_policy;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   policy->min);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   policy->max);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run at that frequency for a long
	 * time, so it's better to set it to a frequency which is specified in
	 * the freq-table. This also makes cpufreq stats inconsistent, as
	 * cpufreq-stats would fail to register because the current frequency
	 * of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	if (cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));

	up_write(&policy->rwsem);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}
/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu);

	return 0;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
	} else if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus)) {
		/* We did light-weight exit earlier, do full tear down now */
		if (cpufreq_driver->offline)
			cpufreq_driver->exit(policy);

		cpufreq_policy_free(policy);
	}
}
/**
 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
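
/*
 * cpufreq_verify_current_freq - read the current frequency from the driver
 * and resync the core's cached value with it if the two differ, scheduling a
 * policy update as well when @update is set.
 */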
static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * The return value will be the same as what is shown in scaling_cur_freq in
 * sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the CPU.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle: the devices they use for changing the frequency (e.g. i2c,
 * regulators) are themselves suspended shortly afterwards.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
			       policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
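
/*
 * Illustrative sketch (not part of this file) of a transition-notifier
 * registration; the callback and notifier_block names are hypothetical:
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("now running at %u kHz\n", freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */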
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    policy->cpuinfo.max_freq);
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
/* Must set freqs->new to the intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to an intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? The driver should
		 * have reverted back to the initial frequency and so should we.
		 * Check here for intermediate_freq instead of get_intermediate,
		 * in case we haven't switched to the intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, so that we can
	 * save a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}
static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * The governor might not have been initialized if an ACPI _PPC change
	 * notification occurred, so check for it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching ? */
	if (policy->governor->dynamic_switching &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}

static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
2287 int cpufreq_start_governor(struct cpufreq_policy *policy)
2288 {
2289 int ret;
2291 if (cpufreq_suspended)
2292 return 0;
2294 if (!policy->governor)
2295 return -EINVAL;
2297 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2299 if (cpufreq_driver->get)
2300 cpufreq_verify_current_freq(policy, false);
2302 if (policy->governor->start) {
2303 ret = policy->governor->start(policy);
2304 if (ret)
2305 return ret;
2306 }
2308 if (policy->governor->limits)
2309 policy->governor->limits(policy);
2311 return 0;
2312 }
2314 void cpufreq_stop_governor(struct cpufreq_policy *policy)
2315 {
2316 if (cpufreq_suspended || !policy->governor)
2317 return;
2319 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2321 if (policy->governor->stop)
2322 policy->governor->stop(policy);
2323 }
2325 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2326 {
2327 if (cpufreq_suspended || !policy->governor)
2328 return;
2330 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2332 if (policy->governor->limits)
2333 policy->governor->limits(policy);
2334 }
2336 int cpufreq_register_governor(struct cpufreq_governor *governor)
2337 {
2338 int err;
2340 if (!governor)
2341 return -EINVAL;
2343 if (cpufreq_disabled())
2344 return -ENODEV;
2346 mutex_lock(&cpufreq_governor_mutex);
2348 err = -EBUSY;
2349 if (!find_governor(governor->name)) {
2350 err = 0;
2351 list_add(&governor->governor_list, &cpufreq_governor_list);
2352 }
2354 mutex_unlock(&cpufreq_governor_mutex);
2355 return err;
2356 }
2357 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
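/*
 * Example (illustrative sketch, hypothetical names): a minimal governor
 * module registers itself on load and unregisters on exit. Only ->name and
 * ->owner are strictly required; the callbacks are invoked by
 * cpufreq_init_governor()/cpufreq_start_governor() and friends above. The
 * ->limits() body below mirrors what the in-tree performance governor does:
 *
 *	static void my_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *	}
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name	= "mygov",
 *		.owner	= THIS_MODULE,
 *		.limits	= my_gov_limits,
 *	};
 *
 *	static int __init my_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&my_governor);
 *	}
 *	module_init(my_gov_init);
 *
 *	static void __exit my_gov_exit(void)
 *	{
 *		cpufreq_unregister_governor(&my_governor);
 *	}
 *	module_exit(my_gov_exit);
 */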
2359 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2360 {
2361 struct cpufreq_policy *policy;
2362 unsigned long flags;
2364 if (!governor)
2365 return;
2367 if (cpufreq_disabled())
2368 return;
2370 /* clear last_governor for all inactive policies */
2371 read_lock_irqsave(&cpufreq_driver_lock, flags);
2372 for_each_inactive_policy(policy) {
2373 if (!strcmp(policy->last_governor, governor->name)) {
2374 policy->governor = NULL;
2375 strcpy(policy->last_governor, "\0");
2376 }
2377 }
2378 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2380 mutex_lock(&cpufreq_governor_mutex);
2381 list_del(&governor->governor_list);
2382 mutex_unlock(&cpufreq_governor_mutex);
2383 }
2384 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2387 /*********************************************************************
2388 * POLICY INTERFACE *
2389 *********************************************************************/
2391 /**
2392 * cpufreq_get_policy - get the current cpufreq_policy
2393 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2394 * is written
2395 * @cpu: CPU to find the policy for
2396 *
2397 * Reads the current cpufreq policy.
2398 */
2399 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2400 {
2401 struct cpufreq_policy *cpu_policy;
2402 if (!policy)
2403 return -EINVAL;
2405 cpu_policy = cpufreq_cpu_get(cpu);
2406 if (!cpu_policy)
2407 return -EINVAL;
2409 memcpy(policy, cpu_policy, sizeof(*policy));
2411 cpufreq_cpu_put(cpu_policy);
2412 return 0;
2413 }
2414 EXPORT_SYMBOL(cpufreq_get_policy);
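/*
 * Example (illustrative only): consumers such as thermal drivers can take a
 * snapshot of a CPU's policy this way; the structure is copied by value
 * under the proper reference, and may be stale the moment the call returns:
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, cpu))
 *		pr_info("CPU%u limits: %u - %u kHz\n", cpu, pol.min, pol.max);
 */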
2416 /**
2417 * cpufreq_set_policy - Modify cpufreq policy parameters.
2418 * @policy: Policy object to modify.
2419 * @new_gov: Policy governor pointer.
2420 * @new_pol: Policy value (for drivers with built-in governors).
2421 *
2422 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2423 * limits to be set for the policy, update @policy with the verified limits
2424 * values and either invoke the driver's ->setpolicy() callback (if present) or
2425 * carry out a governor update for @policy. That is, run the current governor's
2426 * ->limits() callback (if @new_gov points to the same object as the one in
2427 * @policy) or replace the governor for @policy with @new_gov.
2428 *
2429 * The cpuinfo part of @policy is not updated by this function.
2430 */
2431 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2432 struct cpufreq_governor *new_gov,
2433 unsigned int new_pol)
2434 {
2435 struct cpufreq_policy_data new_data;
2436 struct cpufreq_governor *old_gov;
2437 int ret;
2439 memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2440 new_data.freq_table = policy->freq_table;
2441 new_data.cpu = policy->cpu;
2442 /*
2443 * The PM QoS framework collects all the requests from users and
2444 * provides us with the final aggregated value here.
2445 */
2446 new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2447 new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2449 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2450 new_data.cpu, new_data.min, new_data.max);
2452 /*
2453 * Verify that the CPU speed can be set within these limits and make sure
2454 * that min <= max.
2455 */
2456 ret = cpufreq_driver->verify(&new_data);
2457 if (ret)
2458 return ret;
2460 policy->min = new_data.min;
2461 policy->max = new_data.max;
2462 trace_cpu_frequency_limits(policy);
2464 policy->cached_target_freq = UINT_MAX;
2466 pr_debug("new min and max freqs are %u - %u kHz\n",
2467 policy->min, policy->max);
2469 if (cpufreq_driver->setpolicy) {
2470 policy->policy = new_pol;
2471 pr_debug("setting range\n");
2472 return cpufreq_driver->setpolicy(policy);
2473 }
2475 if (new_gov == policy->governor) {
2476 pr_debug("governor limits update\n");
2477 cpufreq_governor_limits(policy);
2478 return 0;
2479 }
2481 pr_debug("governor switch\n");
2483 /* save old, working values */
2484 old_gov = policy->governor;
2485 /* end old governor */
2486 if (old_gov) {
2487 cpufreq_stop_governor(policy);
2488 cpufreq_exit_governor(policy);
2489 }
2491 /* start new governor */
2492 policy->governor = new_gov;
2493 ret = cpufreq_init_governor(policy);
2494 if (!ret) {
2495 ret = cpufreq_start_governor(policy);
2496 if (!ret) {
2497 pr_debug("governor change\n");
2498 sched_cpufreq_governor_change(policy, old_gov);
2499 return 0;
2500 }
2501 cpufreq_exit_governor(policy);
2502 }
2504 /* new governor failed, so re-start old one */
2505 pr_debug("starting governor %s failed\n", policy->governor->name);
2506 if (old_gov) {
2507 policy->governor = old_gov;
2508 if (cpufreq_init_governor(policy))
2509 policy->governor = NULL;
2510 else
2511 cpufreq_start_governor(policy);
2512 }
2514 return ret;
2515 }
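/*
 * Note: the min/max fed into ->verify() above are the freq QoS aggregates on
 * policy->constraints. Illustrative sketch (hypothetical request owner) of
 * how kernel code adds such a constraint; the core re-evaluates the policy
 * whenever the aggregated value changes, values are in kHz:
 *
 *	static struct freq_qos_request my_max_req;
 *
 *	ret = freq_qos_add_request(&policy->constraints, &my_max_req,
 *				   FREQ_QOS_MAX, 1400000);
 *	...
 *	freq_qos_update_request(&my_max_req, 1800000);
 *	freq_qos_remove_request(&my_max_req);
 */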
2517 /**
2518 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2519 * @cpu: CPU to re-evaluate the policy for.
2520 *
2521 * Update the current frequency for the cpufreq policy of @cpu and use
2522 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2523 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2524 * for the policy in question, among other things.
2525 */
2526 void cpufreq_update_policy(unsigned int cpu)
2527 {
2528 struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2530 if (!policy)
2531 return;
2533 /*
2534 * BIOS might change freq behind our back
2535 * -> ask driver for current freq and notify governors about a change
2536 */
2537 if (cpufreq_driver->get && has_target() &&
2538 (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2539 goto unlock;
2541 refresh_frequency_limits(policy);
2543 unlock:
2544 cpufreq_cpu_release(policy);
2545 }
2546 EXPORT_SYMBOL(cpufreq_update_policy);
2548 /**
2549 * cpufreq_update_limits - Update policy limits for a given CPU.
2550 * @cpu: CPU to update the policy limits for.
2551 *
2552 * Invoke the driver's ->update_limits callback if present or call
2553 * cpufreq_update_policy() for @cpu.
2554 */
2555 void cpufreq_update_limits(unsigned int cpu)
2556 {
2557 if (cpufreq_driver->update_limits)
2558 cpufreq_driver->update_limits(cpu);
2559 else
2560 cpufreq_update_policy(cpu);
2561 }
2562 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
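/*
 * Example (illustrative): platform code reacting to a firmware-initiated
 * limits change (e.g. an ACPI notification) only needs the CPU number.
 * Drivers with an ->update_limits callback handle it directly; everyone
 * else falls back to the full policy re-evaluation above:
 *
 *	cpufreq_update_limits(cpu);
 */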
2564 /*********************************************************************
2565 * BOOST *
2566 *********************************************************************/
2567 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2568 {
2569 int ret;
2571 if (!policy->freq_table)
2572 return -ENXIO;
2574 ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2575 if (ret) {
2576 pr_err("%s: Policy frequency update failed\n", __func__);
2577 return ret;
2578 }
2580 ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2581 if (ret < 0)
2582 return ret;
2584 return 0;
2585 }
2587 int cpufreq_boost_trigger_state(int state)
2588 {
2589 struct cpufreq_policy *policy;
2590 unsigned long flags;
2591 int ret = 0;
2593 if (cpufreq_driver->boost_enabled == state)
2594 return 0;
2596 write_lock_irqsave(&cpufreq_driver_lock, flags);
2597 cpufreq_driver->boost_enabled = state;
2598 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2600 get_online_cpus();
2601 for_each_active_policy(policy) {
2602 ret = cpufreq_driver->set_boost(policy, state);
2603 if (ret)
2604 goto err_reset_state;
2605 }
2606 put_online_cpus();
2608 return 0;
2610 err_reset_state:
2611 put_online_cpus();
2613 write_lock_irqsave(&cpufreq_driver_lock, flags);
2614 cpufreq_driver->boost_enabled = !state;
2615 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2617 pr_err("%s: Cannot %s BOOST\n",
2618 __func__, state ? "enable" : "disable");
2620 return ret;
2621 }
2623 static bool cpufreq_boost_supported(void)
2624 {
2625 return cpufreq_driver->set_boost;
2626 }
2628 static int create_boost_sysfs_file(void)
2629 {
2630 int ret;
2632 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2633 if (ret)
2634 pr_err("%s: cannot register global BOOST sysfs file\n",
2635 __func__);
2637 return ret;
2638 }
2640 static void remove_boost_sysfs_file(void)
2641 {
2642 if (cpufreq_boost_supported())
2643 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2644 }
2646 int cpufreq_enable_boost_support(void)
2647 {
2648 if (!cpufreq_driver)
2649 return -EINVAL;
2651 if (cpufreq_boost_supported())
2652 return 0;
2654 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2656 /* This will get removed on driver unregister */
2657 return create_boost_sysfs_file();
2658 }
2659 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
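/*
 * Example (illustrative): a driver whose frequency table contains boost
 * entries typically calls this from its ->init() callback, relying on the
 * software boost handler installed above when it has no ->set_boost of its
 * own:
 *
 *	if (policy_has_boost_freq(policy)) {
 *		ret = cpufreq_enable_boost_support();
 *		if (ret)
 *			return ret;
 *	}
 */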
2661 int cpufreq_boost_enabled(void)
2662 {
2663 return cpufreq_driver->boost_enabled;
2664 }
2665 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2667 /*********************************************************************
2668 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2669 *********************************************************************/
2670 static enum cpuhp_state hp_online;
2672 static int cpuhp_cpufreq_online(unsigned int cpu)
2673 {
2674 cpufreq_online(cpu);
2676 return 0;
2677 }
2679 static int cpuhp_cpufreq_offline(unsigned int cpu)
2680 {
2681 cpufreq_offline(cpu);
2683 return 0;
2684 }
2686 /**
2687 * cpufreq_register_driver - register a CPU Frequency driver
2688 * @driver_data: A struct cpufreq_driver containing the values
2689 * submitted by the CPU Frequency driver.
2690 *
2691 * Registers a CPU Frequency driver to this core code. This code
2692 * returns zero on success, -EEXIST when another driver got here first
2693 * (and isn't unregistered in the meantime).
2694 *
2695 */
2696 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2697 {
2698 unsigned long flags;
2699 int ret;
2701 if (cpufreq_disabled())
2702 return -ENODEV;
2704 /*
2705 * The cpufreq core depends heavily on the availability of device
2706 * structures, so make sure they are available before proceeding further.
2707 */
2708 if (!get_cpu_device(0))
2709 return -EPROBE_DEFER;
2711 if (!driver_data || !driver_data->verify || !driver_data->init ||
2712 !(driver_data->setpolicy || driver_data->target_index ||
2713 driver_data->target) ||
2714 (driver_data->setpolicy && (driver_data->target_index ||
2715 driver_data->target)) ||
2716 (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2717 (!driver_data->online != !driver_data->offline))
2718 return -EINVAL;
2720 pr_debug("trying to register driver %s\n", driver_data->name);
2722 /* Protect against concurrent CPU online/offline. */
2723 cpus_read_lock();
2725 write_lock_irqsave(&cpufreq_driver_lock, flags);
2726 if (cpufreq_driver) {
2727 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2728 ret = -EEXIST;
2729 goto out;
2730 }
2731 cpufreq_driver = driver_data;
2732 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2734 /*
2735 * Mark support for the scheduler's frequency invariance engine for
2736 * drivers that implement target(), target_index() or fast_switch().
2737 */
2738 if (!cpufreq_driver->setpolicy) {
2739 static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2740 pr_debug("supports frequency invariance");
2741 }
2743 if (driver_data->setpolicy)
2744 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2746 if (cpufreq_boost_supported()) {
2747 ret = create_boost_sysfs_file();
2748 if (ret)
2749 goto err_null_driver;
2750 }
2752 ret = subsys_interface_register(&cpufreq_interface);
2753 if (ret)
2754 goto err_boost_unreg;
2756 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2757 list_empty(&cpufreq_policy_list)) {
2758 /* if all ->init() calls failed, unregister */
2759 ret = -ENODEV;
2760 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2761 driver_data->name);
2762 goto err_if_unreg;
2763 }
2765 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2766 "cpufreq:online",
2767 cpuhp_cpufreq_online,
2768 cpuhp_cpufreq_offline);
2769 if (ret < 0)
2770 goto err_if_unreg;
2771 hp_online = ret;
2772 ret = 0;
2774 pr_debug("driver %s up and running\n", driver_data->name);
2775 goto out;
2777 err_if_unreg:
2778 subsys_interface_unregister(&cpufreq_interface);
2779 err_boost_unreg:
2780 remove_boost_sysfs_file();
2781 err_null_driver:
2782 write_lock_irqsave(&cpufreq_driver_lock, flags);
2783 cpufreq_driver = NULL;
2784 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2785 out:
2786 cpus_read_unlock();
2787 return ret;
2788 }
2789 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
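/*
 * Example (illustrative skeleton, hypothetical names): the minimal shape of
 * a table-based driver that passes the validation above - ->init and
 * ->verify are mandatory, and exactly one of ->setpolicy or
 * ->target/->target_index must be provided:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, foo_latency_ns);
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	static int __init foo_cpufreq_register(void)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 */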
2791 /**
2792 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2793 *
2794 * Unregister the current CPUFreq driver. Only call this if you have
2795 * the right to do so, i.e. if you have succeeded in initialising before!
2796 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2797 * currently not initialised.
2798 */
2799 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2800 {
2801 unsigned long flags;
2803 if (!cpufreq_driver || (driver != cpufreq_driver))
2804 return -EINVAL;
2806 pr_debug("unregistering driver %s\n", driver->name);
2808 /* Protect against concurrent cpu hotplug */
2809 cpus_read_lock();
2810 subsys_interface_unregister(&cpufreq_interface);
2811 remove_boost_sysfs_file();
2812 static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2813 cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2815 write_lock_irqsave(&cpufreq_driver_lock, flags);
2817 cpufreq_driver = NULL;
2819 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2820 cpus_read_unlock();
2822 return 0;
2823 }
2824 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2826 static int __init cpufreq_core_init(void)
2827 {
2828 struct cpufreq_governor *gov = cpufreq_default_governor();
2830 if (cpufreq_disabled())
2831 return -ENODEV;
2833 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2834 BUG_ON(!cpufreq_global_kobject);
2836 if (!strlen(default_governor))
2837 strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
2839 return 0;
2840 }
2841 module_param(off, int, 0444);
2842 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
2843 core_initcall(cpufreq_core_init);
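/*
 * Since this code is always built in, the two parameters above are set on
 * the kernel command line with the "cpufreq." prefix, e.g. (illustrative):
 *
 *	cpufreq.off=1
 *	cpufreq.default_governor=schedutil
 */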