2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
8 * Added handling for CPU hotplug
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/notifier.h>
24 #include <linux/cpufreq.h>
25 #include <linux/delay.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <linux/device.h>
29 #include <linux/slab.h>
30 #include <linux/cpu.h>
31 #include <linux/completion.h>
32 #include <linux/mutex.h>
33 #include <linux/syscore_ops.h>
35 #include <trace/events/power.h>
38 * The "cpufreq driver" - the arch- or hardware-dependent low
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
/* The single registered low-level driver; NULL until cpufreq_register_driver. */
42 static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU pointer to the policy that governs that CPU (shared across siblings). */
43 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
44 #ifdef CONFIG_HOTPLUG_CPU
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
/* Protects cpufreq_driver and the cpufreq_cpu_data per-CPU array above. */
48 static DEFINE_SPINLOCK(cpufreq_driver_lock);
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
62 * - All holders of the lock should check to make sure that the CPU they
63 * are concerned with are online after they get the lock.
64 * - Governor routines that can be called in cpufreq hotplug path should not
65 * take this sem as top level hotplug notifier handler takes this.
66 * - Lock should not be held across
67 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
/*
 * cpufreq_policy_cpu maps each CPU to the "owner" CPU whose rwsem guards its
 * policy; CPUs sharing a policy share one semaphore (see lock_policy_rwsem).
 */
69 static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
70 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
/*
 * Generates lock_policy_rwsem_read()/lock_policy_rwsem_write(): resolve the
 * owner CPU of @cpu's policy, take that semaphore in the requested mode, and
 * back out (returning an error — exact value stripped from this view) if the
 * CPU went offline while we were acquiring the lock.
 */
72 #define lock_policy_rwsem(mode, cpu) \
73 static int lock_policy_rwsem_##mode \
76 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
77 BUG_ON(policy_cpu == -1); \
78 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
79 if (unlikely(!cpu_online(cpu))) { \
80 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
/* Instantiate the read and write flavours. */
87 lock_policy_rwsem(read, cpu);
89 lock_policy_rwsem(write, cpu);
/* Drop the read lock taken by lock_policy_rwsem_read() for @cpu's policy. */
91 static void unlock_policy_rwsem_read(int cpu)
93 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
94 BUG_ON(policy_cpu == -1);
95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
/* Drop the write lock taken by lock_policy_rwsem_write() for @cpu's policy. */
98 static void unlock_policy_rwsem_write(int cpu)
100 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
101 BUG_ON(policy_cpu == -1);
102 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
106 /* internal prototypes */
107 static int __cpufreq_governor(struct cpufreq_policy *policy,
109 static unsigned int __cpufreq_get(unsigned int cpu);
110 static void handle_update(struct work_struct *work);
113 * Two notifier lists: the "policy" list is involved in the
114 * validation process for a new CPU frequency policy; the
115 * "transition" list for kernel code that needs to handle
116 * changes to devices when the CPU clock speed changes.
117 * The mutex locks both lists.
119 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
/* SRCU head: transition notifiers may be called from contexts where a
 * blocking chain would be unsuitable; initialised by the pure_initcall below. */
120 static struct srcu_notifier_head cpufreq_transition_notifier_list;
/* Sanity flag checked (WARN_ON) in cpufreq_register_notifier(). */
122 static bool init_cpufreq_transition_notifier_list_called;
123 static int __init init_cpufreq_transition_notifier_list(void)
125 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
126 init_cpufreq_transition_notifier_list_called = true;
/* pure_initcall: runs very early, before any notifier can be registered. */
129 pure_initcall(init_cpufreq_transition_notifier_list);
/* Non-zero once disable_cpufreq() has been called; read by cpufreq_disabled(). */
131 static int off __read_mostly;
132 static int cpufreq_disabled(void)
136 void disable_cpufreq(void)
/* Registered governors; list and its mutex. */
140 static LIST_HEAD(cpufreq_governor_list);
141 static DEFINE_MUTEX(cpufreq_governor_mutex);
/*
 * Look up and pin the policy of @cpu: takes a reference on the driver module
 * and (unless @sysfs) on the policy kobject. Returns NULL on failure.
 * Balanced by __cpufreq_cpu_put(). Error-unwind labels are stripped from
 * this view; the visible gotos fall through to module_put/unlock paths.
 */
143 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
145 struct cpufreq_policy *data;
148 if (cpu >= nr_cpu_ids)
151 /* get the cpufreq driver */
152 spin_lock_irqsave(&cpufreq_driver_lock, flags);
157 if (!try_module_get(cpufreq_driver->owner))
162 data = per_cpu(cpufreq_cpu_data, cpu);
165 goto err_out_put_module;
/* sysfs callers hold the kobject alive via the attribute file itself. */
167 if (!sysfs && !kobject_get(&data->kobj))
168 goto err_out_put_module;
170 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
174 module_put(cpufreq_driver->owner);
176 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Public variant: also pins the policy kobject (sysfs == false). */
181 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
183 return __cpufreq_cpu_get(cpu, false);
185 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/* sysfs-internal variant: pins the module but not the kobject. */
187 static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
189 return __cpufreq_cpu_get(cpu, true);
/* Release what __cpufreq_cpu_get() took; @sysfs must match the get call. */
192 static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
195 kobject_put(&data->kobj);
196 module_put(cpufreq_driver->owner);
/* Public pair of cpufreq_cpu_get(). */
199 void cpufreq_cpu_put(struct cpufreq_policy *data)
201 __cpufreq_cpu_put(data, false);
203 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/* Pair of cpufreq_cpu_get_sysfs(). */
205 static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
207 __cpufreq_cpu_put(data, true);
210 /*********************************************************************
211 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
212 *********************************************************************/
215 * adjust_jiffies - adjust the system "loops_per_jiffy"
217 * This function alters the system "loops_per_jiffy" for the clock
218 * speed change. Note that loops_per_jiffy cannot be updated on SMP
219 * systems as each CPU might be scaled differently. So, use the arch
220 * per-CPU loops_per_jiffy value wherever possible.
/* Reference lpj value and the frequency (kHz) it was captured at. */
223 static unsigned long l_p_j_ref;
224 static unsigned int l_p_j_ref_freq;
226 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
/* Drivers with constant loops (e.g. TSC-invariant) need no rescaling. */
228 if (ci->flags & CPUFREQ_CONST_LOOPS)
/* First transition: remember the boot-time lpj/frequency pair. */
231 if (!l_p_j_ref_freq) {
232 l_p_j_ref = loops_per_jiffy;
233 l_p_j_ref_freq = ci->old;
234 pr_debug("saving %lu as reference value for loops_per_jiffy; "
235 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
/* Rescale after a real change, or on suspend/resume notifications. */
237 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
238 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
239 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
241 pr_debug("scaling loops_per_jiffy to %lu "
242 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
/* Non-SMP-scaling build (config branch stripped from this view): no-op stub. */
246 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
254 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
255 * on frequency transition.
257 * This function calls the transition notifiers and the "adjust_jiffies"
258 * function. It is called twice on all CPU frequency changes that have
/* @state is CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE; must not be called
 * with interrupts disabled (notifiers may sleep via SRCU). */
261 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
263 struct cpufreq_policy *policy;
265 BUG_ON(irqs_disabled());
267 freqs->flags = cpufreq_driver->flags;
268 pr_debug("notification %u of frequency transition to %u kHz\n",
271 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
274 case CPUFREQ_PRECHANGE:
275 /* detect if the driver reported a value as "old frequency"
276 * which is not equal to what the cpufreq core thinks is
279 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
280 if ((policy) && (policy->cpu == freqs->cpu) &&
281 (policy->cur) && (policy->cur != freqs->old)) {
282 pr_debug("Warning: CPU frequency is"
283 " %u, cpufreq assumed %u kHz.\n",
284 freqs->old, policy->cur);
/* Trust the core's view: correct the driver-reported old freq. */
285 freqs->old = policy->cur;
288 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
289 CPUFREQ_PRECHANGE, freqs);
290 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
293 case CPUFREQ_POSTCHANGE:
294 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
295 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
296 (unsigned long)freqs->cpu);
297 trace_cpu_frequency(freqs->new, freqs->cpu);
298 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
299 CPUFREQ_POSTCHANGE, freqs);
/* Record the new frequency as the policy's current one. */
300 if (likely(policy) && likely(policy->cpu == freqs->cpu))
301 policy->cur = freqs->new;
305 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
309 /*********************************************************************
311 *********************************************************************/
/*
 * Find a registered governor by (case-insensitive) name; returns NULL when
 * not found. Caller must hold cpufreq_governor_mutex.
 */
313 static struct cpufreq_governor *__find_governor(const char *str_governor)
315 struct cpufreq_governor *t;
317 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
318 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
325 * cpufreq_parse_governor - parse a governor string
/* For setpolicy drivers @policy receives PERFORMANCE/POWERSAVE; for target
 * drivers @governor receives the governor, loading its module on demand. */
327 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
328 struct cpufreq_governor **governor)
335 if (cpufreq_driver->setpolicy) {
336 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
337 *policy = CPUFREQ_POLICY_PERFORMANCE;
339 } else if (!strnicmp(str_governor, "powersave",
341 *policy = CPUFREQ_POLICY_POWERSAVE;
344 } else if (cpufreq_driver->target) {
345 struct cpufreq_governor *t;
347 mutex_lock(&cpufreq_governor_mutex);
349 t = __find_governor(str_governor);
/* Not registered yet: drop the mutex (request_module may sleep and the
 * module init will take the mutex itself), load it, then re-lookup. */
354 mutex_unlock(&cpufreq_governor_mutex);
355 ret = request_module("cpufreq_%s", str_governor);
356 mutex_lock(&cpufreq_governor_mutex);
359 t = __find_governor(str_governor);
367 mutex_unlock(&cpufreq_governor_mutex);
375 * cpufreq_per_cpu_attr_read() / show_##file_name() -
376 * print out cpufreq information
378 * Write out information from cpufreq_driver->policy[cpu]; object must be
/* Generates a sysfs show handler printing one unsigned policy member. */
382 #define show_one(file_name, object) \
383 static ssize_t show_##file_name \
384 (struct cpufreq_policy *policy, char *buf) \
386 return sprintf(buf, "%u\n", policy->object); \
389 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
390 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
391 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
392 show_one(scaling_min_freq, min);
393 show_one(scaling_max_freq, max);
394 show_one(scaling_cur_freq, cur);
/* Forward declaration: used by the store handlers below. */
396 static int __cpufreq_set_policy(struct cpufreq_policy *data,
397 struct cpufreq_policy *policy);
400 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
/* Generates a store handler: parse one unsigned value, apply it through
 * __cpufreq_set_policy(), and remember it as the user's requested value. */
402 #define store_one(file_name, object) \
403 static ssize_t store_##file_name \
404 (struct cpufreq_policy *policy, const char *buf, size_t count) \
407 struct cpufreq_policy new_policy; \
409 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
413 ret = sscanf(buf, "%u", &new_policy.object); \
417 ret = __cpufreq_set_policy(policy, &new_policy); \
418 policy->user_policy.object = policy->object; \
420 return ret ? ret : count; \
423 store_one(scaling_min_freq, min);
424 store_one(scaling_max_freq, max);
427 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
/* Queries the driver via __cpufreq_get(); prints "<unknown>" on failure. */
429 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
432 unsigned int cur_freq = __cpufreq_get(policy->cpu);
434 return sprintf(buf, "<unknown>");
435 return sprintf(buf, "%u\n", cur_freq);
440 * show_scaling_governor - show the current policy for the specified CPU
/* setpolicy drivers report the static policy name; target drivers report
 * the active governor's name. */
442 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
444 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
445 return sprintf(buf, "powersave\n");
446 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
447 return sprintf(buf, "performance\n");
448 else if (policy->governor)
449 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
450 policy->governor->name);
456 * store_scaling_governor - store policy for the specified CPU
458 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
459 const char *buf, size_t count)
462 char str_governor[16];
463 struct cpufreq_policy new_policy;
465 ret = cpufreq_get_policy(&new_policy, policy->cpu);
/* %15s: bounded read, leaves room for the NUL terminator. */
469 ret = sscanf(buf, "%15s", str_governor);
473 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
474 &new_policy.governor))
477 /* Do not use cpufreq_set_policy here or the user_policy.max
478 will be wrongly overridden */
479 ret = __cpufreq_set_policy(policy, &new_policy);
/* Record what the user asked for, as actually applied. */
481 policy->user_policy.policy = policy->policy;
482 policy->user_policy.governor = policy->governor;
491 * show_scaling_driver - show the cpufreq driver currently loaded
493 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
495 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
499 * show_scaling_available_governors - show the available CPUfreq governors
501 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
505 struct cpufreq_governor *t;
/* setpolicy-only drivers support just the two static policies. */
507 if (!cpufreq_driver->target) {
508 i += sprintf(buf, "performance powersave");
512 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
/* Stop before a name could overflow the PAGE_SIZE sysfs buffer. */
513 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
514 - (CPUFREQ_NAME_LEN + 2)))
516 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
519 i += sprintf(&buf[i], "\n");
/* Render @mask as a space-separated list of CPU numbers into a sysfs page. */
523 static ssize_t show_cpus(const struct cpumask *mask, char *buf)
528 for_each_cpu(cpu, mask) {
530 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
531 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
/* Reserve room for the trailing "\n" and NUL. */
532 if (i >= (PAGE_SIZE - 5))
535 i += sprintf(&buf[i], "\n");
540 * show_related_cpus - show the CPUs affected by each transition even if
541 * hw coordination is in use
543 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
/* Fall back to the affected set when the driver filled in no related set. */
545 if (cpumask_empty(policy->related_cpus))
546 return show_cpus(policy->cpus, buf);
547 return show_cpus(policy->related_cpus, buf);
551 * show_affected_cpus - show the CPUs affected by each transition
553 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
555 return show_cpus(policy->cpus, buf);
/* Forward a user-requested speed to the governor (userspace governor only);
 * rejected when the governor has no store_setspeed hook. */
558 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
559 const char *buf, size_t count)
561 unsigned int freq = 0;
564 if (!policy->governor || !policy->governor->store_setspeed)
567 ret = sscanf(buf, "%u", &freq);
571 policy->governor->store_setspeed(policy, freq);
/* Mirror image of the store above; "<unsupported>" when not applicable. */
576 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
578 if (!policy->governor || !policy->governor->show_setspeed)
579 return sprintf(buf, "<unsupported>\n");
581 return policy->governor->show_setspeed(policy, buf);
585 * show_bios_limit - show the current cpufreq HW/BIOS limitation
/* Ask the driver for a firmware-imposed limit; fall back to cpuinfo.max_freq. */
587 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
591 if (cpufreq_driver->bios_limit) {
592 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
594 return sprintf(buf, "%u\n", limit);
596 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
/* Declare the struct freq_attr objects binding file names to the show/store
 * handlers above; cpuinfo_cur_freq is root-readable only (0400). */
599 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
600 cpufreq_freq_attr_ro(cpuinfo_min_freq);
601 cpufreq_freq_attr_ro(cpuinfo_max_freq);
602 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
603 cpufreq_freq_attr_ro(scaling_available_governors);
604 cpufreq_freq_attr_ro(scaling_driver);
605 cpufreq_freq_attr_ro(scaling_cur_freq);
606 cpufreq_freq_attr_ro(bios_limit);
607 cpufreq_freq_attr_ro(related_cpus);
608 cpufreq_freq_attr_ro(affected_cpus);
609 cpufreq_freq_attr_rw(scaling_min_freq);
610 cpufreq_freq_attr_rw(scaling_max_freq);
611 cpufreq_freq_attr_rw(scaling_governor);
612 cpufreq_freq_attr_rw(scaling_setspeed);
/* Attributes created for every policy kobject; driver/feature-conditional
 * files (cpuinfo_cur_freq, scaling_cur_freq, bios_limit) are added
 * separately in cpufreq_add_dev_interface(). */
614 static struct attribute *default_attrs[] = {
615 &cpuinfo_min_freq.attr,
616 &cpuinfo_max_freq.attr,
617 &cpuinfo_transition_latency.attr,
618 &scaling_min_freq.attr,
619 &scaling_max_freq.attr,
622 &scaling_governor.attr,
623 &scaling_driver.attr,
624 &scaling_available_governors.attr,
625 &scaling_setspeed.attr,
/* Parent kobject for driver-global sysfs files (e.g. /sys/devices/system/cpu/cpufreq). */
629 struct kobject *cpufreq_global_kobject;
630 EXPORT_SYMBOL(cpufreq_global_kobject);
/* Recover the policy / freq_attr from the embedded kobject / attribute. */
632 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
633 #define to_attr(a) container_of(a, struct freq_attr, attr)
/*
 * Generic sysfs show dispatcher: pin the policy, take the policy rwsem for
 * reading, and delegate to the attribute's show handler.
 */
635 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
637 struct cpufreq_policy *policy = to_policy(kobj);
638 struct freq_attr *fattr = to_attr(attr);
639 ssize_t ret = -EINVAL;
/* Re-get via the sysfs variant so the module stays pinned during the read. */
640 policy = cpufreq_cpu_get_sysfs(policy->cpu);
644 if (lock_policy_rwsem_read(policy->cpu) < 0)
648 ret = fattr->show(policy, buf);
652 unlock_policy_rwsem_read(policy->cpu);
654 cpufreq_cpu_put_sysfs(policy);
/*
 * Generic sysfs store dispatcher: mirror of show(), but takes the policy
 * rwsem in write mode before delegating to the attribute's store handler.
 */
659 static ssize_t store(struct kobject *kobj, struct attribute *attr,
660 const char *buf, size_t count)
662 struct cpufreq_policy *policy = to_policy(kobj);
663 struct freq_attr *fattr = to_attr(attr);
664 ssize_t ret = -EINVAL;
665 policy = cpufreq_cpu_get_sysfs(policy->cpu);
669 if (lock_policy_rwsem_write(policy->cpu) < 0)
673 ret = fattr->store(policy, buf, count);
677 unlock_policy_rwsem_write(policy->cpu);
679 cpufreq_cpu_put_sysfs(policy);
/* kobject release: wake whoever is blocked in wait_for_completion() on
 * kobj_unregister (the remove path), signalling the last ref is gone. */
684 static void cpufreq_sysfs_release(struct kobject *kobj)
686 struct cpufreq_policy *policy = to_policy(kobj);
687 pr_debug("last reference is dropped\n");
688 complete(&policy->kobj_unregister);
/* Route all attribute I/O through the show()/store() dispatchers above. */
691 static const struct sysfs_ops sysfs_ops = {
/* kobject type for per-policy "cpufreq" directories. */
696 static struct kobj_type ktype_cpufreq = {
697 .sysfs_ops = &sysfs_ops,
698 .default_attrs = default_attrs,
699 .release = cpufreq_sysfs_release,
706 * Positive: When we have a managed CPU and the sysfs got symlinked
/* Hotplug add helper: restore a previously saved governor for @cpu and, if a
 * sibling already owns a policy covering @cpu, join it (symlink only) instead
 * of creating a new policy. Ordering of the rwsem drop/retake around the
 * policy_cpu update is delicate — do not reorder. */
708 static int cpufreq_add_dev_policy(unsigned int cpu,
709 struct cpufreq_policy *policy,
716 #ifdef CONFIG_HOTPLUG_CPU
717 struct cpufreq_governor *gov;
/* Reinstate the governor this CPU used before it was last offlined. */
719 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
721 policy->governor = gov;
722 pr_debug("Restoring governor %s for cpu %d\n",
723 policy->governor->name, cpu);
727 for_each_cpu(j, policy->cpus) {
728 struct cpufreq_policy *managed_policy;
733 /* Check for existing affected CPUs.
734 * They may not be aware of it due to CPU Hotplug.
735 * cpufreq_cpu_put is called when the device is removed
736 * in __cpufreq_remove_dev()
738 managed_policy = cpufreq_cpu_get(j);
739 if (unlikely(managed_policy)) {
741 /* Set proper policy_cpu */
742 unlock_policy_rwsem_write(cpu);
743 per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
/* Re-acquire: now resolves to the managing CPU's semaphore. */
745 if (lock_policy_rwsem_write(cpu) < 0) {
746 /* Should not go through policy unlock path */
747 if (cpufreq_driver->exit)
748 cpufreq_driver->exit(policy);
749 cpufreq_cpu_put(managed_policy);
/* Fold @cpu into the sibling's policy under the driver lock. */
753 spin_lock_irqsave(&cpufreq_driver_lock, flags);
754 cpumask_copy(managed_policy->cpus, policy->cpus);
755 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
756 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
758 pr_debug("CPU already managed, adding link\n");
759 ret = sysfs_create_link(&dev->kobj,
760 &managed_policy->kobj,
763 cpufreq_cpu_put(managed_policy);
765 * Success. We only needed to be added to the mask.
766 * Call driver->exit() because only the cpu parent of
767 * the kobj needed to call init().
769 if (cpufreq_driver->exit)
770 cpufreq_driver->exit(policy);
783 /* symlink affected CPUs */
/* For every other CPU in the policy, create a "cpufreq" symlink in its
 * device directory pointing at the owning CPU's policy kobject. The
 * cpufreq_cpu_get() reference is released in the remove path (or on error). */
784 static int cpufreq_add_dev_symlink(unsigned int cpu,
785 struct cpufreq_policy *policy)
790 for_each_cpu(j, policy->cpus) {
791 struct cpufreq_policy *managed_policy;
792 struct device *cpu_dev;
799 pr_debug("CPU %u already managed, adding link\n", j);
800 managed_policy = cpufreq_cpu_get(cpu);
801 cpu_dev = get_cpu_device(j);
802 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
/* Error path: drop the reference taken above. */
805 cpufreq_cpu_put(managed_policy);
/*
 * Create the sysfs directory and files for a new policy, publish it in the
 * per-CPU tables, symlink sibling CPUs, and apply the initial policy.
 * On error after kobject_init_and_add(), the kobject must be torn down via
 * kobject_put() + wait_for_completion() (bottom of the function).
 */
812 static int cpufreq_add_dev_interface(unsigned int cpu,
813 struct cpufreq_policy *policy,
816 struct cpufreq_policy new_policy;
817 struct freq_attr **drv_attr;
822 /* prepare interface data */
823 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
824 &dev->kobj, "cpufreq");
828 /* set up files for this cpu device */
/* Driver-specific attributes first (NULL-terminated array, may be absent). */
829 drv_attr = cpufreq_driver->attr;
830 while ((drv_attr) && (*drv_attr)) {
831 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
833 goto err_out_kobj_put;
/* Conditional core attributes, keyed on driver capabilities. */
836 if (cpufreq_driver->get) {
837 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
839 goto err_out_kobj_put;
841 if (cpufreq_driver->target) {
842 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
844 goto err_out_kobj_put;
846 if (cpufreq_driver->bios_limit) {
847 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
849 goto err_out_kobj_put;
/* Publish the policy for every CPU it covers, under the driver lock. */
852 spin_lock_irqsave(&cpufreq_driver_lock, flags);
853 for_each_cpu(j, policy->cpus) {
856 per_cpu(cpufreq_cpu_data, j) = policy;
857 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
859 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
861 ret = cpufreq_add_dev_symlink(cpu, policy);
863 goto err_out_kobj_put;
865 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
866 /* assure that the starting sequence is run in __cpufreq_set_policy */
867 policy->governor = NULL;
869 /* set default policy */
870 ret = __cpufreq_set_policy(policy, &new_policy);
871 policy->user_policy.policy = policy->policy;
872 policy->user_policy.governor = policy->governor;
875 pr_debug("setting policy failed\n");
876 if (cpufreq_driver->exit)
877 cpufreq_driver->exit(policy);
/* Kobject teardown: release waits on kobj_unregister (see cpufreq_sysfs_release). */
882 kobject_put(&policy->kobj);
883 wait_for_completion(&policy->kobj_unregister);
889 * cpufreq_add_dev - add a CPU device
891 * Adds the cpufreq interface for a CPU device.
893 * The Oracle says: try running cpufreq registration/unregistration concurrently
894 * with with cpu hotplugging and all hell will break loose. Tried to clean this
895 * mess up, but more thorough testing is needed. - Mathieu
/* subsys_interface add hook; also invoked from the remove path to hand the
 * sysfs directory over to a surviving sibling CPU. */
897 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
899 unsigned int cpu = dev->id;
900 int ret = 0, found = 0;
901 struct cpufreq_policy *policy;
904 #ifdef CONFIG_HOTPLUG_CPU
908 if (cpu_is_offline(cpu))
911 pr_debug("adding CPU %u\n", cpu);
914 /* check whether a different CPU already registered this
915 * CPU because it is in the same boat. */
916 policy = cpufreq_cpu_get(cpu);
917 if (unlikely(policy)) {
918 cpufreq_cpu_put(policy);
/* Pin the driver module for the duration of initialisation. */
923 if (!try_module_get(cpufreq_driver->owner)) {
929 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
933 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
934 goto err_free_policy;
936 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
937 goto err_free_cpumask;
940 cpumask_copy(policy->cpus, cpumask_of(cpu));
942 /* Initially set CPU itself as the policy_cpu */
943 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
944 ret = (lock_policy_rwsem_write(cpu) < 0);
947 init_completion(&policy->kobj_unregister);
948 INIT_WORK(&policy->update, handle_update);
950 /* Set governor before ->init, so that driver could check it */
951 #ifdef CONFIG_HOTPLUG_CPU
/* Prefer the governor already used by an online related sibling. */
952 for_each_online_cpu(sibling) {
953 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
954 if (cp && cp->governor &&
955 (cpumask_test_cpu(cpu, cp->related_cpus))) {
956 policy->governor = cp->governor;
963 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
964 /* call driver. From then on the cpufreq must be able
965 * to accept all calls to ->verify and ->setpolicy for this CPU
967 ret = cpufreq_driver->init(policy);
969 pr_debug("initialization failed\n");
970 goto err_unlock_policy;
/* Remember the driver-provided limits as the user's baseline. */
972 policy->user_policy.min = policy->min;
973 policy->user_policy.max = policy->max;
975 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
976 CPUFREQ_START, policy);
978 ret = cpufreq_add_dev_policy(cpu, policy, dev);
981 /* This is a managed cpu, symlink created,
984 goto err_unlock_policy;
987 ret = cpufreq_add_dev_interface(cpu, policy, dev);
989 goto err_out_unregister;
991 unlock_policy_rwsem_write(cpu);
993 kobject_uevent(&policy->kobj, KOBJ_ADD);
994 module_put(cpufreq_driver->owner);
995 pr_debug("initialization complete\n");
/* Error unwind: unpublish from per-CPU table, tear down kobject, free masks,
 * drop locks and the module reference, in reverse order of acquisition. */
1001 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1002 for_each_cpu(j, policy->cpus)
1003 per_cpu(cpufreq_cpu_data, j) = NULL;
1004 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1006 kobject_put(&policy->kobj);
1007 wait_for_completion(&policy->kobj_unregister);
1010 unlock_policy_rwsem_write(cpu);
1011 free_cpumask_var(policy->related_cpus);
1013 free_cpumask_var(policy->cpus);
1017 module_put(cpufreq_driver->owner);
1024 * __cpufreq_remove_dev - remove a CPU device
1026 * Removes the cpufreq interface for a CPU device.
1027 * Caller should already have policy_rwsem in write mode for this CPU.
1028 * This routine frees the rwsem before returning.
1030 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1032 unsigned int cpu = dev->id;
1033 unsigned long flags;
1034 struct cpufreq_policy *data;
1035 struct kobject *kobj;
1036 struct completion *cmp;
1038 struct device *cpu_dev;
1042 pr_debug("unregistering CPU %u\n", cpu);
1044 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1045 data = per_cpu(cpufreq_cpu_data, cpu);
/* No policy registered for this CPU: nothing to do. */
1048 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1049 unlock_policy_rwsem_write(cpu);
1052 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1056 /* if this isn't the CPU which is the parent of the kobj, we
1057 * only need to unlink, put and exit
1059 if (unlikely(cpu != data->cpu)) {
1060 pr_debug("removing link\n");
1061 cpumask_clear_cpu(cpu, data->cpus);
1062 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1064 cpufreq_cpu_put(data);
1065 unlock_policy_rwsem_write(cpu);
1066 sysfs_remove_link(kobj, "cpufreq");
/* From here on: @cpu owns the kobject. Save its governor for re-onlining. */
1073 #ifdef CONFIG_HOTPLUG_CPU
1074 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1078 /* if we have other CPUs still registered, we need to unlink them,
1079 * or else wait_for_completion below will lock up. Clean the
1080 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1081 * the sysfs links afterwards.
1083 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1084 for_each_cpu(j, data->cpus) {
1087 per_cpu(cpufreq_cpu_data, j) = NULL;
1091 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Second pass, outside the spinlock: drop each sibling's symlink and ref.
 * The rwsem is dropped around sysfs_remove_link (it may block on sysfs). */
1093 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1094 for_each_cpu(j, data->cpus) {
1097 pr_debug("removing link for cpu %u\n", j);
1098 #ifdef CONFIG_HOTPLUG_CPU
1099 strncpy(per_cpu(cpufreq_cpu_governor, j),
1100 data->governor->name, CPUFREQ_NAME_LEN);
1102 cpu_dev = get_cpu_device(j);
1103 kobj = &cpu_dev->kobj;
1104 unlock_policy_rwsem_write(cpu);
1105 sysfs_remove_link(kobj, "cpufreq");
1106 lock_policy_rwsem_write(cpu);
1107 cpufreq_cpu_put(data);
1111 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Stop the governor before tearing the policy down. */
1114 if (cpufreq_driver->target)
1115 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1118 cmp = &data->kobj_unregister;
1119 unlock_policy_rwsem_write(cpu);
1122 /* we need to make sure that the underlying kobj is actually
1123 * not referenced anymore by anybody before we proceed with
1126 pr_debug("waiting for dropping of refcount\n");
1127 wait_for_completion(cmp);
1128 pr_debug("wait complete\n");
1130 lock_policy_rwsem_write(cpu);
1131 if (cpufreq_driver->exit)
1132 cpufreq_driver->exit(data);
1133 unlock_policy_rwsem_write(cpu);
1135 #ifdef CONFIG_HOTPLUG_CPU
1136 /* when the CPU which is the parent of the kobj is hotplugged
1137 * offline, check for siblings, and create cpufreq sysfs interface
1140 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1141 /* first sibling now owns the new sysfs dir */
1142 cpumask_clear_cpu(cpu, data->cpus);
1143 cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
1145 /* finally remove our own symlink */
1146 lock_policy_rwsem_write(cpu);
/* Recursive call now takes the "not the kobj parent" fast path above. */
1147 __cpufreq_remove_dev(dev, sif);
1151 free_cpumask_var(data->related_cpus);
1152 free_cpumask_var(data->cpus);
/* subsys_interface remove hook: take the policy write lock (which
 * __cpufreq_remove_dev releases before returning) and delegate. */
1159 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1161 unsigned int cpu = dev->id;
1164 if (cpu_is_offline(cpu))
1167 if (unlikely(lock_policy_rwsem_write(cpu)))
1170 retval = __cpufreq_remove_dev(dev, sif);
/* Deferred-work handler (policy->update): re-evaluate the policy from
 * process context, scheduled e.g. by __cpufreq_get() on a freq mismatch. */
1175 static void handle_update(struct work_struct *work)
1177 struct cpufreq_policy *policy =
1178 container_of(work, struct cpufreq_policy, update);
1179 unsigned int cpu = policy->cpu;
1180 pr_debug("handle_update for cpu %u called\n", cpu);
1181 cpufreq_update_policy(cpu);
1185 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1187 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1188 * @new_freq: CPU frequency the CPU actually runs at
1190 * We adjust to current frequency first, and need to clean up later.
1191 * So either call to cpufreq_update_policy() or schedule handle_update()).
1193 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1194 unsigned int new_freq)
1196 struct cpufreq_freqs freqs;
1198 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1199 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
/* Fire a synthetic PRE+POST transition so notifiers and policy->cur
 * catch up with the hardware's real frequency. */
1202 freqs.old = old_freq;
1203 freqs.new = new_freq;
1204 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1205 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1210 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1213 * This is the last known freq, without actually getting it from the driver.
1214 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
/* Returns 0 when no policy exists for @cpu. */
1216 unsigned int cpufreq_quick_get(unsigned int cpu)
1218 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1219 unsigned int ret_freq = 0;
1222 ret_freq = policy->cur;
1223 cpufreq_cpu_put(policy);
1228 EXPORT_SYMBOL(cpufreq_quick_get);
1231 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1234 * Just return the max possible frequency for a given CPU.
/* Returns 0 when no policy exists for @cpu. */
1236 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1238 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1239 unsigned int ret_freq = 0;
1242 ret_freq = policy->max;
1243 cpufreq_cpu_put(policy);
1248 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * Ask the driver for @cpu's actual hardware frequency. Caller must hold the
 * policy rwsem (see cpufreq_get()). If the hardware disagrees with
 * policy->cur, resynchronise and schedule a deferred policy update.
 */
1251 static unsigned int __cpufreq_get(unsigned int cpu)
1253 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1254 unsigned int ret_freq = 0;
/* Driver cannot report the current frequency. */
1256 if (!cpufreq_driver->get)
1259 ret_freq = cpufreq_driver->get(cpu);
1261 if (ret_freq && policy->cur &&
1262 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1263 /* verify no discrepancy between actual and
1264 saved value exists */
1265 if (unlikely(ret_freq != policy->cur)) {
1266 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1267 schedule_work(&policy->update);
1275 * cpufreq_get - get the current CPU frequency (in kHz)
1278 * Get the CPU current (static) CPU frequency
/* Locked wrapper around __cpufreq_get(); returns 0 when no policy exists
 * or the read lock cannot be taken. */
1280 unsigned int cpufreq_get(unsigned int cpu)
1282 unsigned int ret_freq = 0;
1283 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1288 if (unlikely(lock_policy_rwsem_read(cpu)))
1291 ret_freq = __cpufreq_get(cpu);
1293 unlock_policy_rwsem_read(cpu);
1296 cpufreq_cpu_put(policy);
1300 EXPORT_SYMBOL(cpufreq_get);
/* Hook cpufreq into the CPU subsystem: add/remove called per CPU device. */
1302 static struct subsys_interface cpufreq_interface = {
1304 .subsys = &cpu_subsys,
1305 .add_dev = cpufreq_add_dev,
1306 .remove_dev = cpufreq_remove_dev,
1311 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1313 * This function is only executed for the boot processor. The other CPUs
1314 * have been put offline by means of CPU hotplug.
1316 static int cpufreq_bp_suspend(void)
1320 int cpu = smp_processor_id();
1321 struct cpufreq_policy *cpu_policy;
1323 pr_debug("suspending cpu %u\n", cpu);
1325 /* If there's no policy for the boot CPU, we have nothing to do. */
1326 cpu_policy = cpufreq_cpu_get(cpu);
/* Suspend hook is optional; only call it when the driver provides one. */
1330 if (cpufreq_driver->suspend) {
1331 ret = cpufreq_driver->suspend(cpu_policy);
1333 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1334 "step on CPU %u\n", cpu_policy->cpu);
1337 cpufreq_cpu_put(cpu_policy);
1342 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1344 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1345 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1346 *	    restored. It will verify that the current freq is in sync with
1347 *	    what we believe it to be. This is a bit later than when it
1348 *	    should be, but nonetheless it's better than calling
1349 *	    cpufreq_driver->get() here which might re-enable interrupts...
1351 * This function is only executed for the boot CPU.  The other CPUs have not
1352 * been turned on yet.
1354 static void cpufreq_bp_resume(void)
1358 	int cpu = smp_processor_id();
1359 	struct cpufreq_policy *cpu_policy;
1361 	pr_debug("resuming cpu %u\n", cpu);
1363 	/* If there's no policy for the boot CPU, we have nothing to do. */
1364 	cpu_policy = cpufreq_cpu_get(cpu);
/* Step 1: let the driver restore its hardware state, if it cares. */
1368 	if (cpufreq_driver->resume) {
1369 		ret = cpufreq_driver->resume(cpu_policy);
1371 			printk(KERN_ERR "cpufreq: resume failed in ->resume "
1372 					"step on CPU %u\n", cpu_policy->cpu);
/* Step 2: defer the freq re-sync to process context (see header above). */
1377 	schedule_work(&cpu_policy->update);
1380 	cpufreq_cpu_put(cpu_policy);
/*
 * Syscore ops run with only the boot CPU online and interrupts disabled,
 * which is why the bp_* handlers above only deal with one CPU.
 */
1383 static struct syscore_ops cpufreq_syscore_ops = {
1384 	.suspend	= cpufreq_bp_suspend,
1385 	.resume		= cpufreq_bp_resume,
1389 /*********************************************************************
1390  *                     NOTIFIER LISTS INTERFACE                      *
1391  *********************************************************************/
1394  *	cpufreq_register_notifier - register a driver with cpufreq
1395  *	@nb: notifier function to register
1396  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1398  *	Add a driver to one of two lists: either a list of drivers that
1399  *      are notified about clock rate changes (once before and once after
1400  *      the transition), or a list of drivers that are notified about
1401  *      changes in cpufreq policy.
1403  *	This function may sleep, and has the same return conditions as
1404  *	blocking_notifier_chain_register.
1406 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
/* Catch callers registering before the transition list is initialised. */
1410 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1413 	case CPUFREQ_TRANSITION_NOTIFIER:
/* Transition notifiers use an SRCU chain (callable from atomic-ish paths). */
1414 		ret = srcu_notifier_chain_register(
1415 				&cpufreq_transition_notifier_list, nb);
1417 	case CPUFREQ_POLICY_NOTIFIER:
/* Policy notifiers use a blocking chain; callbacks may sleep. */
1418 		ret = blocking_notifier_chain_register(
1419 				&cpufreq_policy_notifier_list, nb);
1427 EXPORT_SYMBOL(cpufreq_register_notifier);
1431  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
1432  *	@nb: notifier block to be unregistered
1433  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1435  *	Remove a driver from the CPU frequency notifier list.
1437  *	This function may sleep, and has the same return conditions as
1438  *	blocking_notifier_chain_unregister.
1440 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
/* Mirror of cpufreq_register_notifier(): pick the matching chain type. */
1445 	case CPUFREQ_TRANSITION_NOTIFIER:
1446 		ret = srcu_notifier_chain_unregister(
1447 				&cpufreq_transition_notifier_list, nb);
1449 	case CPUFREQ_POLICY_NOTIFIER:
1450 		ret = blocking_notifier_chain_unregister(
1451 				&cpufreq_policy_notifier_list, nb);
1459 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1462 /*********************************************************************
1464  *********************************************************************/
/*
 * Ask the driver to change the frequency of @policy's CPU.  Caller must
 * hold the policy rwsem in write mode (see cpufreq_driver_target()).
 * Returns a negative errno on failure; -EINVAL if the driver has no
 * ->target or the CPU is offline.
 */
1467 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1468 			    unsigned int target_freq,
1469 			    unsigned int relation)
1471 	int retval = -EINVAL;
1472 	unsigned int old_target_freq = target_freq;
1474 	if (cpufreq_disabled())
1477 	/* Make sure that target_freq is within supported range */
1478 	if (target_freq > policy->max)
1479 		target_freq = policy->max;
1480 	if (target_freq < policy->min)
1481 		target_freq = policy->min;
1483 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1484 			policy->cpu, target_freq, relation, old_target_freq);
/* Already there (after clamping): skip the hardware call entirely. */
1486 	if (target_freq == policy->cur)
1489 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
1490 		retval = cpufreq_driver->target(policy, target_freq, relation);
1494 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * Locked wrapper around __cpufreq_driver_target(): takes a policy
 * refcount and the per-CPU policy rwsem in write mode around the call.
 */
1496 int cpufreq_driver_target(struct cpufreq_policy *policy,
1497 			  unsigned int target_freq,
1498 			  unsigned int relation)
/* Re-get the policy to pin it (and bail if the CPU is being managed away). */
1502 	policy = cpufreq_cpu_get(policy->cpu);
1506 	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1509 	ret = __cpufreq_driver_target(policy, target_freq, relation);
1511 	unlock_policy_rwsem_write(policy->cpu);
1514 	cpufreq_cpu_put(policy);
1518 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * Return the driver's notion of the average frequency of @cpu, or an
 * error/0 path (elided here) when the CPU is offline or the driver has
 * no ->getavg hook.  Pins the policy around the driver call.
 */
1520 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1524 	if (!(cpu_online(cpu) && cpufreq_driver->getavg))
/* NOTE(review): pins policy->cpu's policy, not necessarily @cpu's. */
1527 	policy = cpufreq_cpu_get(policy->cpu);
1531 	ret = cpufreq_driver->getavg(policy, cpu);
1533 	cpufreq_cpu_put(policy);
1536 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * Dispatch a governor event (START/STOP/LIMITS) to the policy's governor,
 * handling the fallback and module-refcount bookkeeping below.
 */
1539  * when "event" is CPUFREQ_GOV_LIMITS
1542 static int __cpufreq_governor(struct cpufreq_policy *policy,
1547 	/* Only must be defined when default governor is known to have latency
1548 	   restrictions, like e.g. conservative or ondemand.
1549 	   That this is the case is already ensured in Kconfig
1551 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1552 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
1554 	struct cpufreq_governor *gov = NULL;
/*
 * If the hardware's transition latency exceeds what the chosen governor
 * can tolerate, fall back to the performance governor (when built in).
 */
1557 	if (policy->governor->max_transition_latency &&
1558 	    policy->cpuinfo.transition_latency >
1559 	    policy->governor->max_transition_latency) {
1563 			printk(KERN_WARNING "%s governor failed, too long"
1564 			       " transition latency of HW, fallback"
1565 			       " to %s governor\n",
1566 			       policy->governor->name,
1568 			policy->governor = gov;
/* Hold a module ref on the governor for the duration of the callback. */
1572 	if (!try_module_get(policy->governor->owner))
1575 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1576 						policy->cpu, event);
1577 	ret = policy->governor->governor(policy, event);
1579 	/* we keep one module reference alive for
1580 			each CPU governed by this CPU */
/* Keep the ref only across a successful START; drop it otherwise ... */
1581 	if ((event != CPUFREQ_GOV_START) || ret)
1582 		module_put(policy->governor->owner);
/* ... and release the START-time ref on a successful STOP. */
1583 	if ((event == CPUFREQ_GOV_STOP) && !ret)
1584 		module_put(policy->governor->owner);
/*
 * Add @governor to the global governor list, unless a governor of the
 * same name is already registered (duplicate path elided here).
 */
1590 int cpufreq_register_governor(struct cpufreq_governor *governor)
1597 	if (cpufreq_disabled())
/* cpufreq_governor_mutex guards the list and the name-uniqueness check. */
1600 	mutex_lock(&cpufreq_governor_mutex);
1603 	if (__find_governor(governor->name) == NULL) {
1605 		list_add(&governor->governor_list, &cpufreq_governor_list);
1608 	mutex_unlock(&cpufreq_governor_mutex);
1611 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * Remove @governor from the global list.  Also wipes any saved
 * "last governor" name for offline CPUs so a later hotplug add does not
 * resurrect a governor that no longer exists.
 */
1614 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1616 #ifdef CONFIG_HOTPLUG_CPU
1623 	if (cpufreq_disabled())
1626 #ifdef CONFIG_HOTPLUG_CPU
1627 	for_each_present_cpu(cpu) {
/* Online CPUs keep their policy; only stale offline records need clearing. */
1628 		if (cpu_online(cpu))
1630 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1631 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1635 	mutex_lock(&cpufreq_governor_mutex);
1636 	list_del(&governor->governor_list);
1637 	mutex_unlock(&cpufreq_governor_mutex);
1640 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1644 /*********************************************************************
1645  *                          POLICY INTERFACE                         *
1646  *********************************************************************/
1649  * cpufreq_get_policy - get the current cpufreq_policy
1650  * @policy: struct cpufreq_policy into which the current cpufreq_policy
1653  * Reads the current cpufreq policy.
1655 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1657 	struct cpufreq_policy *cpu_policy;
1661 	cpu_policy = cpufreq_cpu_get(cpu);
/* Snapshot the live policy into the caller's buffer, then drop the ref. */
1665 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1667 	cpufreq_cpu_put(cpu_policy);
1670 EXPORT_SYMBOL(cpufreq_get_policy);
1674  *		data   : current policy.
1675  *		policy : policy to be set.
/*
 * Apply @policy on top of the live policy @data: validate the limits,
 * let policy notifiers adjust them, commit min/max, then either call the
 * driver's ->setpolicy or switch/kick the software governor.
 * Caller must hold the policy rwsem in write mode (see callers).
 */
1677 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1678 				struct cpufreq_policy *policy)
1682 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1683 		policy->min, policy->max);
1685 	memcpy(&policy->cpuinfo, &data->cpuinfo,
1686 				sizeof(struct cpufreq_cpuinfo));
/* Reject ranges that do not even overlap the current limits. */
1688 	if (policy->min > data->max || policy->max < data->min) {
1693 	/* verify the cpu speed can be set within this limit */
1694 	ret = cpufreq_driver->verify(policy);
1698 	/* adjust if necessary - all reasons */
1699 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1700 			CPUFREQ_ADJUST, policy);
1702 	/* adjust if necessary - hardware incompatibility*/
1703 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1704 			CPUFREQ_INCOMPATIBLE, policy);
1706 	/* verify the cpu speed can be set within this limit,
1707 	   which might be different to the first one */
1708 	ret = cpufreq_driver->verify(policy);
1712 	/* notification of the new policy */
1713 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1714 			CPUFREQ_NOTIFY, policy);
/* Commit the (possibly notifier-adjusted) limits to the live policy. */
1716 	data->min = policy->min;
1717 	data->max = policy->max;
1719 	pr_debug("new min and max freqs are %u - %u kHz\n",
1720 					data->min, data->max);
1722 	if (cpufreq_driver->setpolicy) {
/* Hardware-managed policy: hand the whole decision to the driver. */
1723 		data->policy = policy->policy;
1724 		pr_debug("setting range\n");
1725 		ret = cpufreq_driver->setpolicy(policy);
1727 		if (policy->governor != data->governor) {
1728 			/* save old, working values */
1729 			struct cpufreq_governor *old_gov = data->governor;
1731 			pr_debug("governor switch\n");
1733 			/* end old governor */
1735 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
1737 			/* start new governor */
1738 			data->governor = policy->governor;
1739 			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1740 				/* new governor failed, so re-start old one */
1741 				pr_debug("starting governor %s failed\n",
1742 							data->governor->name);
1744 					data->governor = old_gov;
1745 					__cpufreq_governor(data,
1751 			/* might be a policy change, too, so fall through */
1753 		pr_debug("governor: change or update limits\n");
/* Tell the (possibly unchanged) governor about the new limits. */
1754 		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1762  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
1763  *	@cpu: CPU which shall be re-evaluated
1765  *	Useful for policy notifiers which have different necessities
1766  *	at different times.
1768 int cpufreq_update_policy(unsigned int cpu)
1770 	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
/* Scratch copy on the stack; __cpufreq_set_policy() commits it to @data. */
1771 	struct cpufreq_policy policy;
1779 	if (unlikely(lock_policy_rwsem_write(cpu))) {
1784 	pr_debug("updating policy for CPU %u\n", cpu);
/* Rebuild the requested policy from the user-set values. */
1785 	memcpy(&policy, data, sizeof(struct cpufreq_policy));
1786 	policy.min = data->user_policy.min;
1787 	policy.max = data->user_policy.max;
1788 	policy.policy = data->user_policy.policy;
1789 	policy.governor = data->user_policy.governor;
1791 	/* BIOS might change freq behind our back
1792 	  -> ask driver for current freq and notify governors about a change */
1793 	if (cpufreq_driver->get) {
1794 		policy.cur = cpufreq_driver->get(cpu);
1796 			pr_debug("Driver did not initialize current freq");
1797 			data->cur = policy.cur;
/* Hardware disagrees with our cached freq: re-sync and notify. */
1799 			if (data->cur != policy.cur)
1800 				cpufreq_out_of_sync(cpu, data->cur,
1805 	ret = __cpufreq_set_policy(data, &policy);
1807 	unlock_policy_rwsem_write(cpu);
1810 	cpufreq_cpu_put(data);
1814 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * CPU hotplug callback: create the cpufreq sysfs interface when a CPU
 * comes online (or a down attempt fails), tear it down on DOWN_PREPARE.
 */
1816 static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1817 					unsigned long action, void *hcpu)
1819 	unsigned int cpu = (unsigned long)hcpu;
1822 	dev = get_cpu_device(cpu);
1826 	case CPU_ONLINE_FROZEN:
1827 			cpufreq_add_dev(dev, NULL);
1829 		case CPU_DOWN_PREPARE:
1830 		case CPU_DOWN_PREPARE_FROZEN:
/* Removal needs the write lock; __cpufreq_remove_dev() drops it (elided). */
1831 			if (unlikely(lock_policy_rwsem_write(cpu)))
1834 			__cpufreq_remove_dev(dev, NULL);
1836 		case CPU_DOWN_FAILED:
1837 		case CPU_DOWN_FAILED_FROZEN:
/* The CPU stays online after all: re-add its cpufreq interface. */
1838 			cpufreq_add_dev(dev, NULL);
1845 static struct notifier_block __refdata cpufreq_cpu_notifier = {
1846     .notifier_call = cpufreq_cpu_callback,
1849 /*********************************************************************
1850  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
1851  *********************************************************************/
1854  * cpufreq_register_driver - register a CPU Frequency driver
1855  * @driver_data: A struct cpufreq_driver containing the values#
1856  * submitted by the CPU Frequency driver.
1858  *   Registers a CPU Frequency driver to this core code. This code
1859  * returns zero on success, -EBUSY when another driver got here first
1860  * (and isn't unregistered in the meantime).
1863 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1865 	unsigned long flags;
1868 	if (cpufreq_disabled())
/* A usable driver needs verify+init plus exactly one of setpolicy/target. */
1871 	if (!driver_data || !driver_data->verify || !driver_data->init ||
1872 	    ((!driver_data->setpolicy) && (!driver_data->target)))
1875 	pr_debug("trying to register driver %s\n", driver_data->name);
1877 	if (driver_data->setpolicy)
1878 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
/* Only one driver may be registered at a time; claim the slot atomically. */
1880 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1881 	if (cpufreq_driver) {
1882 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1885 	cpufreq_driver = driver_data;
1886 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* This triggers cpufreq_add_dev() for every registered cpu device. */
1888 	ret = subsys_interface_register(&cpufreq_interface);
1890 		goto err_null_driver;
1892 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1896 		/* check for at least one working CPU */
1897 		for (i = 0; i < nr_cpu_ids; i++)
1898 			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1903 		/* if all ->init() calls failed, unregister */
1905 			pr_debug("no CPU initialized for driver %s\n",
1911 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
1912 	pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwinding: undo the subsys registration, then clear the slot. */
1916 	subsys_interface_unregister(&cpufreq_interface);
1918 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1919 	cpufreq_driver = NULL;
1920 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1923 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1927  * cpufreq_unregister_driver - unregister the current CPUFreq driver
1929  *    Unregister the current CPUFreq driver. Only call this if you have
1930  * the right to do so, i.e. if you have succeeded in initialising before!
1931  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1932  * currently not initialised.
1934 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1936 	unsigned long flags;
/* Only the driver that registered may unregister. */
1938 	if (!cpufreq_driver || (driver != cpufreq_driver))
1941 	pr_debug("unregistering driver %s\n", driver->name);
1943 	subsys_interface_unregister(&cpufreq_interface);
1944 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
/* Release the single driver slot under the same lock that claimed it. */
1946 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
1947 	cpufreq_driver = NULL;
1948 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1952 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Core bring-up: initialise per-CPU locking state, create the global
 * /sys/devices/system/cpu/cpufreq kobject and hook into syscore PM.
 */
1954 static int __init cpufreq_core_init(void)
1958 	if (cpufreq_disabled())
1961 	for_each_possible_cpu(cpu) {
/* -1 marks "no policy owner yet" for this CPU. */
1962 		per_cpu(cpufreq_policy_cpu, cpu) = -1;
1963 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1966 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
/* Boot cannot proceed sanely without the sysfs anchor. */
1967 	BUG_ON(!cpufreq_global_kobject);
1968 	register_syscore_ops(&cpufreq_syscore_ops);
1972 core_initcall(cpufreq_core_init);