1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/drivers/cpufreq/cpufreq.c
4  *
5  *  Copyright (C) 2001 Russell King
6  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8  *
9  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
10  *      Added handling for CPU hotplug
11  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12  *      Fix handling for CPU hotplug -- affected CPUs
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/cpu.h>
18 #include <linux/cpufreq.h>
19 #include <linux/cpu_cooling.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/kernel_stat.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/pm_qos.h>
27 #include <linux/slab.h>
28 #include <linux/suspend.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
32
33 static LIST_HEAD(cpufreq_policy_list);
34
35 /* Macros to iterate over CPU policies */
36 #define for_each_suitable_policy(__policy, __active)                     \
37         list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
38                 if ((__active) == !policy_is_inactive(__policy))
39
40 #define for_each_active_policy(__policy)                \
41         for_each_suitable_policy(__policy, true)
42 #define for_each_inactive_policy(__policy)              \
43         for_each_suitable_policy(__policy, false)
44
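/*
 * Illustrative sketch (not part of the original file): walking the active
 * policies with the iterator above, e.g. to log each policy's current
 * frequency. This assumes a context in which cpufreq_policy_list cannot
 * change underneath the caller (e.g. under cpufreq_driver_lock).
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("policy%u: cur = %u kHz\n", policy->cpu, policy->cur);
 */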
45 /* Iterate over governors */
46 static LIST_HEAD(cpufreq_governor_list);
47 #define for_each_governor(__governor)                           \
48         list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
49
50 static char default_governor[CPUFREQ_NAME_LEN];
51
52 /*
53  * The "cpufreq driver" - the arch- or hardware-dependent low
54  * level driver of CPUFreq support, and its rwlock. This lock
55  * also protects the cpufreq_cpu_data array.
56  */
57 static struct cpufreq_driver *cpufreq_driver;
58 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
59 static DEFINE_RWLOCK(cpufreq_driver_lock);
60
61 static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
62 bool cpufreq_supports_freq_invariance(void)
63 {
64         return static_branch_likely(&cpufreq_freq_invariance);
65 }
66
67 /* Flag to suspend/resume CPUFreq governors */
68 static bool cpufreq_suspended;
69
70 static inline bool has_target(void)
71 {
72         return cpufreq_driver->target_index || cpufreq_driver->target;
73 }
74
75 /* internal prototypes */
76 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
77 static int cpufreq_init_governor(struct cpufreq_policy *policy);
78 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
79 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
80 static int cpufreq_set_policy(struct cpufreq_policy *policy,
81                               struct cpufreq_governor *new_gov,
82                               unsigned int new_pol);
83
84 /*
85  * Two notifier lists: the "policy" list is involved in the
86  * validation process for a new CPU frequency policy; the
87  * "transition" list for kernel code that needs to handle
88  * changes to devices when the CPU clock speed changes.
89  * The mutex locks both lists.
90  */
91 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
92 SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
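/*
 * Illustrative sketch of registering on the "transition" list above (the
 * callback and notifier names are made up). Kernel code does this through
 * cpufreq_register_notifier(), defined later in this file:
 *
 *	static int example_transition_cb(struct notifier_block *nb,
 *					 unsigned long action, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (action == CPUFREQ_POSTCHANGE)
 *			pr_debug("new frequency: %u kHz\n", freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */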
93
94 static int off __read_mostly;
95 static int cpufreq_disabled(void)
96 {
97         return off;
98 }
99 void disable_cpufreq(void)
100 {
101         off = 1;
102 }
103 static DEFINE_MUTEX(cpufreq_governor_mutex);
104
105 bool have_governor_per_policy(void)
106 {
107         return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
108 }
109 EXPORT_SYMBOL_GPL(have_governor_per_policy);
110
111 static struct kobject *cpufreq_global_kobject;
112
113 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
114 {
115         if (have_governor_per_policy())
116                 return &policy->kobj;
117         else
118                 return cpufreq_global_kobject;
119 }
120 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
121
122 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
123 {
124         struct kernel_cpustat kcpustat;
125         u64 cur_wall_time;
126         u64 idle_time;
127         u64 busy_time;
128
129         cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
130
131         kcpustat_cpu_fetch(&kcpustat, cpu);
132
133         busy_time = kcpustat.cpustat[CPUTIME_USER];
134         busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
135         busy_time += kcpustat.cpustat[CPUTIME_IRQ];
136         busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
137         busy_time += kcpustat.cpustat[CPUTIME_STEAL];
138         busy_time += kcpustat.cpustat[CPUTIME_NICE];
139
140         idle_time = cur_wall_time - busy_time;
141         if (wall)
142                 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
143
144         return div_u64(idle_time, NSEC_PER_USEC);
145 }
146
147 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
148 {
149         u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
150
151         if (idle_time == -1ULL)
152                 return get_cpu_idle_time_jiffy(cpu, wall);
153         else if (!io_busy)
154                 idle_time += get_cpu_iowait_time_us(cpu, wall);
155
156         return idle_time;
157 }
158 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
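/*
 * Illustrative use (a sketch; "io_is_busy" is a hypothetical flag): sampling
 * governors call this twice and work with the deltas. Both the return value
 * and *wall are in microseconds.
 *
 *	u64 wall_time;
 *	u64 idle_time = get_cpu_idle_time(cpu, &wall_time, io_is_busy);
 */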
159
160 /*
161  * This is a generic cpufreq init() routine which can be used by cpufreq
162  * drivers of SMP systems. It will do the following:
163  * - validate & show the frequency table passed in
164  * - set the policy's transition latency
165  * - fill policy->cpus with all possible CPUs
166  */
167 void cpufreq_generic_init(struct cpufreq_policy *policy,
168                 struct cpufreq_frequency_table *table,
169                 unsigned int transition_latency)
170 {
171         policy->freq_table = table;
172         policy->cpuinfo.transition_latency = transition_latency;
173
174         /*
175          * The driver only supports the SMP configuration where all processors
176          * share the clock and voltage.
177          */
178         cpumask_setall(policy->cpus);
179 }
180 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
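/*
 * Illustrative driver usage (a sketch; "example_freq_table" and the 100 us
 * transition latency are made-up values, not from this file):
 *
 *	static int example_cpufreq_driver_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, example_freq_table, 100000);
 *		return 0;
 *	}
 */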
181
182 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
183 {
184         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
185
186         return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
187 }
188 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
189
190 unsigned int cpufreq_generic_get(unsigned int cpu)
191 {
192         struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
193
194         if (!policy || IS_ERR(policy->clk)) {
195                 pr_err("%s: No %s associated to cpu: %d\n",
196                        __func__, policy ? "clk" : "policy", cpu);
197                 return 0;
198         }
199
200         return clk_get_rate(policy->clk) / 1000;
201 }
202 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
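/*
 * Sketch (illustrative; "example_driver" is hypothetical): drivers that set
 * policy->clk in their ->init() callback can plug this helper in directly as
 * their ->get() callback (other mandatory callbacks omitted here):
 *
 *	static struct cpufreq_driver example_driver = {
 *		.name	= "example",
 *		.init	= example_cpufreq_driver_init,
 *		.get	= cpufreq_generic_get,
 *	};
 */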
203
204 /**
205  * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
206  * @cpu: CPU to find the policy for.
207  *
208  * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
209  * the kobject reference counter of that policy.  Return a valid policy on
210  * success or NULL on failure.
211  *
212  * The policy returned by this function has to be released with the help of
213  * cpufreq_cpu_put() to balance its kobject reference counter properly.
214  */
215 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
216 {
217         struct cpufreq_policy *policy = NULL;
218         unsigned long flags;
219
220         if (WARN_ON(cpu >= nr_cpu_ids))
221                 return NULL;
222
223         /* get the cpufreq driver */
224         read_lock_irqsave(&cpufreq_driver_lock, flags);
225
226         if (cpufreq_driver) {
227                 /* get the CPU */
228                 policy = cpufreq_cpu_get_raw(cpu);
229                 if (policy)
230                         kobject_get(&policy->kobj);
231         }
232
233         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
234
235         return policy;
236 }
237 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
238
239 /**
240  * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
241  * @policy: cpufreq policy returned by cpufreq_cpu_get().
242  */
243 void cpufreq_cpu_put(struct cpufreq_policy *policy)
244 {
245         kobject_put(&policy->kobj);
246 }
247 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
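/*
 * Typical get/put pairing (illustrative sketch):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_debug("CPU%u: %u..%u kHz\n", cpu, policy->min, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */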
248
249 /**
250  * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
251  * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
252  */
253 void cpufreq_cpu_release(struct cpufreq_policy *policy)
254 {
255         if (WARN_ON(!policy))
256                 return;
257
258         lockdep_assert_held(&policy->rwsem);
259
260         up_write(&policy->rwsem);
261
262         cpufreq_cpu_put(policy);
263 }
264
265 /**
266  * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
267  * @cpu: CPU to find the policy for.
268  *
269  * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
270  * if the policy returned by it is not NULL, acquire its rwsem for writing.
271  * Return the policy if it is active or release it and return NULL otherwise.
272  *
273  * The policy returned by this function has to be released with the help of
274  * cpufreq_cpu_release() in order to release its rwsem and balance its usage
275  * counter properly.
276  */
277 struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
278 {
279         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
280
281         if (!policy)
282                 return NULL;
283
284         down_write(&policy->rwsem);
285
286         if (policy_is_inactive(policy)) {
287                 cpufreq_cpu_release(policy);
288                 return NULL;
289         }
290
291         return policy;
292 }
293
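/*
 * Typical acquire/release pairing (illustrative sketch): the policy rwsem is
 * held for writing between the two calls, so the policy's limits may be
 * re-evaluated safely.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		refresh_frequency_limits(policy);
 *		cpufreq_cpu_release(policy);
 *	}
 */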
294 /*********************************************************************
295  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
296  *********************************************************************/
297
298 /**
299  * adjust_jiffies - Adjust the system "loops_per_jiffy".
300  * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
301  * @ci: Frequency change information.
302  *
303  * This function alters the system "loops_per_jiffy" for the clock
304  * speed change. Note that loops_per_jiffy cannot be updated on SMP
305  * systems as each CPU might be scaled differently. So, use the arch
306  * per-CPU loops_per_jiffy value wherever possible.
307  */
308 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
309 {
310 #ifndef CONFIG_SMP
311         static unsigned long l_p_j_ref;
312         static unsigned int l_p_j_ref_freq;
313
314         if (ci->flags & CPUFREQ_CONST_LOOPS)
315                 return;
316
317         if (!l_p_j_ref_freq) {
318                 l_p_j_ref = loops_per_jiffy;
319                 l_p_j_ref_freq = ci->old;
320                 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
321                          l_p_j_ref, l_p_j_ref_freq);
322         }
323         if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
324                 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
325                                                                 ci->new);
326                 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
327                          loops_per_jiffy, ci->new);
328         }
329 #endif
330 }
331
332 /**
333  * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
334  * @policy: cpufreq policy the frequency transition applies to.
335  * @freqs: details of the frequency update.
336  * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
337  *
338  * This function calls the transition notifiers and adjust_jiffies().
339  *
340  * It is called twice on all CPU frequency changes that have external effects.
341  */
342 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
343                                       struct cpufreq_freqs *freqs,
344                                       unsigned int state)
345 {
346         int cpu;
347
348         BUG_ON(irqs_disabled());
349
350         if (cpufreq_disabled())
351                 return;
352
353         freqs->policy = policy;
354         freqs->flags = cpufreq_driver->flags;
355         pr_debug("notification %u of frequency transition to %u kHz\n",
356                  state, freqs->new);
357
358         switch (state) {
359         case CPUFREQ_PRECHANGE:
360                 /*
361                  * Detect if the driver reported a value as "old frequency"
362                  * which is not equal to what the cpufreq core thinks is
363                  * "old frequency".
364                  */
365                 if (policy->cur && policy->cur != freqs->old) {
366                         pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
367                                  freqs->old, policy->cur);
368                         freqs->old = policy->cur;
369                 }
370
371                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
372                                          CPUFREQ_PRECHANGE, freqs);
373
374                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
375                 break;
376
377         case CPUFREQ_POSTCHANGE:
378                 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
379                 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
380                          cpumask_pr_args(policy->cpus));
381
382                 for_each_cpu(cpu, policy->cpus)
383                         trace_cpu_frequency(freqs->new, cpu);
384
385                 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
386                                          CPUFREQ_POSTCHANGE, freqs);
387
388                 cpufreq_stats_record_transition(policy, freqs->new);
389                 policy->cur = freqs->new;
390         }
391 }
392
393 /* Do post-transition notifications, also handling the case where the transition has failed */
394 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
395                 struct cpufreq_freqs *freqs, int transition_failed)
396 {
397         cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
398         if (!transition_failed)
399                 return;
400
401         swap(freqs->old, freqs->new);
402         cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
403         cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
404 }
405
406 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
407                 struct cpufreq_freqs *freqs)
408 {
409
410         /*
411          * Catch double invocations of _begin() which lead to self-deadlock.
412          * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
413          * doesn't invoke _begin() on their behalf, and hence the chances of
414          * double invocations are very low. Moreover, there are scenarios
415          * where these checks can emit false-positive warnings in these
416          * drivers; so we avoid that by skipping them altogether.
417          */
418         WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
419                                 && current == policy->transition_task);
420
421 wait:
422         wait_event(policy->transition_wait, !policy->transition_ongoing);
423
424         spin_lock(&policy->transition_lock);
425
426         if (unlikely(policy->transition_ongoing)) {
427                 spin_unlock(&policy->transition_lock);
428                 goto wait;
429         }
430
431         policy->transition_ongoing = true;
432         policy->transition_task = current;
433
434         spin_unlock(&policy->transition_lock);
435
436         cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
437 }
438 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
439
440 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
441                 struct cpufreq_freqs *freqs, int transition_failed)
442 {
443         if (WARN_ON(!policy->transition_ongoing))
444                 return;
445
446         cpufreq_notify_post_transition(policy, freqs, transition_failed);
447
448         arch_set_freq_scale(policy->related_cpus,
449                             policy->cur,
450                             policy->cpuinfo.max_freq);
451
452         policy->transition_ongoing = false;
453         policy->transition_task = NULL;
454
455         wake_up(&policy->transition_wait);
456 }
457 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
458
459 /*
460  * Fast frequency switching status count.  Positive means "enabled", negative
461  * means "disabled" and 0 means "not decided yet".
462  */
463 static int cpufreq_fast_switch_count;
464 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
465
466 static void cpufreq_list_transition_notifiers(void)
467 {
468         struct notifier_block *nb;
469
470         pr_info("Registered transition notifiers:\n");
471
472         mutex_lock(&cpufreq_transition_notifier_list.mutex);
473
474         for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
475                 pr_info("%pS\n", nb->notifier_call);
476
477         mutex_unlock(&cpufreq_transition_notifier_list.mutex);
478 }
479
480 /**
481  * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
482  * @policy: cpufreq policy to enable fast frequency switching for.
483  *
484  * Try to enable fast frequency switching for @policy.
485  *
486  * The attempt will fail if there is at least one transition notifier registered
487  * at this point, as fast frequency switching is quite fundamentally at odds
488  * with transition notifiers.  Thus if successful, it will make registration of
489  * transition notifiers fail going forward.
490  */
491 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
492 {
493         lockdep_assert_held(&policy->rwsem);
494
495         if (!policy->fast_switch_possible)
496                 return;
497
498         mutex_lock(&cpufreq_fast_switch_lock);
499         if (cpufreq_fast_switch_count >= 0) {
500                 cpufreq_fast_switch_count++;
501                 policy->fast_switch_enabled = true;
502         } else {
503                 pr_warn("CPU%u: Fast frequency switching not enabled\n",
504                         policy->cpu);
505                 cpufreq_list_transition_notifiers();
506         }
507         mutex_unlock(&cpufreq_fast_switch_lock);
508 }
509 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
510
511 /**
512  * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
513  * @policy: cpufreq policy to disable fast frequency switching for.
514  */
515 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
516 {
517         mutex_lock(&cpufreq_fast_switch_lock);
518         if (policy->fast_switch_enabled) {
519                 policy->fast_switch_enabled = false;
520                 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
521                         cpufreq_fast_switch_count--;
522         }
523         mutex_unlock(&cpufreq_fast_switch_lock);
524 }
525 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
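/*
 * Sketch of how a governor typically uses the pair above (the callback names
 * are illustrative, not from this file): enable fast switching when the
 * governor starts and disable it again when it stops, with policy->rwsem
 * held by the cpufreq core in both paths.
 *
 *	static int example_gov_start(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_enable_fast_switch(policy);
 *		return 0;
 *	}
 *
 *	static void example_gov_stop(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_disable_fast_switch(policy);
 *	}
 */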
526
527 static unsigned int __resolve_freq(struct cpufreq_policy *policy,
528                 unsigned int target_freq, unsigned int relation)
529 {
530         unsigned int idx;
531
532         target_freq = clamp_val(target_freq, policy->min, policy->max);
533
534         if (!cpufreq_driver->target_index)
535                 return target_freq;
536
537         idx = cpufreq_frequency_table_target(policy, target_freq, relation);
538         policy->cached_resolved_idx = idx;
539         policy->cached_target_freq = target_freq;
540         return policy->freq_table[idx].frequency;
541 }
542
543 /**
544  * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
545  * one.
546  * @policy: associated policy to interrogate
547  * @target_freq: target frequency to resolve.
548  *
549  * The target to driver frequency mapping is cached in the policy.
550  *
551  * Return: Lowest driver-supported frequency greater than or equal to the
552  * given target_freq, subject to policy (min/max) and driver limitations.
553  */
554 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
555                                          unsigned int target_freq)
556 {
557         return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
558 }
559 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
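/*
 * Illustrative governor-side use (a sketch; "raw_target" is hypothetical):
 * resolve a raw frequency request to a driver-supported value before asking
 * the driver to switch.
 *
 *	unsigned int next_freq = cpufreq_driver_resolve_freq(policy, raw_target);
 *
 *	if (policy->fast_switch_enabled)
 *		cpufreq_driver_fast_switch(policy, next_freq);
 */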
560
561 unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
562 {
563         unsigned int latency;
564
565         if (policy->transition_delay_us)
566                 return policy->transition_delay_us;
567
568         latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
569         if (latency) {
570                 /*
571                  * For platforms that can change the frequency very fast (< 10
572                  * us), the latency * LATENCY_MULTIPLIER formula below gives a
573                  * decent transition delay. But for platforms where
574                  * transition_latency is in milliseconds, it gives unrealistic values.
575                  *
576                  * Cap the default transition delay to 10 ms, which seems to be
577                  * a reasonable amount of time after which we should reevaluate
578                  * the frequency.
579                  */
580                 return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
581         }
582
583         return LATENCY_MULTIPLIER;
584 }
585 EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
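/*
 * Illustrative use (sketch): governors typically take this value as the
 * default minimum interval, in microseconds, between two frequency updates
 * for the policy.
 *
 *	unsigned int rate_limit_us = cpufreq_policy_transition_delay_us(policy);
 */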
586
587 /*********************************************************************
588  *                          SYSFS INTERFACE                          *
589  *********************************************************************/
590 static ssize_t show_boost(struct kobject *kobj,
591                           struct kobj_attribute *attr, char *buf)
592 {
593         return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
594 }
595
596 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
597                            const char *buf, size_t count)
598 {
599         int ret, enable;
600
601         ret = sscanf(buf, "%d", &enable);
602         if (ret != 1 || enable < 0 || enable > 1)
603                 return -EINVAL;
604
605         if (cpufreq_boost_trigger_state(enable)) {
606                 pr_err("%s: Cannot %s BOOST!\n",
607                        __func__, enable ? "enable" : "disable");
608                 return -EINVAL;
609         }
610
611         pr_debug("%s: cpufreq BOOST %s\n",
612                  __func__, enable ? "enabled" : "disabled");
613
614         return count;
615 }
616 define_one_global_rw(boost);
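/*
 * The attribute defined above appears as a global sysfs file. From user
 * space (illustrative shell commands, assuming a driver with boost support):
 *
 *	# cat /sys/devices/system/cpu/cpufreq/boost
 *	0
 *	# echo 1 > /sys/devices/system/cpu/cpufreq/boost
 *
 * The write lands in store_boost(), which calls
 * cpufreq_boost_trigger_state(1).
 */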
617
618 static struct cpufreq_governor *find_governor(const char *str_governor)
619 {
620         struct cpufreq_governor *t;
621
622         for_each_governor(t)
623                 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
624                         return t;
625
626         return NULL;
627 }
628
629 static struct cpufreq_governor *get_governor(const char *str_governor)
630 {
631         struct cpufreq_governor *t;
632
633         mutex_lock(&cpufreq_governor_mutex);
634         t = find_governor(str_governor);
635         if (!t)
636                 goto unlock;
637
638         if (!try_module_get(t->owner))
639                 t = NULL;
640
641 unlock:
642         mutex_unlock(&cpufreq_governor_mutex);
643
644         return t;
645 }
646
647 static unsigned int cpufreq_parse_policy(char *str_governor)
648 {
649         if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
650                 return CPUFREQ_POLICY_PERFORMANCE;
651
652         if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
653                 return CPUFREQ_POLICY_POWERSAVE;
654
655         return CPUFREQ_POLICY_UNKNOWN;
656 }
657
658 /**
659  * cpufreq_parse_governor - parse a governor string only for has_target()
660  * @str_governor: Governor name.
661  */
662 static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
663 {
664         struct cpufreq_governor *t;
665
666         t = get_governor(str_governor);
667         if (t)
668                 return t;
669
670         if (request_module("cpufreq_%s", str_governor))
671                 return NULL;
672
673         return get_governor(str_governor);
674 }
675
676 /*
677  * cpufreq_per_cpu_attr_read() / show_##file_name() -
678  * print out cpufreq information
679  *
680  * Write out information from cpufreq_driver->policy[cpu]; object must be
681  * "unsigned int".
682  */
683
684 #define show_one(file_name, object)                     \
685 static ssize_t show_##file_name                         \
686 (struct cpufreq_policy *policy, char *buf)              \
687 {                                                       \
688         return sprintf(buf, "%u\n", policy->object);    \
689 }
690
691 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
692 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
693 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
694 show_one(scaling_min_freq, min);
695 show_one(scaling_max_freq, max);
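/*
 * For reference (illustrative expansion): show_one(scaling_min_freq, min)
 * expands to roughly:
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */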
696
697 __weak unsigned int arch_freq_get_on_cpu(int cpu)
698 {
699         return 0;
700 }
701
702 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
703 {
704         ssize_t ret;
705         unsigned int freq;
706
707         freq = arch_freq_get_on_cpu(policy->cpu);
708         if (freq)
709                 ret = sprintf(buf, "%u\n", freq);
710         else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
711                 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
712         else
713                 ret = sprintf(buf, "%u\n", policy->cur);
714         return ret;
715 }
716
717 /*
718  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
719  */
720 #define store_one(file_name, object)                    \
721 static ssize_t store_##file_name                                        \
722 (struct cpufreq_policy *policy, const char *buf, size_t count)          \
723 {                                                                       \
724         unsigned long val;                                              \
725         int ret;                                                        \
726                                                                         \
727         ret = sscanf(buf, "%lu", &val);                                 \
728         if (ret != 1)                                                   \
729                 return -EINVAL;                                         \
730                                                                         \
731         ret = freq_qos_update_request(policy->object##_freq_req, val);\
732         return ret >= 0 ? count : ret;                                  \
733 }
734
735 store_one(scaling_min_freq, min);
736 store_one(scaling_max_freq, max);
737
738 /*
739  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
740  */
741 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
742                                         char *buf)
743 {
744         unsigned int cur_freq = __cpufreq_get(policy);
745
746         if (cur_freq)
747                 return sprintf(buf, "%u\n", cur_freq);
748
749         return sprintf(buf, "<unknown>\n");
750 }
751
752 /*
753  * show_scaling_governor - show the current policy for the specified CPU
754  */
755 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
756 {
757         if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
758                 return sprintf(buf, "powersave\n");
759         else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
760                 return sprintf(buf, "performance\n");
761         else if (policy->governor)
762                 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
763                                 policy->governor->name);
764         return -EINVAL;
765 }
766
767 /*
768  * store_scaling_governor - store policy for the specified CPU
769  */
770 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
771                                         const char *buf, size_t count)
772 {
773         char str_governor[16];
774         int ret;
775
776         ret = sscanf(buf, "%15s", str_governor);
777         if (ret != 1)
778                 return -EINVAL;
779
780         if (cpufreq_driver->setpolicy) {
781                 unsigned int new_pol;
782
783                 new_pol = cpufreq_parse_policy(str_governor);
784                 if (!new_pol)
785                         return -EINVAL;
786
787                 ret = cpufreq_set_policy(policy, NULL, new_pol);
788         } else {
789                 struct cpufreq_governor *new_gov;
790
791                 new_gov = cpufreq_parse_governor(str_governor);
792                 if (!new_gov)
793                         return -EINVAL;
794
795                 ret = cpufreq_set_policy(policy, new_gov,
796                                          CPUFREQ_POLICY_UNKNOWN);
797
798                 module_put(new_gov->owner);
799         }
800
801         return ret ? ret : count;
802 }
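/*
 * From user space this store routine is reached through the per-policy sysfs
 * file (illustrative, assuming CPU0 and the "powersave" governor are
 * available):
 *
 *	# echo powersave > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */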
803
804 /*
805  * show_scaling_driver - show the cpufreq driver currently loaded
806  */
807 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
808 {
809         return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
810 }
811
812 /*
813  * show_scaling_available_governors - show the available CPUfreq governors
814  */
815 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
816                                                 char *buf)
817 {
818         ssize_t i = 0;
819         struct cpufreq_governor *t;
820
821         if (!has_target()) {
822                 i += sprintf(buf, "performance powersave");
823                 goto out;
824         }
825
826         mutex_lock(&cpufreq_governor_mutex);
827         for_each_governor(t) {
828                 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
829                     - (CPUFREQ_NAME_LEN + 2)))
830                         break;
831                 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
832         }
833         mutex_unlock(&cpufreq_governor_mutex);
834 out:
835         i += sprintf(&buf[i], "\n");
836         return i;
837 }
838
839 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
840 {
841         ssize_t i = 0;
842         unsigned int cpu;
843
844         for_each_cpu(cpu, mask) {
845                 if (i)
846                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
847                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
848                 if (i >= (PAGE_SIZE - 5))
849                         break;
850         }
851         i += sprintf(&buf[i], "\n");
852         return i;
853 }
854 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
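/*
 * Example output (illustrative): for a mask containing CPUs 0 and 1, the
 * buffer produced above is "0 1\n".
 */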
855
856 /*
857  * show_related_cpus - show the CPUs affected by each transition even if
858  * hw coordination is in use
859  */
860 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
861 {
862         return cpufreq_show_cpus(policy->related_cpus, buf);
863 }
864
865 /*
866  * show_affected_cpus - show the CPUs affected by each transition
867  */
868 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
869 {
870         return cpufreq_show_cpus(policy->cpus, buf);
871 }
872
873 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
874                                         const char *buf, size_t count)
875 {
876         unsigned int freq = 0;
877         unsigned int ret;
878
879         if (!policy->governor || !policy->governor->store_setspeed)
880                 return -EINVAL;
881
882         ret = sscanf(buf, "%u", &freq);
883         if (ret != 1)
884                 return -EINVAL;
885
886         policy->governor->store_setspeed(policy, freq);
887
888         return count;
889 }
890
891 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
892 {
893         if (!policy->governor || !policy->governor->show_setspeed)
894                 return sprintf(buf, "<unsupported>\n");
895
896         return policy->governor->show_setspeed(policy, buf);
897 }
898
899 /*
900  * show_bios_limit - show the current cpufreq HW/BIOS limitation
901  */
902 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
903 {
904         unsigned int limit;
905         int ret;
906         ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
907         if (!ret)
908                 return sprintf(buf, "%u\n", limit);
909         return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
910 }
911
912 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
913 cpufreq_freq_attr_ro(cpuinfo_min_freq);
914 cpufreq_freq_attr_ro(cpuinfo_max_freq);
915 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
916 cpufreq_freq_attr_ro(scaling_available_governors);
917 cpufreq_freq_attr_ro(scaling_driver);
918 cpufreq_freq_attr_ro(scaling_cur_freq);
919 cpufreq_freq_attr_ro(bios_limit);
920 cpufreq_freq_attr_ro(related_cpus);
921 cpufreq_freq_attr_ro(affected_cpus);
922 cpufreq_freq_attr_rw(scaling_min_freq);
923 cpufreq_freq_attr_rw(scaling_max_freq);
924 cpufreq_freq_attr_rw(scaling_governor);
925 cpufreq_freq_attr_rw(scaling_setspeed);
926
927 static struct attribute *default_attrs[] = {
928         &cpuinfo_min_freq.attr,
929         &cpuinfo_max_freq.attr,
930         &cpuinfo_transition_latency.attr,
931         &scaling_min_freq.attr,
932         &scaling_max_freq.attr,
933         &affected_cpus.attr,
934         &related_cpus.attr,
935         &scaling_governor.attr,
936         &scaling_driver.attr,
937         &scaling_available_governors.attr,
938         &scaling_setspeed.attr,
939         NULL
940 };
941
942 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
943 #define to_attr(a) container_of(a, struct freq_attr, attr)
944
945 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
946 {
947         struct cpufreq_policy *policy = to_policy(kobj);
948         struct freq_attr *fattr = to_attr(attr);
949         ssize_t ret;
950
951         if (!fattr->show)
952                 return -EIO;
953
954         down_read(&policy->rwsem);
955         ret = fattr->show(policy, buf);
956         up_read(&policy->rwsem);
957
958         return ret;
959 }
960
961 static ssize_t store(struct kobject *kobj, struct attribute *attr,
962                      const char *buf, size_t count)
963 {
964         struct cpufreq_policy *policy = to_policy(kobj);
965         struct freq_attr *fattr = to_attr(attr);
966         ssize_t ret = -EINVAL;
967
968         if (!fattr->store)
969                 return -EIO;
970
971         /*
972          * cpus_read_trylock() is used here to work around a circular lock
973          * dependency problem with respect to cpufreq_register_driver().
974          */
975         if (!cpus_read_trylock())
976                 return -EBUSY;
977
978         if (cpu_online(policy->cpu)) {
979                 down_write(&policy->rwsem);
980                 ret = fattr->store(policy, buf, count);
981                 up_write(&policy->rwsem);
982         }
983
984         cpus_read_unlock();
985
986         return ret;
987 }
988
989 static void cpufreq_sysfs_release(struct kobject *kobj)
990 {
991         struct cpufreq_policy *policy = to_policy(kobj);
992         pr_debug("last reference is dropped\n");
993         complete(&policy->kobj_unregister);
994 }
995
996 static const struct sysfs_ops sysfs_ops = {
997         .show   = show,
998         .store  = store,
999 };
1000
1001 static struct kobj_type ktype_cpufreq = {
1002         .sysfs_ops      = &sysfs_ops,
1003         .default_attrs  = default_attrs,
1004         .release        = cpufreq_sysfs_release,
1005 };
1006
1007 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
1008 {
1009         struct device *dev = get_cpu_device(cpu);
1010
1011         if (unlikely(!dev))
1012                 return;
1013
1014         if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1015                 return;
1016
1017         dev_dbg(dev, "%s: Adding symlink\n", __func__);
1018         if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1019                 dev_err(dev, "cpufreq symlink creation failed\n");
1020 }
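/*
 * Resulting sysfs layout (illustrative, for a CPU N handled by policyM):
 * each CPU device gets a "cpufreq" symlink pointing at the shared policy
 * directory, roughly:
 *
 *	/sys/devices/system/cpu/cpuN/cpufreq
 *		-> /sys/devices/system/cpu/cpufreq/policyM
 */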
1021
1022 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
1023                                    struct device *dev)
1024 {
1025         dev_dbg(dev, "%s: Removing symlink\n", __func__);
1026         sysfs_remove_link(&dev->kobj, "cpufreq");
1027 }
1028
1029 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1030 {
1031         struct freq_attr **drv_attr;
1032         int ret = 0;
1033
1034         /* set up files for this cpu device */
1035         drv_attr = cpufreq_driver->attr;
1036         while (drv_attr && *drv_attr) {
1037                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1038                 if (ret)
1039                         return ret;
1040                 drv_attr++;
1041         }
1042         if (cpufreq_driver->get) {
1043                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1044                 if (ret)
1045                         return ret;
1046         }
1047
1048         ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1049         if (ret)
1050                 return ret;
1051
1052         if (cpufreq_driver->bios_limit) {
1053                 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1054                 if (ret)
1055                         return ret;
1056         }
1057
1058         return 0;
1059 }
1060
1061 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1062 {
1063         struct cpufreq_governor *gov = NULL;
1064         unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1065         int ret;
1066
1067         if (has_target()) {
1068                 /* Update policy governor to the one used before hotplug. */
1069                 gov = get_governor(policy->last_governor);
1070                 if (gov) {
1071                         pr_debug("Restoring governor %s for cpu %d\n",
1072                                  gov->name, policy->cpu);
1073                 } else {
1074                         gov = get_governor(default_governor);
1075                 }
1076
1077                 if (!gov) {
1078                         gov = cpufreq_default_governor();
1079                         __module_get(gov->owner);
1080                 }
1081
1082         } else {
1083
1084                 /* Use the default policy if there is no last_policy. */
1085                 if (policy->last_policy) {
1086                         pol = policy->last_policy;
1087                 } else {
1088                         pol = cpufreq_parse_policy(default_governor);
1089                         /*
1090                          * In case the default governor is neither "performance"
1091                          * nor "powersave", fall back to the initial policy
1092                          * value set by the driver.
1093                          */
1094                         if (pol == CPUFREQ_POLICY_UNKNOWN)
1095                                 pol = policy->policy;
1096                 }
1097                 if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1098                     pol != CPUFREQ_POLICY_POWERSAVE)
1099                         return -ENODATA;
1100         }
1101
1102         ret = cpufreq_set_policy(policy, gov, pol);
1103         if (gov)
1104                 module_put(gov->owner);
1105
1106         return ret;
1107 }
1108
1109 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1110 {
1111         int ret = 0;
1112
1113         /* Has this CPU been taken care of already? */
1114         if (cpumask_test_cpu(cpu, policy->cpus))
1115                 return 0;
1116
1117         down_write(&policy->rwsem);
1118         if (has_target())
1119                 cpufreq_stop_governor(policy);
1120
1121         cpumask_set_cpu(cpu, policy->cpus);
1122
1123         if (has_target()) {
1124                 ret = cpufreq_start_governor(policy);
1125                 if (ret)
1126                         pr_err("%s: Failed to start governor\n", __func__);
1127         }
1128         up_write(&policy->rwsem);
1129         return ret;
1130 }
1131
1132 void refresh_frequency_limits(struct cpufreq_policy *policy)
1133 {
1134         if (!policy_is_inactive(policy)) {
1135                 pr_debug("updating policy for CPU %u\n", policy->cpu);
1136
1137                 cpufreq_set_policy(policy, policy->governor, policy->policy);
1138         }
1139 }
1140 EXPORT_SYMBOL(refresh_frequency_limits);
1141
1142 static void handle_update(struct work_struct *work)
1143 {
1144         struct cpufreq_policy *policy =
1145                 container_of(work, struct cpufreq_policy, update);
1146
1147         pr_debug("handle_update for cpu %u called\n", policy->cpu);
1148         down_write(&policy->rwsem);
1149         refresh_frequency_limits(policy);
1150         up_write(&policy->rwsem);
1151 }
1152
1153 static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1154                                 void *data)
1155 {
1156         struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1157
1158         schedule_work(&policy->update);
1159         return 0;
1160 }
1161
1162 static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1163                                 void *data)
1164 {
1165         struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1166
1167         schedule_work(&policy->update);
1168         return 0;
1169 }
1170
1171 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1172 {
1173         struct kobject *kobj;
1174         struct completion *cmp;
1175
1176         down_write(&policy->rwsem);
1177         cpufreq_stats_free_table(policy);
1178         kobj = &policy->kobj;
1179         cmp = &policy->kobj_unregister;
1180         up_write(&policy->rwsem);
1181         kobject_put(kobj);
1182
1183         /*
1184          * We need to make sure that the underlying kobj is
1185          * actually not referenced anymore by anybody before we
1186          * proceed with unloading.
1187          */
1188         pr_debug("waiting for dropping of refcount\n");
1189         wait_for_completion(cmp);
1190         pr_debug("wait complete\n");
1191 }
1192
1193 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1194 {
1195         struct cpufreq_policy *policy;
1196         struct device *dev = get_cpu_device(cpu);
1197         int ret;
1198
1199         if (!dev)
1200                 return NULL;
1201
1202         policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1203         if (!policy)
1204                 return NULL;
1205
1206         if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1207                 goto err_free_policy;
1208
1209         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1210                 goto err_free_cpumask;
1211
1212         if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1213                 goto err_free_rcpumask;
1214
1215         ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1216                                    cpufreq_global_kobject, "policy%u", cpu);
1217         if (ret) {
1218                 dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1219                 /*
1220                  * The entire policy object will be freed below, but the extra
1221                  * memory allocated for the kobject name needs to be freed by
1222                  * releasing the kobject.
1223                  */
1224                 kobject_put(&policy->kobj);
1225                 goto err_free_real_cpus;
1226         }
1227
1228         freq_constraints_init(&policy->constraints);
1229
1230         policy->nb_min.notifier_call = cpufreq_notifier_min;
1231         policy->nb_max.notifier_call = cpufreq_notifier_max;
1232
1233         ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1234                                     &policy->nb_min);
1235         if (ret) {
1236                 dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
1237                         ret, cpumask_pr_args(policy->cpus));
1238                 goto err_kobj_remove;
1239         }
1240
1241         ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1242                                     &policy->nb_max);
1243         if (ret) {
1244                 dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
1245                         ret, cpumask_pr_args(policy->cpus));
1246                 goto err_min_qos_notifier;
1247         }
1248
1249         INIT_LIST_HEAD(&policy->policy_list);
1250         init_rwsem(&policy->rwsem);
1251         spin_lock_init(&policy->transition_lock);
1252         init_waitqueue_head(&policy->transition_wait);
1253         init_completion(&policy->kobj_unregister);
1254         INIT_WORK(&policy->update, handle_update);
1255
1256         policy->cpu = cpu;
1257         return policy;
1258
1259 err_min_qos_notifier:
1260         freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1261                                  &policy->nb_min);
1262 err_kobj_remove:
1263         cpufreq_policy_put_kobj(policy);
1264 err_free_real_cpus:
1265         free_cpumask_var(policy->real_cpus);
1266 err_free_rcpumask:
1267         free_cpumask_var(policy->related_cpus);
1268 err_free_cpumask:
1269         free_cpumask_var(policy->cpus);
1270 err_free_policy:
1271         kfree(policy);
1272
1273         return NULL;
1274 }
1275
1276 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1277 {
1278         unsigned long flags;
1279         int cpu;
1280
1281         /* Remove policy from list */
1282         write_lock_irqsave(&cpufreq_driver_lock, flags);
1283         list_del(&policy->policy_list);
1284
1285         for_each_cpu(cpu, policy->related_cpus)
1286                 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1287         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1288
1289         freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1290                                  &policy->nb_max);
1291         freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1292                                  &policy->nb_min);
1293
1294         /* Cancel any pending policy->update work before freeing the policy. */
1295         cancel_work_sync(&policy->update);
1296
1297         if (policy->max_freq_req) {
1298                 /*
1299                  * CPUFREQ_CREATE_POLICY notification is sent only after
1300                  * successfully adding max_freq_req request.
1301                  */
1302                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1303                                              CPUFREQ_REMOVE_POLICY, policy);
1304                 freq_qos_remove_request(policy->max_freq_req);
1305         }
1306
1307         freq_qos_remove_request(policy->min_freq_req);
1308         kfree(policy->min_freq_req);
1309
1310         cpufreq_policy_put_kobj(policy);
1311         free_cpumask_var(policy->real_cpus);
1312         free_cpumask_var(policy->related_cpus);
1313         free_cpumask_var(policy->cpus);
1314         kfree(policy);
1315 }
1316
1317 static int cpufreq_online(unsigned int cpu)
1318 {
1319         struct cpufreq_policy *policy;
1320         bool new_policy;
1321         unsigned long flags;
1322         unsigned int j;
1323         int ret;
1324
1325         pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1326
1327         /* Check if this CPU already has a policy to manage it */
1328         policy = per_cpu(cpufreq_cpu_data, cpu);
1329         if (policy) {
1330                 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1331                 if (!policy_is_inactive(policy))
1332                         return cpufreq_add_policy_cpu(policy, cpu);
1333
1334                 /* This is the only online CPU for the policy.  Start over. */
1335                 new_policy = false;
1336                 down_write(&policy->rwsem);
1337                 policy->cpu = cpu;
1338                 policy->governor = NULL;
1339                 up_write(&policy->rwsem);
1340         } else {
1341                 new_policy = true;
1342                 policy = cpufreq_policy_alloc(cpu);
1343                 if (!policy)
1344                         return -ENOMEM;
1345         }
1346
1347         if (!new_policy && cpufreq_driver->online) {
1348                 ret = cpufreq_driver->online(policy);
1349                 if (ret) {
1350                         pr_debug("%s: %d: initialization failed\n", __func__,
1351                                  __LINE__);
1352                         goto out_exit_policy;
1353                 }
1354
1355                 /* Recover policy->cpus using related_cpus */
1356                 cpumask_copy(policy->cpus, policy->related_cpus);
1357         } else {
1358                 cpumask_copy(policy->cpus, cpumask_of(cpu));
1359
1360                 /*
1361                  * Call the driver. From then on, the cpufreq driver must be able
1362                  * to accept all calls to ->verify and ->setpolicy for this CPU.
1363                  */
1364                 ret = cpufreq_driver->init(policy);
1365                 if (ret) {
1366                         pr_debug("%s: %d: initialization failed\n", __func__,
1367                                  __LINE__);
1368                         goto out_free_policy;
1369                 }
1370
1371                 /*
1372                  * The initialization has succeeded and the policy is online.
1373                  * If there is a problem with its frequency table, take it
1374                  * offline and drop it.
1375                  */
1376                 ret = cpufreq_table_validate_and_sort(policy);
1377                 if (ret)
1378                         goto out_offline_policy;
1379
1380                 /* related_cpus should at least include policy->cpus. */
1381                 cpumask_copy(policy->related_cpus, policy->cpus);
1382         }
1383
1384         down_write(&policy->rwsem);
1385         /*
1386          * Affected CPUs must always be the ones that are online. We aren't
1387          * managing offline CPUs here.
1388          */
1389         cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1390
1391         if (new_policy) {
1392                 for_each_cpu(j, policy->related_cpus) {
1393                         per_cpu(cpufreq_cpu_data, j) = policy;
1394                         add_cpu_dev_symlink(policy, j);
1395                 }
1396
1397                 policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1398                                                GFP_KERNEL);
1399                 if (!policy->min_freq_req) {
1400                         ret = -ENOMEM;
1401                         goto out_destroy_policy;
1402                 }
1403
1404                 ret = freq_qos_add_request(&policy->constraints,
1405                                            policy->min_freq_req, FREQ_QOS_MIN,
1406                                            policy->min);
1407                 if (ret < 0) {
1408                         /*
1409                          * So we don't call freq_qos_remove_request() for an
1410                          * uninitialized request.
1411                          */
1412                         kfree(policy->min_freq_req);
1413                         policy->min_freq_req = NULL;
1414                         goto out_destroy_policy;
1415                 }
1416
1417                 /*
1418                  * This must be initialized right here to avoid calling
1419                  * freq_qos_remove_request() on uninitialized request in case
1420                  * of errors.
1421                  */
1422                 policy->max_freq_req = policy->min_freq_req + 1;
1423
1424                 ret = freq_qos_add_request(&policy->constraints,
1425                                            policy->max_freq_req, FREQ_QOS_MAX,
1426                                            policy->max);
1427                 if (ret < 0) {
1428                         policy->max_freq_req = NULL;
1429                         goto out_destroy_policy;
1430                 }
1431
1432                 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1433                                 CPUFREQ_CREATE_POLICY, policy);
1434         }
1435
1436         if (cpufreq_driver->get && has_target()) {
1437                 policy->cur = cpufreq_driver->get(policy->cpu);
1438                 if (!policy->cur) {
1439                         ret = -EIO;
1440                         pr_err("%s: ->get() failed\n", __func__);
1441                         goto out_destroy_policy;
1442                 }
1443         }
1444
1445         /*
1446          * Sometimes boot loaders set the CPU frequency to a value outside of
1447          * the frequency table known to the cpufreq core. In such cases the CPU
1448          * might be unstable if it has to run at that frequency for a long time,
1449          * so it is better to set it to a frequency that is specified in the
1450          * frequency table. This also makes cpufreq stats inconsistent, as
1451          * cpufreq-stats would fail to register because the current frequency of
1452          * the CPU isn't found in the frequency table.
1453          *
1454          * Because we don't want this change to affect the boot process badly, we
1455          * go for the next frequency which is >= policy->cur ('cur' must be set by
1456          * now, otherwise we will end up setting the frequency to the lowest entry
1457          * of the table as 'cur' is initialized to zero).
1458          *
1459          * We are passing target-freq as "policy->cur - 1" otherwise
1460          * __cpufreq_driver_target() would simply fail, as policy->cur will be
1461          * equal to target-freq.
1462          */
1463         if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1464             && has_target()) {
1465                 unsigned int old_freq = policy->cur;
1466
1467                 /* Are we running at an unknown frequency? */
1468                 ret = cpufreq_frequency_table_get_index(policy, old_freq);
1469                 if (ret == -EINVAL) {
1470                         ret = __cpufreq_driver_target(policy, old_freq - 1,
1471                                                       CPUFREQ_RELATION_L);
1472
1473                         /*
1474                          * Reaching here a few seconds after boot does not mean
1475                          * the system will remain stable at the "unknown" frequency
1476                          * for a longer duration. Hence, a BUG_ON().
1477                          */
1478                         BUG_ON(ret);
1479                         pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
1480                                 __func__, policy->cpu, old_freq, policy->cur);
1481                 }
1482         }
1483
1484         if (new_policy) {
1485                 ret = cpufreq_add_dev_interface(policy);
1486                 if (ret)
1487                         goto out_destroy_policy;
1488
1489                 cpufreq_stats_create_table(policy);
1490
1491                 write_lock_irqsave(&cpufreq_driver_lock, flags);
1492                 list_add(&policy->policy_list, &cpufreq_policy_list);
1493                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1494
1495                 /*
1496                  * Register with the energy model before
1497                  * sched_cpufreq_governor_change() is called, which will result
1498                  * in rebuilding of the sched domains, which should only be done
1499                  * once the energy model is properly initialized for the policy
1500                  * in question.
1501                  *
1502                  * Also, this should be called before the policy is registered
1503                  * with the cooling framework.
1504                  */
1505                 if (cpufreq_driver->register_em)
1506                         cpufreq_driver->register_em(policy);
1507         }
1508
1509         ret = cpufreq_init_policy(policy);
1510         if (ret) {
1511                 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1512                        __func__, cpu, ret);
1513                 goto out_destroy_policy;
1514         }
1515
1516         up_write(&policy->rwsem);
1517
1518         kobject_uevent(&policy->kobj, KOBJ_ADD);
1519
1520         if (cpufreq_thermal_control_enabled(cpufreq_driver))
1521                 policy->cdev = of_cpufreq_cooling_register(policy);
1522
1523         pr_debug("initialization complete\n");
1524
1525         return 0;
1526
1527 out_destroy_policy:
1528         for_each_cpu(j, policy->real_cpus)
1529                 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1530
1531         up_write(&policy->rwsem);
1532
1533 out_offline_policy:
1534         if (cpufreq_driver->offline)
1535                 cpufreq_driver->offline(policy);
1536
1537 out_exit_policy:
1538         if (cpufreq_driver->exit)
1539                 cpufreq_driver->exit(policy);
1540
1541 out_free_policy:
1542         cpufreq_policy_free(policy);
1543         return ret;
1544 }
1545
1546 /**
1547  * cpufreq_add_dev - the cpufreq interface for a CPU device.
1548  * @dev: CPU device.
1549  * @sif: Subsystem interface structure pointer (not used)
1550  */
1551 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1552 {
1553         struct cpufreq_policy *policy;
1554         unsigned cpu = dev->id;
1555         int ret;
1556
1557         dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1558
1559         if (cpu_online(cpu)) {
1560                 ret = cpufreq_online(cpu);
1561                 if (ret)
1562                         return ret;
1563         }
1564
1565         /* Create sysfs link on CPU registration */
1566         policy = per_cpu(cpufreq_cpu_data, cpu);
1567         if (policy)
1568                 add_cpu_dev_symlink(policy, cpu);
1569
1570         return 0;
1571 }
1572
1573 static int cpufreq_offline(unsigned int cpu)
1574 {
1575         struct cpufreq_policy *policy;
1576         int ret;
1577
1578         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1579
1580         policy = cpufreq_cpu_get_raw(cpu);
1581         if (!policy) {
1582                 pr_debug("%s: No cpu_data found\n", __func__);
1583                 return 0;
1584         }
1585
1586         down_write(&policy->rwsem);
1587         if (has_target())
1588                 cpufreq_stop_governor(policy);
1589
1590         cpumask_clear_cpu(cpu, policy->cpus);
1591
1592         if (policy_is_inactive(policy)) {
1593                 if (has_target())
1594                         strncpy(policy->last_governor, policy->governor->name,
1595                                 CPUFREQ_NAME_LEN);
1596                 else
1597                         policy->last_policy = policy->policy;
1598         } else if (cpu == policy->cpu) {
1599                 /* Nominate new CPU */
1600                 policy->cpu = cpumask_any(policy->cpus);
1601         }
1602
1603         /* Start governor again for active policy */
1604         if (!policy_is_inactive(policy)) {
1605                 if (has_target()) {
1606                         ret = cpufreq_start_governor(policy);
1607                         if (ret)
1608                                 pr_err("%s: Failed to start governor\n", __func__);
1609                 }
1610
1611                 goto unlock;
1612         }
1613
1614         if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1615                 cpufreq_cooling_unregister(policy->cdev);
1616                 policy->cdev = NULL;
1617         }
1618
1619         if (has_target())
1620                 cpufreq_exit_governor(policy);
1621
1622         /*
1623          * Perform the ->offline() during light-weight tear-down, as
1624          * that allows fast recovery when the CPU comes back.
1625          */
1626         if (cpufreq_driver->offline) {
1627                 cpufreq_driver->offline(policy);
1628         } else if (cpufreq_driver->exit) {
1629                 cpufreq_driver->exit(policy);
1630                 policy->freq_table = NULL;
1631         }
1632
1633 unlock:
1634         up_write(&policy->rwsem);
1635         return 0;
1636 }
1637
1638 /*
1639  * cpufreq_remove_dev - remove a CPU device
1640  *
1641  * Removes the cpufreq interface for a CPU device.
1642  */
1643 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1644 {
1645         unsigned int cpu = dev->id;
1646         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1647
1648         if (!policy)
1649                 return;
1650
1651         if (cpu_online(cpu))
1652                 cpufreq_offline(cpu);
1653
1654         cpumask_clear_cpu(cpu, policy->real_cpus);
1655         remove_cpu_dev_symlink(policy, dev);
1656
1657         if (cpumask_empty(policy->real_cpus)) {
1658                 /* We did light-weight exit earlier, do full tear down now */
1659                 if (cpufreq_driver->offline)
1660                         cpufreq_driver->exit(policy);
1661
1662                 cpufreq_policy_free(policy);
1663         }
1664 }
1665
1666 /**
1667  * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1668  * @policy: Policy managing CPUs.
1669  * @new_freq: New CPU frequency.
1670  *
1671  * Adjust to the current frequency first and clean up later by either calling
1672  * cpufreq_update_policy(), or scheduling handle_update().
1673  */
1674 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1675                                 unsigned int new_freq)
1676 {
1677         struct cpufreq_freqs freqs;
1678
1679         pr_debug("Warning: CPU frequency out of sync: cpufreq believes %u kHz, hardware is at %u kHz\n",
1680                  policy->cur, new_freq);
1681
1682         freqs.old = policy->cur;
1683         freqs.new = new_freq;
1684
1685         cpufreq_freq_transition_begin(policy, &freqs);
1686         cpufreq_freq_transition_end(policy, &freqs, 0);
1687 }
1688
1689 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1690 {
1691         unsigned int new_freq;
1692
1693         new_freq = cpufreq_driver->get(policy->cpu);
1694         if (!new_freq)
1695                 return 0;
1696
1697         /*
1698          * If fast frequency switching is used with the given policy, the check
1699          * against policy->cur is pointless, so skip it in that case.
1700          */
1701         if (policy->fast_switch_enabled || !has_target())
1702                 return new_freq;
1703
1704         if (policy->cur != new_freq) {
1705                 cpufreq_out_of_sync(policy, new_freq);
1706                 if (update)
1707                         schedule_work(&policy->update);
1708         }
1709
1710         return new_freq;
1711 }
1712
1713 /**
1714  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1715  * @cpu: CPU number
1716  *
1717  * This is the last known freq, without actually getting it from the driver.
1718  * Return value will be the same as what is shown in scaling_cur_freq in sysfs.
1719  */
1720 unsigned int cpufreq_quick_get(unsigned int cpu)
1721 {
1722         struct cpufreq_policy *policy;
1723         unsigned int ret_freq = 0;
1724         unsigned long flags;
1725
1726         read_lock_irqsave(&cpufreq_driver_lock, flags);
1727
1728         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1729                 ret_freq = cpufreq_driver->get(cpu);
1730                 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1731                 return ret_freq;
1732         }
1733
1734         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1735
1736         policy = cpufreq_cpu_get(cpu);
1737         if (policy) {
1738                 ret_freq = policy->cur;
1739                 cpufreq_cpu_put(policy);
1740         }
1741
1742         return ret_freq;
1743 }
1744 EXPORT_SYMBOL(cpufreq_quick_get);
1745
1746 /**
1747  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1748  * @cpu: CPU number
1749  *
1750  * Just return the max possible frequency for a given CPU.
1751  */
1752 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1753 {
1754         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1755         unsigned int ret_freq = 0;
1756
1757         if (policy) {
1758                 ret_freq = policy->max;
1759                 cpufreq_cpu_put(policy);
1760         }
1761
1762         return ret_freq;
1763 }
1764 EXPORT_SYMBOL(cpufreq_quick_get_max);
1765
1766 /**
1767  * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1768  * @cpu: CPU number
1769  *
1770  * The default return value is the max_freq field of cpuinfo.
1771  */
1772 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1773 {
1774         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1775         unsigned int ret_freq = 0;
1776
1777         if (policy) {
1778                 ret_freq = policy->cpuinfo.max_freq;
1779                 cpufreq_cpu_put(policy);
1780         }
1781
1782         return ret_freq;
1783 }
1784 EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1785
1786 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1787 {
1788         if (unlikely(policy_is_inactive(policy)))
1789                 return 0;
1790
1791         return cpufreq_verify_current_freq(policy, true);
1792 }
1793
1794 /**
1795  * cpufreq_get - get the current CPU frequency (in kHz)
1796  * @cpu: CPU number
1797  *
1798  * Get the current frequency of the CPU.
1799  */
1800 unsigned int cpufreq_get(unsigned int cpu)
1801 {
1802         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1803         unsigned int ret_freq = 0;
1804
1805         if (policy) {
1806                 down_read(&policy->rwsem);
1807                 if (cpufreq_driver->get)
1808                         ret_freq = __cpufreq_get(policy);
1809                 up_read(&policy->rwsem);
1810
1811                 cpufreq_cpu_put(policy);
1812         }
1813
1814         return ret_freq;
1815 }
1816 EXPORT_SYMBOL(cpufreq_get);
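/*
 * Usage sketch (illustrative only, not part of the original file): reading the
 * frequency of a CPU from other kernel code.  cpufreq_quick_get() normally
 * returns the cached policy->cur value, while cpufreq_get() may query the
 * driver.  The CPU number and caller context below are hypothetical.
 *
 *	unsigned int cached_khz = cpufreq_quick_get(0);
 *	unsigned int actual_khz = cpufreq_get(0);
 *
 *	if (actual_khz)
 *		pr_info("CPU0: cached %u kHz, driver reports %u kHz\n",
 *			cached_khz, actual_khz);
 */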
1817
1818 static struct subsys_interface cpufreq_interface = {
1819         .name           = "cpufreq",
1820         .subsys         = &cpu_subsys,
1821         .add_dev        = cpufreq_add_dev,
1822         .remove_dev     = cpufreq_remove_dev,
1823 };
1824
1825 /*
1826  * In case the platform wants some specific frequency to be configured
1827  * during suspend.
1828  */
1829 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1830 {
1831         int ret;
1832
1833         if (!policy->suspend_freq) {
1834                 pr_debug("%s: suspend_freq not defined\n", __func__);
1835                 return 0;
1836         }
1837
1838         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1839                         policy->suspend_freq);
1840
1841         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1842                         CPUFREQ_RELATION_H);
1843         if (ret)
1844                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1845                                 __func__, policy->suspend_freq, ret);
1846
1847         return ret;
1848 }
1849 EXPORT_SYMBOL(cpufreq_generic_suspend);
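/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that wants a fixed frequency across suspend typically sets
 * policy->suspend_freq from its ->init() callback and points ->suspend at
 * cpufreq_generic_suspend().  The names my_driver_init/my_cpufreq_driver and
 * the frequency value are hypothetical.
 *
 *	static int my_driver_init(struct cpufreq_policy *policy)
 *	{
 *		...
 *		policy->suspend_freq = 800000;	// kHz, assumed safe frequency
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		...
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 */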
1850
1851 /**
1852  * cpufreq_suspend() - Suspend CPUFreq governors.
1853  *
1854  * Called during system wide Suspend/Hibernate cycles for suspending governors,
1855  * as some platforms can't change frequency after this point in the suspend
1856  * cycle, because some of the devices (e.g. i2c, regulators) used for changing
1857  * the frequency are suspended quickly after this point.
1858  */
1859 void cpufreq_suspend(void)
1860 {
1861         struct cpufreq_policy *policy;
1862
1863         if (!cpufreq_driver)
1864                 return;
1865
1866         if (!has_target() && !cpufreq_driver->suspend)
1867                 goto suspend;
1868
1869         pr_debug("%s: Suspending Governors\n", __func__);
1870
1871         for_each_active_policy(policy) {
1872                 if (has_target()) {
1873                         down_write(&policy->rwsem);
1874                         cpufreq_stop_governor(policy);
1875                         up_write(&policy->rwsem);
1876                 }
1877
1878                 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1879                         pr_err("%s: Failed to suspend driver: %s\n", __func__,
1880                                 cpufreq_driver->name);
1881         }
1882
1883 suspend:
1884         cpufreq_suspended = true;
1885 }
1886
1887 /**
1888  * cpufreq_resume() - Resume CPUFreq governors.
1889  *
1890  * Called during system wide Suspend/Hibernate cycle for resuming governors that
1891  * are suspended with cpufreq_suspend().
1892  */
1893 void cpufreq_resume(void)
1894 {
1895         struct cpufreq_policy *policy;
1896         int ret;
1897
1898         if (!cpufreq_driver)
1899                 return;
1900
1901         if (unlikely(!cpufreq_suspended))
1902                 return;
1903
1904         cpufreq_suspended = false;
1905
1906         if (!has_target() && !cpufreq_driver->resume)
1907                 return;
1908
1909         pr_debug("%s: Resuming Governors\n", __func__);
1910
1911         for_each_active_policy(policy) {
1912                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1913                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1914                                 policy);
1915                 } else if (has_target()) {
1916                         down_write(&policy->rwsem);
1917                         ret = cpufreq_start_governor(policy);
1918                         up_write(&policy->rwsem);
1919
1920                         if (ret)
1921                                 pr_err("%s: Failed to start governor for policy: %p\n",
1922                                        __func__, policy);
1923                 }
1924         }
1925 }
1926
1927 /**
1928  * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
1929  * @flags: Flags to test against the current cpufreq driver's flags.
1930  *
1931  * Assumes that the driver is there, so callers must ensure that this is the
1932  * case.
1933  */
1934 bool cpufreq_driver_test_flags(u16 flags)
1935 {
1936         return !!(cpufreq_driver->flags & flags);
1937 }
1938
1939 /**
1940  * cpufreq_get_current_driver - Return the current driver's name.
1941  *
1942  * Return the name string of the currently registered cpufreq driver or NULL if
1943  * none.
1944  */
1945 const char *cpufreq_get_current_driver(void)
1946 {
1947         if (cpufreq_driver)
1948                 return cpufreq_driver->name;
1949
1950         return NULL;
1951 }
1952 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1953
1954 /**
1955  * cpufreq_get_driver_data - Return current driver data.
1956  *
1957  * Return the private data of the currently registered cpufreq driver, or NULL
1958  * if no cpufreq driver has been registered.
1959  */
1960 void *cpufreq_get_driver_data(void)
1961 {
1962         if (cpufreq_driver)
1963                 return cpufreq_driver->driver_data;
1964
1965         return NULL;
1966 }
1967 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1968
1969 /*********************************************************************
1970  *                     NOTIFIER LISTS INTERFACE                      *
1971  *********************************************************************/
1972
1973 /**
1974  * cpufreq_register_notifier - Register a notifier with cpufreq.
1975  * @nb: notifier function to register.
1976  * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
1977  *
1978  * Add a notifier to one of two lists: either a list of notifiers that run on
1979  * clock rate changes (once before and once after every transition), or a list
1980  * of notifiers that run on cpufreq policy changes.
1981  *
1982  * This function may sleep and it has the same return values as
1983  * blocking_notifier_chain_register().
1984  */
1985 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1986 {
1987         int ret;
1988
1989         if (cpufreq_disabled())
1990                 return -EINVAL;
1991
1992         switch (list) {
1993         case CPUFREQ_TRANSITION_NOTIFIER:
1994                 mutex_lock(&cpufreq_fast_switch_lock);
1995
1996                 if (cpufreq_fast_switch_count > 0) {
1997                         mutex_unlock(&cpufreq_fast_switch_lock);
1998                         return -EBUSY;
1999                 }
2000                 ret = srcu_notifier_chain_register(
2001                                 &cpufreq_transition_notifier_list, nb);
2002                 if (!ret)
2003                         cpufreq_fast_switch_count--;
2004
2005                 mutex_unlock(&cpufreq_fast_switch_lock);
2006                 break;
2007         case CPUFREQ_POLICY_NOTIFIER:
2008                 ret = blocking_notifier_chain_register(
2009                                 &cpufreq_policy_notifier_list, nb);
2010                 break;
2011         default:
2012                 ret = -EINVAL;
2013         }
2014
2015         return ret;
2016 }
2017 EXPORT_SYMBOL(cpufreq_register_notifier);
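/*
 * Usage sketch (illustrative only, not part of the original file): registering
 * a transition notifier that logs completed frequency changes.  The callback
 * receives a struct cpufreq_freqs pointer as its data argument.  The names
 * my_transition_cb/my_nb are hypothetical.
 *
 *	static int my_transition_cb(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (event == CPUFREQ_POSTCHANGE)
 *			pr_info("cpufreq transition: %u -> %u kHz\n",
 *				freqs->old, freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_transition_cb,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */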
2018
2019 /**
2020  * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2021  * @nb: notifier block to be unregistered.
2022  * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2023  *
2024  * Remove a notifier from one of the cpufreq notifier lists.
2025  *
2026  * This function may sleep and it has the same return values as
2027  * blocking_notifier_chain_unregister().
2028  */
2029 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2030 {
2031         int ret;
2032
2033         if (cpufreq_disabled())
2034                 return -EINVAL;
2035
2036         switch (list) {
2037         case CPUFREQ_TRANSITION_NOTIFIER:
2038                 mutex_lock(&cpufreq_fast_switch_lock);
2039
2040                 ret = srcu_notifier_chain_unregister(
2041                                 &cpufreq_transition_notifier_list, nb);
2042                 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2043                         cpufreq_fast_switch_count++;
2044
2045                 mutex_unlock(&cpufreq_fast_switch_lock);
2046                 break;
2047         case CPUFREQ_POLICY_NOTIFIER:
2048                 ret = blocking_notifier_chain_unregister(
2049                                 &cpufreq_policy_notifier_list, nb);
2050                 break;
2051         default:
2052                 ret = -EINVAL;
2053         }
2054
2055         return ret;
2056 }
2057 EXPORT_SYMBOL(cpufreq_unregister_notifier);
2058
2059
2060 /*********************************************************************
2061  *                              GOVERNORS                            *
2062  *********************************************************************/
2063
2064 /**
2065  * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2066  * @policy: cpufreq policy to switch the frequency for.
2067  * @target_freq: New frequency to set (may be approximate).
2068  *
2069  * Carry out a fast frequency switch without sleeping.
2070  *
2071  * The driver's ->fast_switch() callback invoked by this function must be
2072  * suitable for being called from within RCU-sched read-side critical sections
2073  * and it is expected to select the minimum available frequency greater than or
2074  * equal to @target_freq (CPUFREQ_RELATION_L).
2075  *
2076  * This function must not be called if policy->fast_switch_enabled is unset.
2077  *
2078  * Governors calling this function must guarantee that it will never be invoked
2079  * twice in parallel for the same policy and that it will never be called in
2080  * parallel with either ->target() or ->target_index() for the same policy.
2081  *
2082  * Returns the actual frequency set for the CPU.
2083  *
2084  * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2085  * error condition, the hardware configuration must be preserved.
2086  */
2087 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2088                                         unsigned int target_freq)
2089 {
2090         unsigned int freq;
2091         int cpu;
2092
2093         target_freq = clamp_val(target_freq, policy->min, policy->max);
2094         freq = cpufreq_driver->fast_switch(policy, target_freq);
2095
2096         if (!freq)
2097                 return 0;
2098
2099         policy->cur = freq;
2100         arch_set_freq_scale(policy->related_cpus, freq,
2101                             policy->cpuinfo.max_freq);
2102         cpufreq_stats_record_transition(policy, freq);
2103
2104         if (trace_cpu_frequency_enabled()) {
2105                 for_each_cpu(cpu, policy->cpus)
2106                         trace_cpu_frequency(freq, cpu);
2107         }
2108
2109         return freq;
2110 }
2111 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
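/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * governor's update path might use the fast switch path when it is enabled for
 * the policy.  The surrounding governor code and the next_freq variable are
 * hypothetical.
 *
 *	if (policy->fast_switch_enabled) {
 *		unsigned int freq;
 *
 *		freq = cpufreq_driver_fast_switch(policy, next_freq);
 *		if (!freq)
 *			return;		// error: hardware state left unchanged
 *		// record 'freq' in the governor's own bookkeeping
 *	}
 */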
2112
2113 /**
2114  * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2115  * @cpu: Target CPU.
2116  * @min_perf: Minimum (required) performance level (units of @capacity).
2117  * @target_perf: Target (desired) performance level (units of @capacity).
2118  * @capacity: Capacity of the target CPU.
2119  *
2120  * Carry out a fast performance level switch of @cpu without sleeping.
2121  *
2122  * The driver's ->adjust_perf() callback invoked by this function must be
2123  * suitable for being called from within RCU-sched read-side critical sections
2124  * and it is expected to select a suitable performance level equal to or above
2125  * @min_perf and preferably equal to or below @target_perf.
2126  *
2127  * This function must not be called if policy->fast_switch_enabled is unset.
2128  *
2129  * Governors calling this function must guarantee that it will never be invoked
2130  * twice in parallel for the same CPU and that it will never be called in
2131  * parallel with either ->target() or ->target_index() or ->fast_switch() for
2132  * the same CPU.
2133  */
2134 void cpufreq_driver_adjust_perf(unsigned int cpu,
2135                                  unsigned long min_perf,
2136                                  unsigned long target_perf,
2137                                  unsigned long capacity)
2138 {
2139         cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2140 }
2141
2142 /**
2143  * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2144  *
2145  * Return 'true' if the ->adjust_perf callback is present for the
2146  * current driver or 'false' otherwise.
2147  */
2148 bool cpufreq_driver_has_adjust_perf(void)
2149 {
2150         return !!cpufreq_driver->adjust_perf;
2151 }
2152
2153 /* Must set freqs->new to intermediate frequency */
2154 static int __target_intermediate(struct cpufreq_policy *policy,
2155                                  struct cpufreq_freqs *freqs, int index)
2156 {
2157         int ret;
2158
2159         freqs->new = cpufreq_driver->get_intermediate(policy, index);
2160
2161         /* We don't need to switch to intermediate freq */
2162         if (!freqs->new)
2163                 return 0;
2164
2165         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2166                  __func__, policy->cpu, freqs->old, freqs->new);
2167
2168         cpufreq_freq_transition_begin(policy, freqs);
2169         ret = cpufreq_driver->target_intermediate(policy, index);
2170         cpufreq_freq_transition_end(policy, freqs, ret);
2171
2172         if (ret)
2173                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
2174                        __func__, ret);
2175
2176         return ret;
2177 }
2178
2179 static int __target_index(struct cpufreq_policy *policy, int index)
2180 {
2181         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2182         unsigned int restore_freq, intermediate_freq = 0;
2183         unsigned int newfreq = policy->freq_table[index].frequency;
2184         int retval = -EINVAL;
2185         bool notify;
2186
2187         if (newfreq == policy->cur)
2188                 return 0;
2189
2190         /* Save last value to restore later on errors */
2191         restore_freq = policy->cur;
2192
2193         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2194         if (notify) {
2195                 /* Handle switching to intermediate frequency */
2196                 if (cpufreq_driver->get_intermediate) {
2197                         retval = __target_intermediate(policy, &freqs, index);
2198                         if (retval)
2199                                 return retval;
2200
2201                         intermediate_freq = freqs.new;
2202                         /* Set old freq to intermediate */
2203                         if (intermediate_freq)
2204                                 freqs.old = freqs.new;
2205                 }
2206
2207                 freqs.new = newfreq;
2208                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2209                          __func__, policy->cpu, freqs.old, freqs.new);
2210
2211                 cpufreq_freq_transition_begin(policy, &freqs);
2212         }
2213
2214         retval = cpufreq_driver->target_index(policy, index);
2215         if (retval)
2216                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2217                        retval);
2218
2219         if (notify) {
2220                 cpufreq_freq_transition_end(policy, &freqs, retval);
2221
2222                 /*
2223                  * Failed after setting to intermediate freq? Driver should have
2224                  * reverted back to initial frequency and so should we. Check
2225                  * here for intermediate_freq instead of get_intermediate, in
2226                  * case we haven't switched to intermediate freq at all.
2227                  */
2228                 if (unlikely(retval && intermediate_freq)) {
2229                         freqs.old = intermediate_freq;
2230                         freqs.new = restore_freq;
2231                         cpufreq_freq_transition_begin(policy, &freqs);
2232                         cpufreq_freq_transition_end(policy, &freqs, 0);
2233                 }
2234         }
2235
2236         return retval;
2237 }
2238
2239 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2240                             unsigned int target_freq,
2241                             unsigned int relation)
2242 {
2243         unsigned int old_target_freq = target_freq;
2244
2245         if (cpufreq_disabled())
2246                 return -ENODEV;
2247
2248         target_freq = __resolve_freq(policy, target_freq, relation);
2249
2250         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2251                  policy->cpu, target_freq, relation, old_target_freq);
2252
2253         /*
2254          * This might look like a redundant call as we are checking it again
2255          * after finding the index. But it is left intentionally for cases where
2256          * exactly the same frequency is requested again, so that we can save
2257          * a few function calls.
2258          */
2259         if (target_freq == policy->cur &&
2260             !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2261                 return 0;
2262
2263         if (cpufreq_driver->target)
2264                 return cpufreq_driver->target(policy, target_freq, relation);
2265
2266         if (!cpufreq_driver->target_index)
2267                 return -EINVAL;
2268
2269         return __target_index(policy, policy->cached_resolved_idx);
2270 }
2271 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2272
2273 int cpufreq_driver_target(struct cpufreq_policy *policy,
2274                           unsigned int target_freq,
2275                           unsigned int relation)
2276 {
2277         int ret;
2278
2279         down_write(&policy->rwsem);
2280
2281         ret = __cpufreq_driver_target(policy, target_freq, relation);
2282
2283         up_write(&policy->rwsem);
2284
2285         return ret;
2286 }
2287 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
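/*
 * Usage sketch (illustrative only, not part of the original file): requesting a
 * new frequency from process context.  cpufreq_driver_target() takes
 * policy->rwsem itself; callers already holding it must use
 * __cpufreq_driver_target() instead.  The target value below is hypothetical.
 *
 *	ret = cpufreq_driver_target(policy, 1200000, CPUFREQ_RELATION_L);
 *	if (ret)
 *		pr_err("failed to switch to 1200000 kHz: %d\n", ret);
 */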
2288
2289 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2290 {
2291         return NULL;
2292 }
2293
2294 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2295 {
2296         int ret;
2297
2298         /* Don't start any governor operations if we are entering suspend */
2299         if (cpufreq_suspended)
2300                 return 0;
2301         /*
2302          * The governor might not have been initialized here if an ACPI _PPC
2303          * change notification happened, so check it.
2304          */
2305         if (!policy->governor)
2306                 return -EINVAL;
2307
2308         /* Platform doesn't want dynamic frequency switching? */
2309         if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2310             cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2311                 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2312
2313                 if (gov) {
2314                         pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2315                                 policy->governor->name, gov->name);
2316                         policy->governor = gov;
2317                 } else {
2318                         return -EINVAL;
2319                 }
2320         }
2321
2322         if (!try_module_get(policy->governor->owner))
2323                 return -EINVAL;
2324
2325         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2326
2327         if (policy->governor->init) {
2328                 ret = policy->governor->init(policy);
2329                 if (ret) {
2330                         module_put(policy->governor->owner);
2331                         return ret;
2332                 }
2333         }
2334
2335         policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2336
2337         return 0;
2338 }
2339
2340 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2341 {
2342         if (cpufreq_suspended || !policy->governor)
2343                 return;
2344
2345         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2346
2347         if (policy->governor->exit)
2348                 policy->governor->exit(policy);
2349
2350         module_put(policy->governor->owner);
2351 }
2352
2353 int cpufreq_start_governor(struct cpufreq_policy *policy)
2354 {
2355         int ret;
2356
2357         if (cpufreq_suspended)
2358                 return 0;
2359
2360         if (!policy->governor)
2361                 return -EINVAL;
2362
2363         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2364
2365         if (cpufreq_driver->get)
2366                 cpufreq_verify_current_freq(policy, false);
2367
2368         if (policy->governor->start) {
2369                 ret = policy->governor->start(policy);
2370                 if (ret)
2371                         return ret;
2372         }
2373
2374         if (policy->governor->limits)
2375                 policy->governor->limits(policy);
2376
2377         return 0;
2378 }
2379
2380 void cpufreq_stop_governor(struct cpufreq_policy *policy)
2381 {
2382         if (cpufreq_suspended || !policy->governor)
2383                 return;
2384
2385         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2386
2387         if (policy->governor->stop)
2388                 policy->governor->stop(policy);
2389 }
2390
2391 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2392 {
2393         if (cpufreq_suspended || !policy->governor)
2394                 return;
2395
2396         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2397
2398         if (policy->governor->limits)
2399                 policy->governor->limits(policy);
2400 }
2401
2402 int cpufreq_register_governor(struct cpufreq_governor *governor)
2403 {
2404         int err;
2405
2406         if (!governor)
2407                 return -EINVAL;
2408
2409         if (cpufreq_disabled())
2410                 return -ENODEV;
2411
2412         mutex_lock(&cpufreq_governor_mutex);
2413
2414         err = -EBUSY;
2415         if (!find_governor(governor->name)) {
2416                 err = 0;
2417                 list_add(&governor->governor_list, &cpufreq_governor_list);
2418         }
2419
2420         mutex_unlock(&cpufreq_governor_mutex);
2421         return err;
2422 }
2423 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
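/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * governor module typically registers and unregisters itself.  The governor
 * structure, its name and its callbacks are hypothetical.
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name	= "mygov",
 *		.owner	= THIS_MODULE,
 *		.start	= my_gov_start,
 *		.stop	= my_gov_stop,
 *		.limits	= my_gov_limits,
 *	};
 *
 *	static int __init my_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&my_governor);
 *	}
 *
 *	static void __exit my_gov_exit(void)
 *	{
 *		cpufreq_unregister_governor(&my_governor);
 *	}
 */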
2424
2425 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2426 {
2427         struct cpufreq_policy *policy;
2428         unsigned long flags;
2429
2430         if (!governor)
2431                 return;
2432
2433         if (cpufreq_disabled())
2434                 return;
2435
2436         /* clear last_governor for all inactive policies */
2437         read_lock_irqsave(&cpufreq_driver_lock, flags);
2438         for_each_inactive_policy(policy) {
2439                 if (!strcmp(policy->last_governor, governor->name)) {
2440                         policy->governor = NULL;
2441                         strcpy(policy->last_governor, "\0");
2442                 }
2443         }
2444         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2445
2446         mutex_lock(&cpufreq_governor_mutex);
2447         list_del(&governor->governor_list);
2448         mutex_unlock(&cpufreq_governor_mutex);
2449 }
2450 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2451
2452
2453 /*********************************************************************
2454  *                          POLICY INTERFACE                         *
2455  *********************************************************************/
2456
2457 /**
2458  * cpufreq_get_policy - get the current cpufreq_policy
2459  * @policy: struct cpufreq_policy into which the current cpufreq_policy
2460  *      is written
2461  * @cpu: CPU to find the policy for
2462  *
2463  * Reads the current cpufreq policy.
2464  */
2465 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2466 {
2467         struct cpufreq_policy *cpu_policy;
2468         if (!policy)
2469                 return -EINVAL;
2470
2471         cpu_policy = cpufreq_cpu_get(cpu);
2472         if (!cpu_policy)
2473                 return -EINVAL;
2474
2475         memcpy(policy, cpu_policy, sizeof(*policy));
2476
2477         cpufreq_cpu_put(cpu_policy);
2478         return 0;
2479 }
2480 EXPORT_SYMBOL(cpufreq_get_policy);
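/*
 * Usage sketch (illustrative only, not part of the original file): taking a
 * snapshot of a CPU's policy.  The copy is not kept in sync with later
 * updates, so it is only good for a one-off read of fields such as min/max.
 *
 *	struct cpufreq_policy pol;
 *
 *	if (!cpufreq_get_policy(&pol, cpu))
 *		pr_info("CPU%u limits: %u - %u kHz\n", cpu, pol.min, pol.max);
 */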
2481
2482 /**
2483  * cpufreq_set_policy - Modify cpufreq policy parameters.
2484  * @policy: Policy object to modify.
2485  * @new_gov: Policy governor pointer.
2486  * @new_pol: Policy value (for drivers with built-in governors).
2487  *
2488  * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2489  * limits to be set for the policy, update @policy with the verified limits
2490  * values and either invoke the driver's ->setpolicy() callback (if present) or
2491  * carry out a governor update for @policy.  That is, run the current governor's
2492  * ->limits() callback (if @new_gov points to the same object as the one in
2493  * @policy) or replace the governor for @policy with @new_gov.
2494  *
2495  * The cpuinfo part of @policy is not updated by this function.
2496  */
2497 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2498                               struct cpufreq_governor *new_gov,
2499                               unsigned int new_pol)
2500 {
2501         struct cpufreq_policy_data new_data;
2502         struct cpufreq_governor *old_gov;
2503         int ret;
2504
2505         memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2506         new_data.freq_table = policy->freq_table;
2507         new_data.cpu = policy->cpu;
2508         /*
2509          * The PM QoS framework collects all the requests from users and
2510          * provides us the final aggregated value here.
2511          */
2512         new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2513         new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2514
2515         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2516                  new_data.cpu, new_data.min, new_data.max);
2517
2518         /*
2519          * Verify that the CPU speed can be set within these limits and make sure
2520          * that min <= max.
2521          */
2522         ret = cpufreq_driver->verify(&new_data);
2523         if (ret)
2524                 return ret;
2525
2526         policy->min = new_data.min;
2527         policy->max = new_data.max;
2528         trace_cpu_frequency_limits(policy);
2529
2530         policy->cached_target_freq = UINT_MAX;
2531
2532         pr_debug("new min and max freqs are %u - %u kHz\n",
2533                  policy->min, policy->max);
2534
2535         if (cpufreq_driver->setpolicy) {
2536                 policy->policy = new_pol;
2537                 pr_debug("setting range\n");
2538                 return cpufreq_driver->setpolicy(policy);
2539         }
2540
2541         if (new_gov == policy->governor) {
2542                 pr_debug("governor limits update\n");
2543                 cpufreq_governor_limits(policy);
2544                 return 0;
2545         }
2546
2547         pr_debug("governor switch\n");
2548
2549         /* save old, working values */
2550         old_gov = policy->governor;
2551         /* end old governor */
2552         if (old_gov) {
2553                 cpufreq_stop_governor(policy);
2554                 cpufreq_exit_governor(policy);
2555         }
2556
2557         /* start new governor */
2558         policy->governor = new_gov;
2559         ret = cpufreq_init_governor(policy);
2560         if (!ret) {
2561                 ret = cpufreq_start_governor(policy);
2562                 if (!ret) {
2563                         pr_debug("governor change\n");
2564                         sched_cpufreq_governor_change(policy, old_gov);
2565                         return 0;
2566                 }
2567                 cpufreq_exit_governor(policy);
2568         }
2569
2570         /* new governor failed, so re-start old one */
2571         pr_debug("starting governor %s failed\n", policy->governor->name);
2572         if (old_gov) {
2573                 policy->governor = old_gov;
2574                 if (cpufreq_init_governor(policy))
2575                         policy->governor = NULL;
2576                 else
2577                         cpufreq_start_governor(policy);
2578         }
2579
2580         return ret;
2581 }
2582
2583 /**
2584  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2585  * @cpu: CPU to re-evaluate the policy for.
2586  *
2587  * Update the current frequency for the cpufreq policy of @cpu and use
2588  * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2589  * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2590  * for the policy in question, among other things.
2591  */
2592 void cpufreq_update_policy(unsigned int cpu)
2593 {
2594         struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2595
2596         if (!policy)
2597                 return;
2598
2599         /*
2600          * BIOS might change freq behind our back
2601          * -> ask driver for current freq and notify governors about a change
2602          */
2603         if (cpufreq_driver->get && has_target() &&
2604             (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2605                 goto unlock;
2606
2607         refresh_frequency_limits(policy);
2608
2609 unlock:
2610         cpufreq_cpu_release(policy);
2611 }
2612 EXPORT_SYMBOL(cpufreq_update_policy);
2613
2614 /**
2615  * cpufreq_update_limits - Update policy limits for a given CPU.
2616  * @cpu: CPU to update the policy limits for.
2617  *
2618  * Invoke the driver's ->update_limits callback if present or call
2619  * cpufreq_update_policy() for @cpu.
2620  */
2621 void cpufreq_update_limits(unsigned int cpu)
2622 {
2623         if (cpufreq_driver->update_limits)
2624                 cpufreq_driver->update_limits(cpu);
2625         else
2626                 cpufreq_update_policy(cpu);
2627 }
2628 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2629
2630 /*********************************************************************
2631  *               BOOST                                               *
2632  *********************************************************************/
2633 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2634 {
2635         int ret;
2636
2637         if (!policy->freq_table)
2638                 return -ENXIO;
2639
2640         ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2641         if (ret) {
2642                 pr_err("%s: Policy frequency update failed\n", __func__);
2643                 return ret;
2644         }
2645
2646         ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2647         if (ret < 0)
2648                 return ret;
2649
2650         return 0;
2651 }
2652
2653 int cpufreq_boost_trigger_state(int state)
2654 {
2655         struct cpufreq_policy *policy;
2656         unsigned long flags;
2657         int ret = 0;
2658
2659         if (cpufreq_driver->boost_enabled == state)
2660                 return 0;
2661
2662         write_lock_irqsave(&cpufreq_driver_lock, flags);
2663         cpufreq_driver->boost_enabled = state;
2664         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2665
2666         cpus_read_lock();
2667         for_each_active_policy(policy) {
2668                 ret = cpufreq_driver->set_boost(policy, state);
2669                 if (ret)
2670                         goto err_reset_state;
2671         }
2672         cpus_read_unlock();
2673
2674         return 0;
2675
2676 err_reset_state:
2677         cpus_read_unlock();
2678
2679         write_lock_irqsave(&cpufreq_driver_lock, flags);
2680         cpufreq_driver->boost_enabled = !state;
2681         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2682
2683         pr_err("%s: Cannot %s BOOST\n",
2684                __func__, state ? "enable" : "disable");
2685
2686         return ret;
2687 }
2688
2689 static bool cpufreq_boost_supported(void)
2690 {
2691         return cpufreq_driver->set_boost;
2692 }
2693
2694 static int create_boost_sysfs_file(void)
2695 {
2696         int ret;
2697
2698         ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2699         if (ret)
2700                 pr_err("%s: cannot register global BOOST sysfs file\n",
2701                        __func__);
2702
2703         return ret;
2704 }
2705
2706 static void remove_boost_sysfs_file(void)
2707 {
2708         if (cpufreq_boost_supported())
2709                 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2710 }
2711
2712 int cpufreq_enable_boost_support(void)
2713 {
2714         if (!cpufreq_driver)
2715                 return -EINVAL;
2716
2717         if (cpufreq_boost_supported())
2718                 return 0;
2719
2720         cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2721
2722         /* This will get removed on driver unregister */
2723         return create_boost_sysfs_file();
2724 }
2725 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
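/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * whose frequency table contains boost entries can opt into the generic
 * software boost handling from its ->init() callback, assuming
 * policy_has_boost_freq() is available for its configuration.  Error handling
 * is abbreviated.
 *
 *	if (policy_has_boost_freq(policy)) {
 *		ret = cpufreq_enable_boost_support();
 *		if (ret)
 *			return ret;
 *	}
 */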
2726
2727 int cpufreq_boost_enabled(void)
2728 {
2729         return cpufreq_driver->boost_enabled;
2730 }
2731 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2732
2733 /*********************************************************************
2734  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2735  *********************************************************************/
2736 static enum cpuhp_state hp_online;
2737
2738 static int cpuhp_cpufreq_online(unsigned int cpu)
2739 {
2740         cpufreq_online(cpu);
2741
2742         return 0;
2743 }
2744
2745 static int cpuhp_cpufreq_offline(unsigned int cpu)
2746 {
2747         cpufreq_offline(cpu);
2748
2749         return 0;
2750 }
2751
2752 /**
2753  * cpufreq_register_driver - register a CPU Frequency driver
2754  * @driver_data: A struct cpufreq_driver containing the values
2755  * submitted by the CPU Frequency driver.
2756  *
2757  * Registers a CPU Frequency driver to this core code. This code
2758  * returns zero on success, -EEXIST when another driver got here first
2759  * (and isn't unregistered in the meantime).
2760  *
2761  */
2762 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2763 {
2764         unsigned long flags;
2765         int ret;
2766
2767         if (cpufreq_disabled())
2768                 return -ENODEV;
2769
2770         /*
2771          * The cpufreq core depends heavily on the availability of device
2772          * structures; make sure they are available before proceeding further.
2773          */
2774         if (!get_cpu_device(0))
2775                 return -EPROBE_DEFER;
2776
2777         if (!driver_data || !driver_data->verify || !driver_data->init ||
2778             !(driver_data->setpolicy || driver_data->target_index ||
2779                     driver_data->target) ||
2780              (driver_data->setpolicy && (driver_data->target_index ||
2781                     driver_data->target)) ||
2782              (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2783              (!driver_data->online != !driver_data->offline))
2784                 return -EINVAL;
2785
2786         pr_debug("trying to register driver %s\n", driver_data->name);
2787
2788         /* Protect against concurrent CPU online/offline. */
2789         cpus_read_lock();
2790
2791         write_lock_irqsave(&cpufreq_driver_lock, flags);
2792         if (cpufreq_driver) {
2793                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2794                 ret = -EEXIST;
2795                 goto out;
2796         }
2797         cpufreq_driver = driver_data;
2798         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2799
2800         /*
2801          * Mark support for the scheduler's frequency invariance engine for
2802          * drivers that implement target(), target_index() or fast_switch().
2803          */
2804         if (!cpufreq_driver->setpolicy) {
2805                 static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2806                 pr_debug("supports frequency invariance");
2807         }
2808
2809         if (driver_data->setpolicy)
2810                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2811
2812         if (cpufreq_boost_supported()) {
2813                 ret = create_boost_sysfs_file();
2814                 if (ret)
2815                         goto err_null_driver;
2816         }
2817
2818         ret = subsys_interface_register(&cpufreq_interface);
2819         if (ret)
2820                 goto err_boost_unreg;
2821
2822         if (unlikely(list_empty(&cpufreq_policy_list))) {
2823                 /* if all ->init() calls failed, unregister */
2824                 ret = -ENODEV;
2825                 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2826                          driver_data->name);
2827                 goto err_if_unreg;
2828         }
2829
2830         ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2831                                                    "cpufreq:online",
2832                                                    cpuhp_cpufreq_online,
2833                                                    cpuhp_cpufreq_offline);
2834         if (ret < 0)
2835                 goto err_if_unreg;
2836         hp_online = ret;
2837         ret = 0;
2838
2839         pr_debug("driver %s up and running\n", driver_data->name);
2840         goto out;
2841
2842 err_if_unreg:
2843         subsys_interface_unregister(&cpufreq_interface);
2844 err_boost_unreg:
2845         remove_boost_sysfs_file();
2846 err_null_driver:
2847         write_lock_irqsave(&cpufreq_driver_lock, flags);
2848         cpufreq_driver = NULL;
2849         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2850 out:
2851         cpus_read_unlock();
2852         return ret;
2853 }
2854 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
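/*
 * Usage sketch (illustrative only, not part of the original file): a minimal
 * table-based driver registration.  The init/target callbacks and the probe
 * context are hypothetical; real drivers also fill in .get, .exit, etc.
 *
 *	static struct cpufreq_driver my_cpufreq_driver = {
 *		.name		= "mydrv",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= my_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= my_cpufreq_target_index,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&my_cpufreq_driver);
 *	if (ret)
 *		dev_err(dev, "failed to register cpufreq driver: %d\n", ret);
 */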
2855
2856 /*
2857  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2858  *
2859  * Unregister the current CPUFreq driver. Only call this if you have
2860  * the right to do so, i.e. if you have succeeded in initialising before!
2861  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2862  * currently not initialised.
2863  */
2864 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2865 {
2866         unsigned long flags;
2867
2868         if (!cpufreq_driver || (driver != cpufreq_driver))
2869                 return -EINVAL;
2870
2871         pr_debug("unregistering driver %s\n", driver->name);
2872
2873         /* Protect against concurrent cpu hotplug */
2874         cpus_read_lock();
2875         subsys_interface_unregister(&cpufreq_interface);
2876         remove_boost_sysfs_file();
2877         static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
2878         cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2879
2880         write_lock_irqsave(&cpufreq_driver_lock, flags);
2881
2882         cpufreq_driver = NULL;
2883
2884         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2885         cpus_read_unlock();
2886
2887         return 0;
2888 }
2889 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2890
2891 static int __init cpufreq_core_init(void)
2892 {
2893         struct cpufreq_governor *gov = cpufreq_default_governor();
2894
2895         if (cpufreq_disabled())
2896                 return -ENODEV;
2897
2898         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2899         BUG_ON(!cpufreq_global_kobject);
2900
2901         if (!strlen(default_governor))
2902                 strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
2903
2904         return 0;
2905 }
2906 module_param(off, int, 0444);
2907 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
2908 core_initcall(cpufreq_core_init);