// SPDX-License-Identifier: GPL-2.0-only
/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 */
10 #include <linux/kernel.h>
11 #include <linux/kmod.h>
12 #include <linux/sched.h>
13 #include <linux/debugfs.h>
14 #include <linux/errno.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/export.h>
18 #include <linux/slab.h>
19 #include <linux/stat.h>
20 #include <linux/pm_opp.h>
21 #include <linux/devfreq.h>
22 #include <linux/workqueue.h>
23 #include <linux/platform_device.h>
24 #include <linux/list.h>
25 #include <linux/printk.h>
26 #include <linux/hrtimer.h>
28 #include <linux/pm_qos.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/devfreq.h>
34 #define HZ_PER_KHZ 1000
36 static struct class *devfreq_class;
37 static struct dentry *devfreq_debugfs;
/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
44 static struct workqueue_struct *devfreq_wq;
46 /* The list of all device-devfreq governors */
47 static LIST_HEAD(devfreq_governor_list);
48 /* The list of all device-devfreq */
49 static LIST_HEAD(devfreq_list);
50 static DEFINE_MUTEX(devfreq_list_lock);
53 * find_device_devfreq() - find devfreq struct using device pointer
54 * @dev: device pointer used to lookup device devfreq.
56 * Search the list of device devfreqs and return the matched device's
57 * devfreq info. devfreq_list_lock should be held by the caller.
59 static struct devfreq *find_device_devfreq(struct device *dev)
61 struct devfreq *tmp_devfreq;
63 lockdep_assert_held(&devfreq_list_lock);
65 if (IS_ERR_OR_NULL(dev)) {
66 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
67 return ERR_PTR(-EINVAL);
70 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
71 if (tmp_devfreq->dev.parent == dev)
75 return ERR_PTR(-ENODEV);
78 static unsigned long find_available_min_freq(struct devfreq *devfreq)
80 struct dev_pm_opp *opp;
81 unsigned long min_freq = 0;
83 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
92 static unsigned long find_available_max_freq(struct devfreq *devfreq)
94 struct dev_pm_opp *opp;
95 unsigned long max_freq = ULONG_MAX;
97 opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
107 * get_freq_range() - Get the current freq range
108 * @devfreq: the devfreq instance
109 * @min_freq: the min frequency
110 * @max_freq: the max frequency
112 * This takes into consideration all constraints.
114 static void get_freq_range(struct devfreq *devfreq,
115 unsigned long *min_freq,
116 unsigned long *max_freq)
118 unsigned long *freq_table = devfreq->profile->freq_table;
119 s32 qos_min_freq, qos_max_freq;
121 lockdep_assert_held(&devfreq->lock);
124 * Initialize minimum/maximum frequency from freq table.
125 * The devfreq drivers can initialize this in either ascending or
126 * descending order and devfreq core supports both.
128 if (freq_table[0] < freq_table[devfreq->profile->max_state - 1]) {
129 *min_freq = freq_table[0];
130 *max_freq = freq_table[devfreq->profile->max_state - 1];
132 *min_freq = freq_table[devfreq->profile->max_state - 1];
133 *max_freq = freq_table[0];
136 /* Apply constraints from PM QoS */
137 qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
138 DEV_PM_QOS_MIN_FREQUENCY);
139 qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
140 DEV_PM_QOS_MAX_FREQUENCY);
141 *min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
142 if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
143 *max_freq = min(*max_freq,
144 (unsigned long)HZ_PER_KHZ * qos_max_freq);
146 /* Apply constraints from OPP interface */
147 *min_freq = max(*min_freq, devfreq->scaling_min_freq);
148 *max_freq = min(*max_freq, devfreq->scaling_max_freq);
150 if (*min_freq > *max_freq)
151 *min_freq = *max_freq;
155 * devfreq_get_freq_level() - Lookup freq_table for the frequency
156 * @devfreq: the devfreq instance
157 * @freq: the target frequency
159 static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
163 for (lev = 0; lev < devfreq->profile->max_state; lev++)
164 if (freq == devfreq->profile->freq_table[lev])
170 static int set_freq_table(struct devfreq *devfreq)
172 struct devfreq_dev_profile *profile = devfreq->profile;
173 struct dev_pm_opp *opp;
177 /* Initialize the freq_table from OPP table */
178 count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
182 profile->max_state = count;
183 profile->freq_table = devm_kcalloc(devfreq->dev.parent,
185 sizeof(*profile->freq_table),
187 if (!profile->freq_table) {
188 profile->max_state = 0;
192 for (i = 0, freq = 0; i < profile->max_state; i++, freq++) {
193 opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &freq);
195 devm_kfree(devfreq->dev.parent, profile->freq_table);
196 profile->max_state = 0;
200 profile->freq_table[i] = freq;
207 * devfreq_update_status() - Update statistics of devfreq behavior
208 * @devfreq: the devfreq instance
209 * @freq: the update target frequency
211 int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
213 int lev, prev_lev, ret = 0;
216 lockdep_assert_held(&devfreq->lock);
217 cur_time = get_jiffies_64();
219 /* Immediately exit if previous_freq is not initialized yet. */
220 if (!devfreq->previous_freq)
223 prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
229 devfreq->stats.time_in_state[prev_lev] +=
230 cur_time - devfreq->stats.last_update;
232 lev = devfreq_get_freq_level(devfreq, freq);
238 if (lev != prev_lev) {
239 devfreq->stats.trans_table[
240 (prev_lev * devfreq->profile->max_state) + lev]++;
241 devfreq->stats.total_trans++;
245 devfreq->stats.last_update = cur_time;
248 EXPORT_SYMBOL(devfreq_update_status);
251 * find_devfreq_governor() - find devfreq governor from name
252 * @name: name of the governor
254 * Search the list of devfreq governors and return the matched
255 * governor's pointer. devfreq_list_lock should be held by the caller.
257 static struct devfreq_governor *find_devfreq_governor(const char *name)
259 struct devfreq_governor *tmp_governor;
261 lockdep_assert_held(&devfreq_list_lock);
263 if (IS_ERR_OR_NULL(name)) {
264 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
265 return ERR_PTR(-EINVAL);
268 list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
269 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
273 return ERR_PTR(-ENODEV);
277 * try_then_request_governor() - Try to find the governor and request the
278 * module if is not found.
279 * @name: name of the governor
281 * Search the list of devfreq governors and request the module and try again
282 * if is not found. This can happen when both drivers (the governor driver
283 * and the driver that call devfreq_add_device) are built as modules.
284 * devfreq_list_lock should be held by the caller. Returns the matched
285 * governor's pointer or an error pointer.
287 static struct devfreq_governor *try_then_request_governor(const char *name)
289 struct devfreq_governor *governor;
292 lockdep_assert_held(&devfreq_list_lock);
294 if (IS_ERR_OR_NULL(name)) {
295 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
296 return ERR_PTR(-EINVAL);
299 governor = find_devfreq_governor(name);
300 if (IS_ERR(governor)) {
301 mutex_unlock(&devfreq_list_lock);
303 if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
305 err = request_module("governor_%s", "simpleondemand");
307 err = request_module("governor_%s", name);
308 /* Restore previous state before return */
309 mutex_lock(&devfreq_list_lock);
311 return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
313 governor = find_devfreq_governor(name);
319 static int devfreq_notify_transition(struct devfreq *devfreq,
320 struct devfreq_freqs *freqs, unsigned int state)
326 case DEVFREQ_PRECHANGE:
327 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
328 DEVFREQ_PRECHANGE, freqs);
331 case DEVFREQ_POSTCHANGE:
332 srcu_notifier_call_chain(&devfreq->transition_notifier_list,
333 DEVFREQ_POSTCHANGE, freqs);
342 static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
345 struct devfreq_freqs freqs;
346 unsigned long cur_freq;
349 if (devfreq->profile->get_cur_freq)
350 devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
352 cur_freq = devfreq->previous_freq;
354 freqs.old = cur_freq;
355 freqs.new = new_freq;
356 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
358 err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
360 freqs.new = cur_freq;
361 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
365 freqs.new = new_freq;
366 devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
368 if (devfreq_update_status(devfreq, new_freq))
369 dev_err(&devfreq->dev,
370 "Couldn't update frequency transition information.\n");
372 devfreq->previous_freq = new_freq;
374 if (devfreq->suspend_freq)
375 devfreq->resume_freq = cur_freq;
380 /* Load monitoring helper functions for governors use */
383 * update_devfreq() - Reevaluate the device and configure frequency.
384 * @devfreq: the devfreq instance.
386 * Note: Lock devfreq->lock before calling update_devfreq
387 * This function is exported for governors.
389 int update_devfreq(struct devfreq *devfreq)
391 unsigned long freq, min_freq, max_freq;
395 lockdep_assert_held(&devfreq->lock);
397 if (!devfreq->governor)
400 /* Reevaluate the proper frequency */
401 err = devfreq->governor->get_target_freq(devfreq, &freq);
404 get_freq_range(devfreq, &min_freq, &max_freq);
406 if (freq < min_freq) {
408 flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
410 if (freq > max_freq) {
412 flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
415 return devfreq_set_target(devfreq, freq, flags);
418 EXPORT_SYMBOL(update_devfreq);
421 * devfreq_monitor() - Periodically poll devfreq objects.
422 * @work: the work struct used to run devfreq_monitor periodically.
425 static void devfreq_monitor(struct work_struct *work)
428 struct devfreq *devfreq = container_of(work,
429 struct devfreq, work.work);
431 mutex_lock(&devfreq->lock);
432 err = update_devfreq(devfreq);
434 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
436 queue_delayed_work(devfreq_wq, &devfreq->work,
437 msecs_to_jiffies(devfreq->profile->polling_ms));
438 mutex_unlock(&devfreq->lock);
440 trace_devfreq_monitor(devfreq);
444 * devfreq_monitor_start() - Start load monitoring of devfreq instance
445 * @devfreq: the devfreq instance.
447 * Helper function for starting devfreq device load monitoring. By
448 * default delayed work based monitoring is supported. Function
449 * to be called from governor in response to DEVFREQ_GOV_START
450 * event when device is added to devfreq framework.
452 void devfreq_monitor_start(struct devfreq *devfreq)
454 if (devfreq->governor->interrupt_driven)
457 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
458 if (devfreq->profile->polling_ms)
459 queue_delayed_work(devfreq_wq, &devfreq->work,
460 msecs_to_jiffies(devfreq->profile->polling_ms));
462 EXPORT_SYMBOL(devfreq_monitor_start);
465 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
466 * @devfreq: the devfreq instance.
468 * Helper function to stop devfreq device load monitoring. Function
469 * to be called from governor in response to DEVFREQ_GOV_STOP
470 * event when device is removed from devfreq framework.
472 void devfreq_monitor_stop(struct devfreq *devfreq)
474 if (devfreq->governor->interrupt_driven)
477 cancel_delayed_work_sync(&devfreq->work);
479 EXPORT_SYMBOL(devfreq_monitor_stop);
482 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
483 * @devfreq: the devfreq instance.
485 * Helper function to suspend devfreq device load monitoring. Function
486 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
487 * event or when polling interval is set to zero.
489 * Note: Though this function is same as devfreq_monitor_stop(),
490 * intentionally kept separate to provide hooks for collecting
491 * transition statistics.
493 void devfreq_monitor_suspend(struct devfreq *devfreq)
495 mutex_lock(&devfreq->lock);
496 if (devfreq->stop_polling) {
497 mutex_unlock(&devfreq->lock);
501 devfreq_update_status(devfreq, devfreq->previous_freq);
502 devfreq->stop_polling = true;
503 mutex_unlock(&devfreq->lock);
505 if (devfreq->governor->interrupt_driven)
508 cancel_delayed_work_sync(&devfreq->work);
510 EXPORT_SYMBOL(devfreq_monitor_suspend);
513 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
514 * @devfreq: the devfreq instance.
516 * Helper function to resume devfreq device load monitoring. Function
517 * to be called from governor in response to DEVFREQ_GOV_RESUME
518 * event or when polling interval is set to non-zero.
520 void devfreq_monitor_resume(struct devfreq *devfreq)
524 mutex_lock(&devfreq->lock);
525 if (!devfreq->stop_polling)
528 if (devfreq->governor->interrupt_driven)
531 if (!delayed_work_pending(&devfreq->work) &&
532 devfreq->profile->polling_ms)
533 queue_delayed_work(devfreq_wq, &devfreq->work,
534 msecs_to_jiffies(devfreq->profile->polling_ms));
537 devfreq->stats.last_update = get_jiffies_64();
538 devfreq->stop_polling = false;
540 if (devfreq->profile->get_cur_freq &&
541 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
542 devfreq->previous_freq = freq;
545 mutex_unlock(&devfreq->lock);
547 EXPORT_SYMBOL(devfreq_monitor_resume);
550 * devfreq_update_interval() - Update device devfreq monitoring interval
551 * @devfreq: the devfreq instance.
552 * @delay: new polling interval to be set.
554 * Helper function to set new load monitoring polling interval. Function
555 * to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
557 void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
559 unsigned int cur_delay = devfreq->profile->polling_ms;
560 unsigned int new_delay = *delay;
562 mutex_lock(&devfreq->lock);
563 devfreq->profile->polling_ms = new_delay;
565 if (devfreq->stop_polling)
568 if (devfreq->governor->interrupt_driven)
571 /* if new delay is zero, stop polling */
573 mutex_unlock(&devfreq->lock);
574 cancel_delayed_work_sync(&devfreq->work);
578 /* if current delay is zero, start polling with new delay */
580 queue_delayed_work(devfreq_wq, &devfreq->work,
581 msecs_to_jiffies(devfreq->profile->polling_ms));
585 /* if current delay is greater than new delay, restart polling */
586 if (cur_delay > new_delay) {
587 mutex_unlock(&devfreq->lock);
588 cancel_delayed_work_sync(&devfreq->work);
589 mutex_lock(&devfreq->lock);
590 if (!devfreq->stop_polling)
591 queue_delayed_work(devfreq_wq, &devfreq->work,
592 msecs_to_jiffies(devfreq->profile->polling_ms));
595 mutex_unlock(&devfreq->lock);
597 EXPORT_SYMBOL(devfreq_update_interval);
600 * devfreq_notifier_call() - Notify that the device frequency requirements
601 * has been changed out of devfreq framework.
602 * @nb: the notifier_block (supposed to be devfreq->nb)
606 * Called by a notifier that uses devfreq->nb.
608 static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
611 struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
614 mutex_lock(&devfreq->lock);
616 devfreq->scaling_min_freq = find_available_min_freq(devfreq);
617 if (!devfreq->scaling_min_freq)
620 devfreq->scaling_max_freq = find_available_max_freq(devfreq);
621 if (!devfreq->scaling_max_freq) {
622 devfreq->scaling_max_freq = ULONG_MAX;
626 err = update_devfreq(devfreq);
629 mutex_unlock(&devfreq->lock);
631 dev_err(devfreq->dev.parent,
632 "failed to update frequency from OPP notifier (%d)\n",
639 * qos_notifier_call() - Common handler for QoS constraints.
640 * @devfreq: the devfreq instance.
642 static int qos_notifier_call(struct devfreq *devfreq)
646 mutex_lock(&devfreq->lock);
647 err = update_devfreq(devfreq);
648 mutex_unlock(&devfreq->lock);
650 dev_err(devfreq->dev.parent,
651 "failed to update frequency from PM QoS (%d)\n",
658 * qos_min_notifier_call() - Callback for QoS min_freq changes.
659 * @nb: Should be devfreq->nb_min
661 static int qos_min_notifier_call(struct notifier_block *nb,
662 unsigned long val, void *ptr)
664 return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
668 * qos_max_notifier_call() - Callback for QoS max_freq changes.
669 * @nb: Should be devfreq->nb_max
671 static int qos_max_notifier_call(struct notifier_block *nb,
672 unsigned long val, void *ptr)
674 return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
678 * devfreq_dev_release() - Callback for struct device to release the device.
679 * @dev: the devfreq device
681 * Remove devfreq from the list and release its resources.
683 static void devfreq_dev_release(struct device *dev)
685 struct devfreq *devfreq = to_devfreq(dev);
688 mutex_lock(&devfreq_list_lock);
689 list_del(&devfreq->node);
690 mutex_unlock(&devfreq_list_lock);
692 err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
693 DEV_PM_QOS_MAX_FREQUENCY);
694 if (err && err != -ENOENT)
695 dev_warn(dev->parent,
696 "Failed to remove max_freq notifier: %d\n", err);
697 err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
698 DEV_PM_QOS_MIN_FREQUENCY);
699 if (err && err != -ENOENT)
700 dev_warn(dev->parent,
701 "Failed to remove min_freq notifier: %d\n", err);
703 if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
704 err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
706 dev_warn(dev->parent,
707 "Failed to remove max_freq request: %d\n", err);
709 if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
710 err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
712 dev_warn(dev->parent,
713 "Failed to remove min_freq request: %d\n", err);
716 if (devfreq->profile->exit)
717 devfreq->profile->exit(devfreq->dev.parent);
719 mutex_destroy(&devfreq->lock);
724 * devfreq_add_device() - Add devfreq feature to the device
725 * @dev: the device to add devfreq feature.
726 * @profile: device-specific profile to run devfreq.
727 * @governor_name: name of the policy to choose frequency.
728 * @data: private data for the governor. The devfreq framework does not
731 struct devfreq *devfreq_add_device(struct device *dev,
732 struct devfreq_dev_profile *profile,
733 const char *governor_name,
736 struct devfreq *devfreq;
737 struct devfreq_governor *governor;
740 if (!dev || !profile || !governor_name) {
741 dev_err(dev, "%s: Invalid parameters.\n", __func__);
742 return ERR_PTR(-EINVAL);
745 mutex_lock(&devfreq_list_lock);
746 devfreq = find_device_devfreq(dev);
747 mutex_unlock(&devfreq_list_lock);
748 if (!IS_ERR(devfreq)) {
749 dev_err(dev, "%s: devfreq device already exists!\n",
755 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
761 mutex_init(&devfreq->lock);
762 mutex_lock(&devfreq->lock);
763 devfreq->dev.parent = dev;
764 devfreq->dev.class = devfreq_class;
765 devfreq->dev.release = devfreq_dev_release;
766 INIT_LIST_HEAD(&devfreq->node);
767 devfreq->profile = profile;
768 strscpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
769 devfreq->previous_freq = profile->initial_freq;
770 devfreq->last_status.current_frequency = profile->initial_freq;
771 devfreq->data = data;
772 devfreq->nb.notifier_call = devfreq_notifier_call;
774 if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
775 mutex_unlock(&devfreq->lock);
776 err = set_freq_table(devfreq);
779 mutex_lock(&devfreq->lock);
782 devfreq->scaling_min_freq = find_available_min_freq(devfreq);
783 if (!devfreq->scaling_min_freq) {
784 mutex_unlock(&devfreq->lock);
789 devfreq->scaling_max_freq = find_available_max_freq(devfreq);
790 if (!devfreq->scaling_max_freq) {
791 mutex_unlock(&devfreq->lock);
796 devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
797 atomic_set(&devfreq->suspend_count, 0);
799 dev_set_name(&devfreq->dev, "%s", dev_name(dev));
800 err = device_register(&devfreq->dev);
802 mutex_unlock(&devfreq->lock);
803 put_device(&devfreq->dev);
807 devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
808 array3_size(sizeof(unsigned int),
809 devfreq->profile->max_state,
810 devfreq->profile->max_state),
812 if (!devfreq->stats.trans_table) {
813 mutex_unlock(&devfreq->lock);
818 devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
819 devfreq->profile->max_state,
820 sizeof(*devfreq->stats.time_in_state),
822 if (!devfreq->stats.time_in_state) {
823 mutex_unlock(&devfreq->lock);
828 devfreq->stats.total_trans = 0;
829 devfreq->stats.last_update = get_jiffies_64();
831 srcu_init_notifier_head(&devfreq->transition_notifier_list);
833 mutex_unlock(&devfreq->lock);
835 err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
836 DEV_PM_QOS_MIN_FREQUENCY, 0);
839 err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
840 DEV_PM_QOS_MAX_FREQUENCY,
841 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
845 devfreq->nb_min.notifier_call = qos_min_notifier_call;
846 err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_min,
847 DEV_PM_QOS_MIN_FREQUENCY);
851 devfreq->nb_max.notifier_call = qos_max_notifier_call;
852 err = dev_pm_qos_add_notifier(devfreq->dev.parent, &devfreq->nb_max,
853 DEV_PM_QOS_MAX_FREQUENCY);
857 mutex_lock(&devfreq_list_lock);
859 governor = try_then_request_governor(devfreq->governor_name);
860 if (IS_ERR(governor)) {
861 dev_err(dev, "%s: Unable to find governor for the device\n",
863 err = PTR_ERR(governor);
867 devfreq->governor = governor;
868 err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
871 dev_err(dev, "%s: Unable to start governor for the device\n",
876 list_add(&devfreq->node, &devfreq_list);
878 mutex_unlock(&devfreq_list_lock);
883 mutex_unlock(&devfreq_list_lock);
885 devfreq_remove_device(devfreq);
892 EXPORT_SYMBOL(devfreq_add_device);
895 * devfreq_remove_device() - Remove devfreq feature from a device.
896 * @devfreq: the devfreq instance to be removed
898 * The opposite of devfreq_add_device().
900 int devfreq_remove_device(struct devfreq *devfreq)
905 if (devfreq->governor)
906 devfreq->governor->event_handler(devfreq,
907 DEVFREQ_GOV_STOP, NULL);
908 device_unregister(&devfreq->dev);
912 EXPORT_SYMBOL(devfreq_remove_device);
914 static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
916 struct devfreq **r = res;
918 if (WARN_ON(!r || !*r))
924 static void devm_devfreq_dev_release(struct device *dev, void *res)
926 devfreq_remove_device(*(struct devfreq **)res);
930 * devm_devfreq_add_device() - Resource-managed devfreq_add_device()
931 * @dev: the device to add devfreq feature.
932 * @profile: device-specific profile to run devfreq.
933 * @governor_name: name of the policy to choose frequency.
934 * @data: private data for the governor. The devfreq framework does not
937 * This function manages automatically the memory of devfreq device using device
938 * resource management and simplify the free operation for memory of devfreq
941 struct devfreq *devm_devfreq_add_device(struct device *dev,
942 struct devfreq_dev_profile *profile,
943 const char *governor_name,
946 struct devfreq **ptr, *devfreq;
948 ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
950 return ERR_PTR(-ENOMEM);
952 devfreq = devfreq_add_device(dev, profile, governor_name, data);
953 if (IS_ERR(devfreq)) {
959 devres_add(dev, ptr);
963 EXPORT_SYMBOL(devm_devfreq_add_device);
967 * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
968 * @dev - instance to the given device
969 * @index - index into list of devfreq
971 * return the instance of devfreq device
973 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
975 struct device_node *node;
976 struct devfreq *devfreq;
979 return ERR_PTR(-EINVAL);
982 return ERR_PTR(-EINVAL);
984 node = of_parse_phandle(dev->of_node, "devfreq", index);
986 return ERR_PTR(-ENODEV);
988 mutex_lock(&devfreq_list_lock);
989 list_for_each_entry(devfreq, &devfreq_list, node) {
990 if (devfreq->dev.parent
991 && devfreq->dev.parent->of_node == node) {
992 mutex_unlock(&devfreq_list_lock);
997 mutex_unlock(&devfreq_list_lock);
1000 return ERR_PTR(-EPROBE_DEFER);
1003 struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
1005 return ERR_PTR(-ENODEV);
1007 #endif /* CONFIG_OF */
1008 EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
1011 * devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
1012 * @dev: the device from which to remove devfreq feature.
1013 * @devfreq: the devfreq instance to be removed
1015 void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
1017 WARN_ON(devres_release(dev, devm_devfreq_dev_release,
1018 devm_devfreq_dev_match, devfreq));
1020 EXPORT_SYMBOL(devm_devfreq_remove_device);
1023 * devfreq_suspend_device() - Suspend devfreq of a device.
1024 * @devfreq: the devfreq instance to be suspended
1026 * This function is intended to be called by the pm callbacks
1027 * (e.g., runtime_suspend, suspend) of the device driver that
1028 * holds the devfreq.
1030 int devfreq_suspend_device(struct devfreq *devfreq)
1037 if (atomic_inc_return(&devfreq->suspend_count) > 1)
1040 if (devfreq->governor) {
1041 ret = devfreq->governor->event_handler(devfreq,
1042 DEVFREQ_GOV_SUSPEND, NULL);
1047 if (devfreq->suspend_freq) {
1048 mutex_lock(&devfreq->lock);
1049 ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
1050 mutex_unlock(&devfreq->lock);
1057 EXPORT_SYMBOL(devfreq_suspend_device);
1060 * devfreq_resume_device() - Resume devfreq of a device.
1061 * @devfreq: the devfreq instance to be resumed
1063 * This function is intended to be called by the pm callbacks
1064 * (e.g., runtime_resume, resume) of the device driver that
1065 * holds the devfreq.
1067 int devfreq_resume_device(struct devfreq *devfreq)
1074 if (atomic_dec_return(&devfreq->suspend_count) >= 1)
1077 if (devfreq->resume_freq) {
1078 mutex_lock(&devfreq->lock);
1079 ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
1080 mutex_unlock(&devfreq->lock);
1085 if (devfreq->governor) {
1086 ret = devfreq->governor->event_handler(devfreq,
1087 DEVFREQ_GOV_RESUME, NULL);
1094 EXPORT_SYMBOL(devfreq_resume_device);
1097 * devfreq_suspend() - Suspend devfreq governors and devices
1099 * Called during system wide Suspend/Hibernate cycles for suspending governors
1100 * and devices preserving the state for resume. On some platforms the devfreq
1101 * device must have precise state (frequency) after resume in order to provide
1102 * fully operating setup.
1104 void devfreq_suspend(void)
1106 struct devfreq *devfreq;
1109 mutex_lock(&devfreq_list_lock);
1110 list_for_each_entry(devfreq, &devfreq_list, node) {
1111 ret = devfreq_suspend_device(devfreq);
1113 dev_err(&devfreq->dev,
1114 "failed to suspend devfreq device\n");
1116 mutex_unlock(&devfreq_list_lock);
1120 * devfreq_resume() - Resume devfreq governors and devices
1122 * Called during system wide Suspend/Hibernate cycle for resuming governors and
1123 * devices that are suspended with devfreq_suspend().
1125 void devfreq_resume(void)
1127 struct devfreq *devfreq;
1130 mutex_lock(&devfreq_list_lock);
1131 list_for_each_entry(devfreq, &devfreq_list, node) {
1132 ret = devfreq_resume_device(devfreq);
1134 dev_warn(&devfreq->dev,
1135 "failed to resume devfreq device\n");
1137 mutex_unlock(&devfreq_list_lock);
1141 * devfreq_add_governor() - Add devfreq governor
1142 * @governor: the devfreq governor to be added
1144 int devfreq_add_governor(struct devfreq_governor *governor)
1146 struct devfreq_governor *g;
1147 struct devfreq *devfreq;
1151 pr_err("%s: Invalid parameters.\n", __func__);
1155 mutex_lock(&devfreq_list_lock);
1156 g = find_devfreq_governor(governor->name);
1158 pr_err("%s: governor %s already registered\n", __func__,
1164 list_add(&governor->node, &devfreq_governor_list);
1166 list_for_each_entry(devfreq, &devfreq_list, node) {
1168 struct device *dev = devfreq->dev.parent;
1170 if (!strncmp(devfreq->governor_name, governor->name,
1171 DEVFREQ_NAME_LEN)) {
1172 /* The following should never occur */
1173 if (devfreq->governor) {
1175 "%s: Governor %s already present\n",
1176 __func__, devfreq->governor->name);
1177 ret = devfreq->governor->event_handler(devfreq,
1178 DEVFREQ_GOV_STOP, NULL);
1181 "%s: Governor %s stop = %d\n",
1183 devfreq->governor->name, ret);
1187 devfreq->governor = governor;
1188 ret = devfreq->governor->event_handler(devfreq,
1189 DEVFREQ_GOV_START, NULL);
1191 dev_warn(dev, "%s: Governor %s start=%d\n",
1192 __func__, devfreq->governor->name,
1199 mutex_unlock(&devfreq_list_lock);
1203 EXPORT_SYMBOL(devfreq_add_governor);
1206 * devfreq_remove_governor() - Remove devfreq feature from a device.
1207 * @governor: the devfreq governor to be removed
1209 int devfreq_remove_governor(struct devfreq_governor *governor)
1211 struct devfreq_governor *g;
1212 struct devfreq *devfreq;
1216 pr_err("%s: Invalid parameters.\n", __func__);
1220 mutex_lock(&devfreq_list_lock);
1221 g = find_devfreq_governor(governor->name);
1223 pr_err("%s: governor %s not registered\n", __func__,
1228 list_for_each_entry(devfreq, &devfreq_list, node) {
1230 struct device *dev = devfreq->dev.parent;
1232 if (!strncmp(devfreq->governor_name, governor->name,
1233 DEVFREQ_NAME_LEN)) {
1234 /* we should have a devfreq governor! */
1235 if (!devfreq->governor) {
1236 dev_warn(dev, "%s: Governor %s NOT present\n",
1237 __func__, governor->name);
1241 ret = devfreq->governor->event_handler(devfreq,
1242 DEVFREQ_GOV_STOP, NULL);
1244 dev_warn(dev, "%s: Governor %s stop=%d\n",
1245 __func__, devfreq->governor->name,
1248 devfreq->governor = NULL;
1252 list_del(&governor->node);
1254 mutex_unlock(&devfreq_list_lock);
1258 EXPORT_SYMBOL(devfreq_remove_governor);
1260 static ssize_t name_show(struct device *dev,
1261 struct device_attribute *attr, char *buf)
1263 struct devfreq *devfreq = to_devfreq(dev);
1264 return sprintf(buf, "%s\n", dev_name(devfreq->dev.parent));
1266 static DEVICE_ATTR_RO(name);
1268 static ssize_t governor_show(struct device *dev,
1269 struct device_attribute *attr, char *buf)
1271 if (!to_devfreq(dev)->governor)
1274 return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
1277 static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
1278 const char *buf, size_t count)
1280 struct devfreq *df = to_devfreq(dev);
1282 char str_governor[DEVFREQ_NAME_LEN + 1];
1283 const struct devfreq_governor *governor, *prev_governor;
1285 ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
1289 mutex_lock(&devfreq_list_lock);
1290 governor = try_then_request_governor(str_governor);
1291 if (IS_ERR(governor)) {
1292 ret = PTR_ERR(governor);
1295 if (df->governor == governor) {
1298 } else if ((df->governor && df->governor->immutable) ||
1299 governor->immutable) {
1305 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
1307 dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
1308 __func__, df->governor->name, ret);
1312 prev_governor = df->governor;
1313 df->governor = governor;
1314 strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
1315 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1317 dev_warn(dev, "%s: Governor %s not started(%d)\n",
1318 __func__, df->governor->name, ret);
1319 df->governor = prev_governor;
1320 strncpy(df->governor_name, prev_governor->name,
1322 ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
1325 "%s: reverting to Governor %s failed (%d)\n",
1326 __func__, df->governor_name, ret);
1327 df->governor = NULL;
1331 mutex_unlock(&devfreq_list_lock);
1337 static DEVICE_ATTR_RW(governor);
/*
 * available_governors_show() - sysfs "available_governors" read: list the
 * governors this device could use, space-separated, newline-terminated.
 */
1339 static ssize_t available_governors_show(struct device *d,
1340 struct device_attribute *attr,
1343 struct devfreq *df = to_devfreq(d);
/* Walks devfreq_governor_list, so the list lock must be held. */
1346 mutex_lock(&devfreq_list_lock);
1349 * The devfreq with immutable governor (e.g., passive) shows
1350 * only own governor.
/* Immutable governor: the only valid choice is the current one. */
1352 if (df->governor && df->governor->immutable) {
1353 count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
1354 "%s ", df->governor_name);
1356 * The devfreq device shows the registered governor except for
1357 * immutable governors such as passive governor .
1360 struct devfreq_governor *governor;
1362 list_for_each_entry(governor, &devfreq_governor_list, node) {
/* Immutable governors cannot be selected here, so skip them. */
1363 if (governor->immutable)
/* Reserve 2 bytes for the trailing-space fixup and newline. */
1365 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1366 "%s ", governor->name);
1370 mutex_unlock(&devfreq_list_lock);
1372 /* Truncate the trailing space */
1376 count += sprintf(&buf[count], "\n");
1380 static DEVICE_ATTR_RO(available_governors);
/*
 * cur_freq_show() - sysfs "cur_freq" read: report the device's current
 * frequency in Hz.
 */
1382 static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
1386 struct devfreq *devfreq = to_devfreq(dev);
/* Prefer the driver's own get_cur_freq() callback when it succeeds. */
1388 if (devfreq->profile->get_cur_freq &&
1389 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
1390 return sprintf(buf, "%lu\n", freq);
/* Fall back to the last frequency the framework set. */
1392 return sprintf(buf, "%lu\n", devfreq->previous_freq);
1394 static DEVICE_ATTR_RO(cur_freq);
/*
 * target_freq_show() - sysfs "target_freq" read: report the frequency the
 * framework last requested (previous_freq), in Hz.
 */
1396 static ssize_t target_freq_show(struct device *dev,
1397 struct device_attribute *attr, char *buf)
1399 return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
1401 static DEVICE_ATTR_RO(target_freq);
/*
 * polling_interval_show() - sysfs "polling_interval" read: report the load
 * monitoring period in milliseconds from the device profile.
 */
1403 static ssize_t polling_interval_show(struct device *dev,
1404 struct device_attribute *attr, char *buf)
1406 return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
/*
 * polling_interval_store() - sysfs "polling_interval" write: ask the active
 * governor to adopt a new monitoring interval (ms).
 * NOTE(review): the extract omits the input-validation / no-governor error
 * paths between the visible lines.
 */
1409 static ssize_t polling_interval_store(struct device *dev,
1410 struct device_attribute *attr,
1411 const char *buf, size_t count)
1413 struct devfreq *df = to_devfreq(dev);
1420 ret = sscanf(buf, "%u", &value);
/* The governor owns the monitoring timer, so it applies the new interval. */
1424 df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
1429 static DEVICE_ATTR_RW(polling_interval);
/*
 * min_freq_store() - sysfs "min_freq" write: set the user's minimum
 * frequency constraint (Hz) via a dev_pm_qos request.
 */
1431 static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
1432 const char *buf, size_t count)
1434 struct devfreq *df = to_devfreq(dev);
1435 unsigned long value;
1439 * Protect against theoretical sysfs writes between
1440 * device_add and dev_pm_qos_add_request
1442 if (!dev_pm_qos_request_active(&df->user_min_freq_req))
1445 ret = sscanf(buf, "%lu", &value);
1449 /* Round down to kHz for PM QoS */
/*
 * Rounding the minimum DOWN keeps the acceptable interval from shrinking:
 * a frequency just above a kHz boundary is still admitted.
 */
1450 ret = dev_pm_qos_update_request(&df->user_min_freq_req,
1451 value / HZ_PER_KHZ);
/*
 * min_freq_show() - sysfs "min_freq" read: report the effective lower
 * frequency bound (Hz) after all constraints are aggregated.
 */
1458 static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
1461 struct devfreq *df = to_devfreq(dev);
1462 unsigned long min_freq, max_freq;
/* get_freq_range() expects df->lock held while it reads the constraints. */
1464 mutex_lock(&df->lock);
1465 get_freq_range(df, &min_freq, &max_freq);
1466 mutex_unlock(&df->lock);
1468 return sprintf(buf, "%lu\n", min_freq);
1470 static DEVICE_ATTR_RW(min_freq);
/*
 * max_freq_store() - sysfs "max_freq" write: set the user's maximum
 * frequency constraint (Hz) via a dev_pm_qos request; 0 removes the limit.
 */
1472 static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
1473 const char *buf, size_t count)
1475 struct devfreq *df = to_devfreq(dev);
1476 unsigned long value;
1480 * Protect against theoretical sysfs writes between
1481 * device_add and dev_pm_qos_add_request
1483 if (!dev_pm_qos_request_active(&df->user_max_freq_req))
1486 ret = sscanf(buf, "%lu", &value);
1491 * PM QoS frequencies are in kHz so we need to convert. Convert by
1492 * rounding upwards so that the acceptable interval never shrinks.
1494 * For example if the user writes "666666666" to sysfs this value will
1495 * be converted to 666667 kHz and back to 666667000 Hz before an OPP
1496 * lookup, this ensures that an OPP of 666666666Hz is still accepted.
1498 * A value of zero means "no limit".
1501 value = DIV_ROUND_UP(value, HZ_PER_KHZ);
/* 0 from userspace maps to the PM QoS "unlimited" default. */
1503 value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
1505 ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
/*
 * max_freq_show() - sysfs "max_freq" read: report the effective upper
 * frequency bound (Hz) after all constraints are aggregated.
 */
1512 static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
1515 struct devfreq *df = to_devfreq(dev);
1516 unsigned long min_freq, max_freq;
/* df->lock protects the constraint data read by get_freq_range(). */
1518 mutex_lock(&df->lock);
1519 get_freq_range(df, &min_freq, &max_freq);
1520 mutex_unlock(&df->lock);
1522 return sprintf(buf, "%lu\n", max_freq);
1524 static DEVICE_ATTR_RW(max_freq);
/*
 * available_frequencies_show() - sysfs "available_frequencies" read: list
 * every frequency in the profile's table, space-separated, newline-ended.
 */
1526 static ssize_t available_frequencies_show(struct device *d,
1527 struct device_attribute *attr,
1530 struct devfreq *df = to_devfreq(d);
/* df->lock guards the profile's freq_table while we iterate it. */
1534 mutex_lock(&df->lock);
1536 for (i = 0; i < df->profile->max_state; i++)
/* Reserve 2 bytes for the trailing-space fixup and newline. */
1537 count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
1538 "%lu ", df->profile->freq_table[i]);
1540 mutex_unlock(&df->lock);
1541 /* Truncate the trailing space */
1545 count += sprintf(&buf[count], "\n");
1549 static DEVICE_ATTR_RO(available_frequencies);
/*
 * trans_stat_show() - sysfs "trans_stat" read: print a From/To matrix of
 * frequency transitions plus per-frequency residency time and the total
 * transition count.
 * NOTE(review): interior lines (error returns, some braces) are missing
 * from this extract; comments describe the visible statements only.
 */
1551 static ssize_t trans_stat_show(struct device *dev,
1552 struct device_attribute *attr, char *buf)
1554 struct devfreq *devfreq = to_devfreq(dev);
1557 unsigned int max_state = devfreq->profile->max_state;
/* No frequency table means there are no statistics to render. */
1560 return sprintf(buf, "Not Supported.\n");
/* Fold the elapsed time since the last update into the stats first. */
1562 mutex_lock(&devfreq->lock);
1563 if (!devfreq->stop_polling &&
1564 devfreq_update_status(devfreq, devfreq->previous_freq)) {
1565 mutex_unlock(&devfreq->lock);
1568 mutex_unlock(&devfreq->lock);
/* Header: one column per target frequency, then the residency column. */
1570 len = sprintf(buf, " From : To\n");
1571 len += sprintf(buf + len, " :");
1572 for (i = 0; i < max_state; i++)
1573 len += sprintf(buf + len, "%10lu",
1574 devfreq->profile->freq_table[i]);
1576 len += sprintf(buf + len, " time(ms)\n");
/* One row per source frequency; '*' marks the current frequency. */
1578 for (i = 0; i < max_state; i++) {
1579 if (devfreq->profile->freq_table[i]
1580 == devfreq->previous_freq) {
1581 len += sprintf(buf + len, "*");
1583 len += sprintf(buf + len, " ");
1585 len += sprintf(buf + len, "%10lu:",
1586 devfreq->profile->freq_table[i]);
/* trans_table is a flat max_state x max_state matrix. */
1587 for (j = 0; j < max_state; j++)
1588 len += sprintf(buf + len, "%10u",
1589 devfreq->stats.trans_table[(i * max_state) + j]);
/* Residency is stored in jiffies; report it in milliseconds. */
1591 len += sprintf(buf + len, "%10llu\n", (u64)
1592 jiffies64_to_msecs(devfreq->stats.time_in_state[i]));
1595 len += sprintf(buf + len, "Total transition : %u\n",
1596 devfreq->stats.total_trans);
/*
 * trans_stat_store() - sysfs "trans_stat" write: writing "0" resets all
 * transition statistics; any other value is rejected.
 */
1600 static ssize_t trans_stat_store(struct device *dev,
1601 struct device_attribute *attr,
1602 const char *buf, size_t count)
1604 struct devfreq *df = to_devfreq(dev);
/* No frequency table: there are no statistics to reset. */
1607 if (df->profile->max_state == 0)
/* Only an exact "0" (base 10) triggers the reset. */
1610 err = kstrtoint(buf, 10, &value);
1611 if (err || value != 0)
/* df->lock keeps the reset atomic w.r.t. concurrent stat updates. */
1614 mutex_lock(&df->lock);
1615 memset(df->stats.time_in_state, 0, (df->profile->max_state *
1616 sizeof(*df->stats.time_in_state)));
/* array3_size() computes max_state * max_state * sizeof safely. */
1617 memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
1618 df->profile->max_state,
1619 df->profile->max_state));
1620 df->stats.total_trans = 0;
/* Restart the residency clock from "now". */
1621 df->stats.last_update = get_jiffies_64();
1622 mutex_unlock(&df->lock);
1626 static DEVICE_ATTR_RW(trans_stat);
/*
 * Default sysfs attributes created for every devfreq device; wired into the
 * class via devfreq_groups (see ATTRIBUTE_GROUPS below and devfreq_init()).
 */
1628 static struct attribute *devfreq_attrs[] = {
1629 &dev_attr_name.attr,
1630 &dev_attr_governor.attr,
1631 &dev_attr_available_governors.attr,
1632 &dev_attr_cur_freq.attr,
1633 &dev_attr_available_frequencies.attr,
1634 &dev_attr_target_freq.attr,
1635 &dev_attr_polling_interval.attr,
1636 &dev_attr_min_freq.attr,
1637 &dev_attr_max_freq.attr,
1638 &dev_attr_trans_stat.attr,
1641 ATTRIBUTE_GROUPS(devfreq);
1644 * devfreq_summary_show() - Show the summary of the devfreq devices
1645 * @s: seq_file instance to show the summary of devfreq devices
1648 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
1649 * It helps that user can know the detailed information of the devfreq devices.
1651 * Return 0 always because it shows the information without any data change.
1653 static int devfreq_summary_show(struct seq_file *s, void *data)
1655 struct devfreq *devfreq;
1656 struct devfreq *p_devfreq = NULL;
1657 unsigned long cur_freq, min_freq, max_freq;
1658 unsigned int polling_ms;
/* Two header lines: column titles, then a dashed separator row. */
1660 seq_printf(s, "%-30s %-10s %-10s %-15s %10s %12s %12s %12s\n",
1669 seq_printf(s, "%30s %10s %10s %15s %10s %12s %12s %12s\n",
1670 "------------------------------",
/* Hold the list lock while walking every registered devfreq device. */
1679 mutex_lock(&devfreq_list_lock);
1681 list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
1682 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
/* Passive devices report the parent devfreq they track. */
1683 if (!strncmp(devfreq->governor_name, DEVFREQ_GOV_PASSIVE,
1684 DEVFREQ_NAME_LEN)) {
1685 struct devfreq_passive_data *data = devfreq->data;
1688 p_devfreq = data->parent;
/* Snapshot per-device values under the device lock. */
1694 mutex_lock(&devfreq->lock);
1695 cur_freq = devfreq->previous_freq,
1696 get_freq_range(devfreq, &min_freq, &max_freq);
1697 polling_ms = devfreq->profile->polling_ms,
1698 mutex_unlock(&devfreq->lock);
1701 "%-30s %-10s %-10s %-15s %10d %12ld %12ld %12ld\n",
1702 dev_name(devfreq->dev.parent),
1703 dev_name(&devfreq->dev),
1704 p_devfreq ? dev_name(&p_devfreq->dev) : "null",
1705 devfreq->governor_name,
1712 mutex_unlock(&devfreq_list_lock);
1716 DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
/*
 * devfreq_init() - framework bring-up at subsys_initcall time: create the
 * "devfreq" device class, the monitoring workqueue, and the debugfs dir
 * with its "devfreq_summary" file.
 */
1718 static int __init devfreq_init(void)
1720 devfreq_class = class_create(THIS_MODULE, "devfreq");
1721 if (IS_ERR(devfreq_class)) {
1722 pr_err("%s: couldn't create class\n", __FILE__);
1723 return PTR_ERR(devfreq_class);
/* Freezable so polling work is suspended across system sleep. */
1726 devfreq_wq = create_freezable_workqueue("devfreq_wq");
/* On workqueue failure (check elided in this extract) undo class_create. */
1728 class_destroy(devfreq_class);
1729 pr_err("%s: couldn't create workqueue\n", __FILE__);
/* Attach the default sysfs attribute groups to every devfreq device. */
1732 devfreq_class->dev_groups = devfreq_groups;
1734 devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
1735 debugfs_create_file("devfreq_summary", 0444,
1736 devfreq_debugfs, NULL,
1737 &devfreq_summary_fops);
1741 subsys_initcall(devfreq_init);
1744 * The following are helper functions for devfreq user device drivers with
1749 * devfreq_recommended_opp() - Helper function to get proper OPP for the
1750 * freq value given to target callback.
1751 * @dev: The devfreq user device. (parent of devfreq)
1752 * @freq: The frequency given to target function
1753 * @flags: Flags handed from devfreq framework.
1755 * The callers are required to call dev_pm_opp_put() for the returned OPP after
1758 struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
1759 unsigned long *freq,
1762 struct dev_pm_opp *opp;
1764 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1765 /* The freq is an upper bound. opp should be lower */
1766 opp = dev_pm_opp_find_freq_floor(dev, freq);
1768 /* If not available, use the closest opp */
1769 if (opp == ERR_PTR(-ERANGE))
1770 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1772 /* The freq is a lower bound. opp should be higher */
1773 opp = dev_pm_opp_find_freq_ceil(dev, freq);
1775 /* If not available, use the closest opp */
1776 if (opp == ERR_PTR(-ERANGE))
1777 opp = dev_pm_opp_find_freq_floor(dev, freq);
1782 EXPORT_SYMBOL(devfreq_recommended_opp);
1785 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1786 * for any changes in the OPP availability
1788 * @dev: The devfreq user device. (parent of devfreq)
1789 * @devfreq: The devfreq object.
/* Return: result of dev_pm_opp_register_notifier() (0 or negative errno). */
1791 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
/* devfreq->nb is the notifier block the OPP core will invoke. */
1793 return dev_pm_opp_register_notifier(dev, &devfreq->nb);
1795 EXPORT_SYMBOL(devfreq_register_opp_notifier);
1798 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1799 * notified for any changes in the OPP
1800 * availability.
1801 * @dev: The devfreq user device. (parent of devfreq)
1802 * @devfreq: The devfreq object.
1804 * At exit() callback of devfreq_dev_profile, this must be included if
1805 * devfreq_recommended_opp is used.
/* Return: result of dev_pm_opp_unregister_notifier() (0 or negative errno). */
1807 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1809 return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
1811 EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
/*
 * devm_devfreq_opp_release() - devres destructor: undo the OPP-notifier
 * registration when the managing device is released; @res holds the
 * struct devfreq * that was registered.
 */
1813 static void devm_devfreq_opp_release(struct device *dev, void *res)
1815 devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
1819 * devm_devfreq_register_opp_notifier() - Resource-managed
1820 * devfreq_register_opp_notifier()
1821 * @dev: The devfreq user device. (parent of devfreq)
1822 * @devfreq: The devfreq object.
1824 int devm_devfreq_register_opp_notifier(struct device *dev,
1825 struct devfreq *devfreq)
1827 struct devfreq **ptr;
/* devres slot stores the devfreq pointer for the release callback. */
1830 ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
1834 ret = devfreq_register_opp_notifier(dev, devfreq);
/* Registration succeeded: hand the slot to devres for auto-cleanup. */
1841 devres_add(dev, ptr);
1845 EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
1848 * devm_devfreq_unregister_opp_notifier() - Resource-managed
1849 * devfreq_unregister_opp_notifier()
1850 * @dev: The devfreq user device. (parent of devfreq)
1851 * @devfreq: The devfreq object.
1853 void devm_devfreq_unregister_opp_notifier(struct device *dev,
1854 struct devfreq *devfreq)
/* devres_release() runs devm_devfreq_opp_release(); WARN if no match. */
1856 WARN_ON(devres_release(dev, devm_devfreq_opp_release,
1857 devm_devfreq_dev_match, devfreq));
1859 EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
1862 * devfreq_register_notifier() - Register a driver with devfreq
1863 * @devfreq: The devfreq object.
1864 * @nb: The notifier block to register.
1865 * @list: DEVFREQ_TRANSITION_NOTIFIER.
/* NOTE(review): validation and default-case lines are elided in this extract. */
1867 int devfreq_register_notifier(struct devfreq *devfreq,
1868 struct notifier_block *nb,
1877 case DEVFREQ_TRANSITION_NOTIFIER:
/* SRCU chain lets notifiers run without blocking registrations. */
1878 ret = srcu_notifier_chain_register(
1879 &devfreq->transition_notifier_list, nb);
1887 EXPORT_SYMBOL(devfreq_register_notifier);
1890 * devfreq_unregister_notifier() - Unregister a driver with devfreq
1891 * @devfreq: The devfreq object.
1892 * @nb: The notifier block to be unregistered.
1893 * @list: DEVFREQ_TRANSITION_NOTIFIER.
/* NOTE(review): validation and default-case lines are elided in this extract. */
1895 int devfreq_unregister_notifier(struct devfreq *devfreq,
1896 struct notifier_block *nb,
1905 case DEVFREQ_TRANSITION_NOTIFIER:
1906 ret = srcu_notifier_chain_unregister(
1907 &devfreq->transition_notifier_list, nb);
1915 EXPORT_SYMBOL(devfreq_unregister_notifier);
/*
 * devres payload remembering what devm_devfreq_register_notifier()
 * registered, so the release callback can unregister it.
 */
1917 struct devfreq_notifier_devres {
1918 struct devfreq *devfreq;
1919 struct notifier_block *nb;
/*
 * devm_devfreq_notifier_release() - devres destructor: unregister the
 * notifier recorded in the devfreq_notifier_devres payload.
 */
1923 static void devm_devfreq_notifier_release(struct device *dev, void *res)
1925 struct devfreq_notifier_devres *this = res;
1927 devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
1931 * devm_devfreq_register_notifier()
1932 * - Resource-managed devfreq_register_notifier()
1933 * @dev: The devfreq user device. (parent of devfreq)
1934 * @devfreq: The devfreq object.
1935 * @nb: The notifier block to register.
1936 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1938 int devm_devfreq_register_notifier(struct device *dev,
1939 struct devfreq *devfreq,
1940 struct notifier_block *nb,
1943 struct devfreq_notifier_devres *ptr;
/* devres payload records what to unregister at device release. */
1946 ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
1951 ret = devfreq_register_notifier(devfreq, nb, list);
/* Success: fill the payload and hand it to devres. */
1957 ptr->devfreq = devfreq;
1960 devres_add(dev, ptr);
1964 EXPORT_SYMBOL(devm_devfreq_register_notifier);
1967 * devm_devfreq_unregister_notifier()
1968 * - Resource-managed devfreq_unregister_notifier()
1969 * @dev: The devfreq user device. (parent of devfreq)
1970 * @devfreq: The devfreq object.
1971 * @nb: The notifier block to be unregistered.
1972 * @list: DEVFREQ_TRANSITION_NOTIFIER.
1974 void devm_devfreq_unregister_notifier(struct device *dev,
1975 struct devfreq *devfreq,
1976 struct notifier_block *nb,
/* devres_release() runs devm_devfreq_notifier_release(); WARN on no match. */
1979 WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
1980 devm_devfreq_dev_match, devfreq));
1982 EXPORT_SYMBOL(devm_devfreq_unregister_notifier);