/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

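/*
 * The phase lists above also record how far a device has progressed:
 * dpm_prepare() moves devices from dpm_list to dpm_prepared_list,
 * dpm_suspend() to dpm_suspended_list, dpm_suspend_late() to
 * dpm_late_early_list and dpm_suspend_noirq() to dpm_noirq_list, while
 * the resume phases walk the devices back in the opposite direction.
 */
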
static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = 0;

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state,
				  const char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_superior(struct device *dev, bool async)
{
	dpm_wait(dev->parent, async);
	dpm_wait_for_suppliers(dev, async);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

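/*
 * Taken together, the wait helpers above order the asynchronous callbacks:
 * resume-side code waits for a device's "superiors" (its parent and its
 * suppliers) to finish first, while suspend-side code waits for its
 * "subordinates" (its children and its consumers).
 */
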
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

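/*
 * The three selectors above pick sibling callbacks out of the same
 * struct dev_pm_ops for each phase of a transition.  For PM_EVENT_SUSPEND,
 * for instance, pm_op() yields ->suspend, pm_late_early_op() yields
 * ->suspend_late and pm_noirq_op() yields ->suspend_noirq.
 */
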
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that fired, embedded in the watchdog object.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

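/*
 * Typical use of the watchdog helpers, as in device_resume() below: declare
 * the watchdog on the stack, arm it around the potentially slow callback
 * and disarm it afterwards.
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 *
 * If the callback fails to return within CONFIG_DPM_WATCHDOG_TIMEOUT
 * seconds, dpm_watchdog_handler() panics the system so the hang can be
 * captured.
 */
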
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	dpm_wait_for_superior(dev, async);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	dpm_wait_for_superior(dev, async);
	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

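/*
 * Note the reference counting pattern shared by all of the async phases:
 * device_suspend_noirq() takes a reference with get_device() before
 * handing the device off to the async thread, and the matching
 * put_device() is done by async_suspend_noirq() after the callback has
 * run, so the device cannot be freed while the async work is pending.
 */
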
void dpm_noirq_begin(void)
{
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
}

int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();
	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_late_suspended = true;
	else
		async_error = error;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

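/*
 * Rationale for the helper above: a supplier can only remain
 * runtime-suspended across the transition (direct_complete) if all of its
 * consumers do so as well.  Since this device is about to get full suspend
 * callbacks, clear direct_complete for its suppliers so that they get full
 * callbacks too.
 */
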
/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		struct device *parent = dev->parent;

		dev->power.is_suspended = true;
		if (parent) {
			spin_lock_irq(&parent->power.lock);

			dev->parent->power.direct_complete = false;
			if (dev->power.wakeup_path
			    && !dev->parent->power.ignore_children)
				dev->parent->power.wakeup_path = true;

			spin_unlock_irq(&parent->power.lock);
		}
		dpm_clear_suppliers_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		!pm_runtime_enabled(dev));

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->power.no_pm_callbacks) {
		ret = 1;	/* Let device go direct_complete */
		goto unlock;
	}

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		pm_runtime_suspended(dev) && ret > 0 &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

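/*
 * A minimal driver-side sketch of opting into direct_complete (foo_prepare
 * is hypothetical, not part of this file): a positive return value from
 * ->prepare() tells the core that the device's runtime-suspended state is
 * fine, as described above.
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * If the device is still runtime-suspended when __device_suspend() runs
 * (and DPM_FLAG_NEVER_SKIP is not set), all of its suspend/resume
 * callbacks for the transition are then skipped.
 */
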
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give a chance for the known devices to complete their probes,
	 * before disabling probing of devices.  This sync point is important
	 * at least at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * It is unsafe if probing of devices happens during suspend or
	 * hibernation; system behavior will be unpredictable in that case.
	 * So let's prohibit device probing here and defer it instead.  The
	 * normal behavior will be restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}

bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}

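/*
 * Intended for bus types and PM domains whose drivers may set
 * DPM_FLAG_SMART_SUSPEND: if this helper returns true, the device was left
 * in runtime suspend and the subsystem may skip its system-wide suspend
 * callbacks (PCI is one known user of this pattern).
 */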