// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

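/*
 * Illustrative sketch (not part of the original file): the call chain that
 * reaches this file when a bus driver registers a device is roughly
 *
 *	device_register(dev)
 *	    device_add(dev)
 *	        device_pm_add(dev)	<- appends dev to dpm_list
 *
 * which is how every device ends up on the PM core's dpm_list.
 */
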
#define pr_fmt(fmt) "PM: " fmt

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

#define list_for_each_entry_rcu_locked(pos, head, member) \
	list_for_each_entry_rcu(pos, head, member, \
			device_links_read_lock_held())

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	/* Skip PM setup/initialization. */
	if (device_pm_not_required(dev))
		return;

	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;

	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

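/*
 * Illustrative sketch (not part of the original file): these move helpers are
 * not normally called directly; device_move() in the driver core picks one of
 * them based on its dpm_order argument, e.g.
 *
 *	device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
 *
 * keeps dpm_list consistent with the new parent/child relationship.
 */
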
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

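/*
 * Illustrative sketch (not part of the original file): with @async unset,
 * dpm_wait() only blocks on devices whose async_suspend flag is set (and only
 * while pm_async is enabled globally).  A driver opts a device in from its
 * probe path; "foo_probe" is a hypothetical example:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		device_enable_async_suspend(dev);
 *		return 0;
 *	}
 */
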
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

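/*
 * Illustrative sketch (not part of the original file): drivers rarely fill in
 * these callbacks one by one; the helper macros from <linux/pm.h> do it for
 * them.  Assuming hypothetical foo_suspend()/foo_resume() handlers:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * SET_SYSTEM_SLEEP_PM_OPS() assigns the pair to .suspend/.resume and to the
 * hibernation callbacks (.freeze/.thaw/.poweroff/.restore), which is exactly
 * the set pm_op() selects from for each PM_EVENT_* value.
 */
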
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

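/*
 * Illustrative sketch (not part of the original file): the "late/early" and
 * "noirq" variants have matching helper macros.  Assuming hypothetical
 * foo_suspend_late()/foo_resume_early() and foo_suspend_noirq()/
 * foo_resume_noirq() handlers:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend_late, foo_resume_early)
 *		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
 *	};
 *
 * pm_late_early_op() and pm_noirq_op() then pick the matching entry for the
 * event being handled.
 */
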
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

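/*
 * Illustrative sketch (not part of the original file): the suspend/resume
 * paths below use the watchdog in a fixed pattern, which compiles away when
 * CONFIG_DPM_WATCHDOG is not set:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_watchdog_clear(&wd);
 *
 * If the callback does not return within CONFIG_DPM_WATCHDOG_TIMEOUT seconds,
 * dpm_watchdog_handler() fires and panics the system.
 */
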
/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Return:
 * - %false if the transition under way is RESTORE.
 * - The return value of dev_pm_smart_suspend_and_suspended() if the transition
 *   under way is THAW.
 * - The logical negation of %power.must_resume otherwise (that is, when the
 *   transition under way is RESUME).
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	if (pm_transition.event == PM_EVENT_RESTORE)
		return false;

	if (pm_transition.event == PM_EVENT_THAW)
		return dev_pm_smart_suspend_and_suspended(dev);

	return !dev->power.must_resume;
}

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_may_skip_resume(dev);

	/*
	 * If the driver callback is skipped below or by the middle layer
	 * callback and device_resume_early() also skips the driver callback for
	 * this device later, it needs to appear as "suspended" to PM-runtime,
	 * so change its status accordingly.
	 *
	 * Otherwise, the device is going to be resumed, so set its PM-runtime
	 * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
	 * to avoid confusing drivers that don't use it.
	 */
	if (skip_resume)
		pm_runtime_set_suspended(dev);
	else if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(func, dev);
		return true;
	}

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the asynchronous resume threads up front, in case starting
	 * them is delayed by devices that resume synchronously.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);

	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();

	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_may_skip_resume(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Kick off the asynchronous resume threads up front, in case starting
	 * them is delayed by devices that resume synchronously.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	devfreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		goto out;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

out:
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

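/*
 * Illustrative sketch (not part of the original file): the suspend core in
 * kernel/power/suspend.c pairs the entry points of this file roughly as
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	...
 *	dpm_suspend_end(PMSG_SUSPEND);
 *	... platform enters and leaves the sleep state ...
 *	dpm_resume_start(PMSG_RESUME);
 *	...
 *	dpm_resume_end(PMSG_RESUME);
 */
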
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	/*
	 * Skipping the resume of devices that were in use right before the
	 * system suspend (as indicated by their PM-runtime usage counters)
	 * would be suboptimal.  Also resume them if doing that is not allowed
	 * to be skipped.
	 */
	if (atomic_read(&dev->power.usage_count) > 1 ||
	    !(dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED) &&
	      dev->power.may_skip_resume))
		dev->power.must_resume = true;

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_noirq))
		return 0;

	return __device_suspend_noirq(dev, pm_transition, false);
}

static int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	cpuidle_pause();

	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();

	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		/*
		 * In principle, the resume of the device may be skipped if it
		 * remains in runtime suspend at this point.
		 */
		dev->power.may_skip_resume = true;
		goto Skip;
	}

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend_late))
		return 0;

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_suspend_late(state);
	if (error)
		goto out;

	error = dpm_suspend_noirq(state);
	if (error)
		dpm_resume_early(resume_event(state));

out:
	dpm_show_time(starttime, state, error, "end");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

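/*
 * Illustrative sketch (not part of the original file): a full system suspend
 * therefore walks the phases in this order, each entry point above moving
 * devices between the lists at the top of this file:
 *
 *	dpm_prepare()		dpm_list            -> dpm_prepared_list
 *	dpm_suspend()		dpm_prepared_list   -> dpm_suspended_list
 *	dpm_suspend_late()	dpm_suspended_list  -> dpm_late_early_list
 *	dpm_suspend_noirq()	dpm_late_early_list -> dpm_noirq_list
 *
 * and the resume phases drain the same lists in reverse.
 */
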
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev)) {
				pm_dev_dbg(dev, state, "direct-complete ");
				goto Complete;
			}

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = false;
	dev->power.must_resume = false;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	if (dpm_async_fn(dev, async_suspend))
		return 0;

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	devfreq_suspend();
	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants".  This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		(ret > 0 || dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

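/*
 * Illustrative sketch (not part of the original file): the flags consulted
 * above are set by drivers, typically at probe time; "foo_probe" is a
 * hypothetical example:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_LEAVE_SUSPENDED);
 *		return 0;
 *	}
 *
 * DPM_FLAG_NEVER_SKIP, by contrast, vetoes the direct_complete optimization
 * that device_prepare() sets up above.
 */
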
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * probing is disabled.  This sync point is important at least
	 * at boot time + hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and would
	 * make system behavior unpredictable, so prohibit device probing here
	 * and defer probes instead.  The normal behavior will be restored in
	 * dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	dpm_show_time(starttime, state, error, "start");
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

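/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * counting the devices currently on dpm_list through this iterator:
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int n = 0;
 *	dpm_for_each_dev(&n, count_one);
 */
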
static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irq(&dev->power.lock);
}

bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}