1 /*
2  * drivers/base/power/main.c - Where the driver meets power management.
3  *
4  * Copyright (c) 2003 Patrick Mochel
5  * Copyright (c) 2003 Open Source Development Lab
6  *
7  * This file is released under the GPLv2
8  *
9  *
10  * The driver model core calls device_pm_add() when a device is registered.
11  * This will initialize the embedded device_pm_info object in the device
12  * and add it to the list of power-controlled devices. sysfs entries for
13  * controlling device power management will also be added.
14  *
15  * A separate list is used for keeping track of power info, because the power
16  * domain dependencies may differ from the ancestral dependencies that the
17  * subsystem list maintains.
18  */
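/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * hypothetical): a driver typically hooks into these transitions by filling
 * in a struct dev_pm_ops and pointing its driver structure at it, e.g.
 *
 *      static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *      static struct platform_driver foo_driver = {
 *              .driver = {
 *                      .name = "foo",
 *                      .pm   = &foo_pm_ops,
 *              },
 *      };
 *
 * The PM core then invokes whichever callbacks are populated as it walks the
 * phases implemented below (prepare, suspend, suspend_late, suspend_noirq and
 * their resume counterparts).
 */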
19
20 #include <linux/device.h>
21 #include <linux/kallsyms.h>
22 #include <linux/export.h>
23 #include <linux/mutex.h>
24 #include <linux/pm.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm-trace.h>
27 #include <linux/pm_wakeirq.h>
28 #include <linux/interrupt.h>
29 #include <linux/sched.h>
30 #include <linux/sched/debug.h>
31 #include <linux/async.h>
32 #include <linux/suspend.h>
33 #include <trace/events/power.h>
34 #include <linux/cpufreq.h>
35 #include <linux/cpuidle.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in the dpm_list are in depth-first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
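/*
 * Devices migrate between the lists above as a transition progresses: the
 * prepare phase moves them from dpm_list to dpm_prepared_list, dpm_suspend()
 * moves them on to dpm_suspended_list, dpm_suspend_late() to
 * dpm_late_early_list and dpm_suspend_noirq() to dpm_noirq_list; the resume
 * and complete phases walk them back in the opposite direction.
 */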
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67         switch (event) {
68         case PM_EVENT_SUSPEND:
69                 return "suspend";
70         case PM_EVENT_RESUME:
71                 return "resume";
72         case PM_EVENT_FREEZE:
73                 return "freeze";
74         case PM_EVENT_QUIESCE:
75                 return "quiesce";
76         case PM_EVENT_HIBERNATE:
77                 return "hibernate";
78         case PM_EVENT_THAW:
79                 return "thaw";
80         case PM_EVENT_RESTORE:
81                 return "restore";
82         case PM_EVENT_RECOVER:
83                 return "recover";
84         default:
85                 return "(unknown PM event)";
86         }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95         dev->power.is_prepared = false;
96         dev->power.is_suspended = false;
97         dev->power.is_noirq_suspended = false;
98         dev->power.is_late_suspended = false;
99         init_completion(&dev->power.completion);
100         complete_all(&dev->power.completion);
101         dev->power.wakeup = NULL;
102         INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110         mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118         mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127         pr_debug("PM: Adding info for %s:%s\n",
128                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
129         device_pm_check_callbacks(dev);
130         mutex_lock(&dpm_list_mtx);
131         if (dev->parent && dev->parent->power.is_prepared)
132                 dev_warn(dev, "parent %s should not be sleeping\n",
133                         dev_name(dev->parent));
134         list_add_tail(&dev->power.entry, &dpm_list);
135         dev->power.in_dpm_list = true;
136         mutex_unlock(&dpm_list_mtx);
137 }
138
139 /**
140  * device_pm_remove - Remove a device from the PM core's list of active devices.
141  * @dev: Device to be removed from the list.
142  */
143 void device_pm_remove(struct device *dev)
144 {
145         pr_debug("PM: Removing info for %s:%s\n",
146                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
147         complete_all(&dev->power.completion);
148         mutex_lock(&dpm_list_mtx);
149         list_del_init(&dev->power.entry);
150         dev->power.in_dpm_list = false;
151         mutex_unlock(&dpm_list_mtx);
152         device_wakeup_disable(dev);
153         pm_runtime_remove(dev);
154         device_pm_check_callbacks(dev);
155 }
156
157 /**
158  * device_pm_move_before - Move device in the PM core's list of active devices.
159  * @deva: Device to move in dpm_list.
160  * @devb: Device @deva should come before.
161  */
162 void device_pm_move_before(struct device *deva, struct device *devb)
163 {
164         pr_debug("PM: Moving %s:%s before %s:%s\n",
165                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
166                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
167         /* Delete deva from dpm_list and reinsert before devb. */
168         list_move_tail(&deva->power.entry, &devb->power.entry);
169 }
170
171 /**
172  * device_pm_move_after - Move device in the PM core's list of active devices.
173  * @deva: Device to move in dpm_list.
174  * @devb: Device @deva should come after.
175  */
176 void device_pm_move_after(struct device *deva, struct device *devb)
177 {
178         pr_debug("PM: Moving %s:%s after %s:%s\n",
179                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
180                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
181         /* Delete deva from dpm_list and reinsert after devb. */
182         list_move(&deva->power.entry, &devb->power.entry);
183 }
184
185 /**
186  * device_pm_move_last - Move device to end of the PM core's list of devices.
187  * @dev: Device to move in dpm_list.
188  */
189 void device_pm_move_last(struct device *dev)
190 {
191         pr_debug("PM: Moving %s:%s to end of list\n",
192                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
193         list_move_tail(&dev->power.entry, &dpm_list);
194 }
195
196 static ktime_t initcall_debug_start(struct device *dev)
197 {
198         ktime_t calltime = 0;
199
200         if (pm_print_times_enabled) {
201                 pr_info("calling  %s+ @ %i, parent: %s\n",
202                         dev_name(dev), task_pid_nr(current),
203                         dev->parent ? dev_name(dev->parent) : "none");
204                 calltime = ktime_get();
205         }
206
207         return calltime;
208 }
209
210 static void initcall_debug_report(struct device *dev, ktime_t calltime,
211                                   int error, pm_message_t state,
212                                   const char *info)
213 {
214         ktime_t rettime;
215         s64 nsecs;
216
217         rettime = ktime_get();
218         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
219
220         if (pm_print_times_enabled) {
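                /*
                 * nsecs >> 10 approximates a nanoseconds-to-microseconds
                 * conversion (dividing by 1024 instead of 1000).
                 */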
221                 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
222                         error, (unsigned long long)nsecs >> 10);
223         }
224 }
225
226 /**
227  * dpm_wait - Wait for a PM operation to complete.
228  * @dev: Device to wait for.
229  * @async: If unset, wait only if the device's power.async_suspend flag is set.
230  */
231 static void dpm_wait(struct device *dev, bool async)
232 {
233         if (!dev)
234                 return;
235
236         if (async || (pm_async_enabled && dev->power.async_suspend))
237                 wait_for_completion(&dev->power.completion);
238 }
239
240 static int dpm_wait_fn(struct device *dev, void *async_ptr)
241 {
242         dpm_wait(dev, *((bool *)async_ptr));
243         return 0;
244 }
245
246 static void dpm_wait_for_children(struct device *dev, bool async)
247 {
248         device_for_each_child(dev, &async, dpm_wait_fn);
249 }
250
251 static void dpm_wait_for_suppliers(struct device *dev, bool async)
252 {
253         struct device_link *link;
254         int idx;
255
256         idx = device_links_read_lock();
257
258         /*
259          * If the supplier goes away right after we've checked the link to it,
260          * we'll wait for its completion to change the state, but that's fine,
261          * because the only things that will block as a result are the SRCU
262          * callbacks freeing the link objects for the links in the list we're
263          * walking.
264          */
265         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
266                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
267                         dpm_wait(link->supplier, async);
268
269         device_links_read_unlock(idx);
270 }
271
272 static void dpm_wait_for_superior(struct device *dev, bool async)
273 {
274         dpm_wait(dev->parent, async);
275         dpm_wait_for_suppliers(dev, async);
276 }
277
278 static void dpm_wait_for_consumers(struct device *dev, bool async)
279 {
280         struct device_link *link;
281         int idx;
282
283         idx = device_links_read_lock();
284
285         /*
286          * The status of a device link can only be changed from "dormant" by a
287          * probe, but that cannot happen during system suspend/resume.  In
288          * theory it can change to "dormant" at that time, but then it is
289          * reasonable to wait for the target device anyway (e.g. if it goes
290          * away, it's better to wait for it to go away completely and then
291          * continue instead of trying to continue in parallel with its
292          * unregistration).
293          */
294         list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
295                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
296                         dpm_wait(link->consumer, async);
297
298         device_links_read_unlock(idx);
299 }
300
301 static void dpm_wait_for_subordinate(struct device *dev, bool async)
302 {
303         dpm_wait_for_children(dev, async);
304         dpm_wait_for_consumers(dev, async);
305 }
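/*
 * Note the symmetry: the resume path waits for a device's "superiors"
 * (parent and suppliers) via dpm_wait_for_superior(), while the suspend path
 * waits for its "subordinates" (children and consumers) via
 * dpm_wait_for_subordinate(), so asynchronous callbacks preserve ordering in
 * both directions.
 */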
306
307 /**
308  * pm_op - Return the PM operation appropriate for given PM event.
309  * @ops: PM operations to choose from.
310  * @state: PM transition of the system being carried out.
311  */
312 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
313 {
314         switch (state.event) {
315 #ifdef CONFIG_SUSPEND
316         case PM_EVENT_SUSPEND:
317                 return ops->suspend;
318         case PM_EVENT_RESUME:
319                 return ops->resume;
320 #endif /* CONFIG_SUSPEND */
321 #ifdef CONFIG_HIBERNATE_CALLBACKS
322         case PM_EVENT_FREEZE:
323         case PM_EVENT_QUIESCE:
324                 return ops->freeze;
325         case PM_EVENT_HIBERNATE:
326                 return ops->poweroff;
327         case PM_EVENT_THAW:
328         case PM_EVENT_RECOVER:
329                 return ops->thaw;
331         case PM_EVENT_RESTORE:
332                 return ops->restore;
333 #endif /* CONFIG_HIBERNATE_CALLBACKS */
334         }
335
336         return NULL;
337 }
338
339 /**
340  * pm_late_early_op - Return the PM operation appropriate for given PM event.
341  * @ops: PM operations to choose from.
342  * @state: PM transition of the system being carried out.
343  *
344  * The returned callback is run with runtime PM disabled for the device.
345  */
346 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
347                                       pm_message_t state)
348 {
349         switch (state.event) {
350 #ifdef CONFIG_SUSPEND
351         case PM_EVENT_SUSPEND:
352                 return ops->suspend_late;
353         case PM_EVENT_RESUME:
354                 return ops->resume_early;
355 #endif /* CONFIG_SUSPEND */
356 #ifdef CONFIG_HIBERNATE_CALLBACKS
357         case PM_EVENT_FREEZE:
358         case PM_EVENT_QUIESCE:
359                 return ops->freeze_late;
360         case PM_EVENT_HIBERNATE:
361                 return ops->poweroff_late;
362         case PM_EVENT_THAW:
363         case PM_EVENT_RECOVER:
364                 return ops->thaw_early;
365         case PM_EVENT_RESTORE:
366                 return ops->restore_early;
367 #endif /* CONFIG_HIBERNATE_CALLBACKS */
368         }
369
370         return NULL;
371 }
372
373 /**
374  * pm_noirq_op - Return the PM operation appropriate for given PM event.
375  * @ops: PM operations to choose from.
376  * @state: PM transition of the system being carried out.
377  *
378  * The device's driver will not receive interrupts while the callback returned
379  * by this function is being executed.
380  */
381 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
382 {
383         switch (state.event) {
384 #ifdef CONFIG_SUSPEND
385         case PM_EVENT_SUSPEND:
386                 return ops->suspend_noirq;
387         case PM_EVENT_RESUME:
388                 return ops->resume_noirq;
389 #endif /* CONFIG_SUSPEND */
390 #ifdef CONFIG_HIBERNATE_CALLBACKS
391         case PM_EVENT_FREEZE:
392         case PM_EVENT_QUIESCE:
393                 return ops->freeze_noirq;
394         case PM_EVENT_HIBERNATE:
395                 return ops->poweroff_noirq;
396         case PM_EVENT_THAW:
397         case PM_EVENT_RECOVER:
398                 return ops->thaw_noirq;
399         case PM_EVENT_RESTORE:
400                 return ops->restore_noirq;
401 #endif /* CONFIG_HIBERNATE_CALLBACKS */
402         }
403
404         return NULL;
405 }
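/*
 * Illustrative example (the foo_* names are hypothetical): for a dev_pm_ops
 * such as
 *
 *      static const struct dev_pm_ops foo_pm_ops = {
 *              .suspend       = foo_suspend,
 *              .suspend_late  = foo_suspend_late,
 *              .suspend_noirq = foo_suspend_noirq,
 *              .resume        = foo_resume,
 *              .resume_early  = foo_resume_early,
 *              .resume_noirq  = foo_resume_noirq,
 *      };
 *
 * pm_op(), pm_late_early_op() and pm_noirq_op() select .suspend,
 * .suspend_late and .suspend_noirq respectively for PM_EVENT_SUSPEND, and the
 * corresponding resume callbacks for PM_EVENT_RESUME.
 */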
406
407 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
408 {
409         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
410                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
411                 ", may wakeup" : "");
412 }
413
414 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
415                         int error)
416 {
417         printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
418                 dev_name(dev), pm_verb(state.event), info, error);
419 }
420
421 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
422                           const char *info)
423 {
424         ktime_t calltime;
425         u64 usecs64;
426         int usecs;
427
428         calltime = ktime_get();
429         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
430         do_div(usecs64, NSEC_PER_USEC);
431         usecs = usecs64;
432         if (usecs == 0)
433                 usecs = 1;
434
435         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
436                   info ?: "", info ? " " : "", pm_verb(state.event),
437                   error ? "aborted" : "complete",
438                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
439 }
440
441 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
442                             pm_message_t state, const char *info)
443 {
444         ktime_t calltime;
445         int error;
446
447         if (!cb)
448                 return 0;
449
450         calltime = initcall_debug_start(dev);
451
452         pm_dev_dbg(dev, state, info);
453         trace_device_pm_callback_start(dev, info, state.event);
454         error = cb(dev);
455         trace_device_pm_callback_end(dev, error);
456         suspend_report_result(cb, error);
457
458         initcall_debug_report(dev, calltime, error, state, info);
459
460         return error;
461 }
462
463 #ifdef CONFIG_DPM_WATCHDOG
464 struct dpm_watchdog {
465         struct device           *dev;
466         struct task_struct      *tsk;
467         struct timer_list       timer;
468 };
469
470 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
471         struct dpm_watchdog wd
472
473 /**
474  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
475  * @t: Timer within the watchdog object that has timed out.
476  *
477  * Called when a driver has timed out suspending or resuming.
478  * There's not much we can do here to recover so panic() to
479  * capture a crash-dump in pstore.
480  */
481 static void dpm_watchdog_handler(struct timer_list *t)
482 {
483         struct dpm_watchdog *wd = from_timer(wd, t, timer);
484
485         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
486         show_stack(wd->tsk, NULL);
487         panic("%s %s: unrecoverable failure\n",
488                 dev_driver_string(wd->dev), dev_name(wd->dev));
489 }
490
491 /**
492  * dpm_watchdog_set - Enable pm watchdog for given device.
493  * @wd: Watchdog. Must be allocated on the stack.
494  * @dev: Device to handle.
495  */
496 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
497 {
498         struct timer_list *timer = &wd->timer;
499
500         wd->dev = dev;
501         wd->tsk = current;
502
503         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
504         /* use same timeout value for both suspend and resume */
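        /*
         * CONFIG_DPM_WATCHDOG_TIMEOUT is given in seconds; multiplying by HZ
         * converts it to jiffies.
         */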
505         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
506         add_timer(timer);
507 }
508
509 /**
510  * dpm_watchdog_clear - Disable suspend/resume watchdog.
511  * @wd: Watchdog to disable.
512  */
513 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
514 {
515         struct timer_list *timer = &wd->timer;
516
517         del_timer_sync(timer);
518         destroy_timer_on_stack(timer);
519 }
520 #else
521 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
522 #define dpm_watchdog_set(x, y)
523 #define dpm_watchdog_clear(x)
524 #endif
525
526 /*------------------------- Resume routines -------------------------*/
527
528 /**
529  * device_resume_noirq - Execute a "noirq resume" callback for given device.
530  * @dev: Device to handle.
531  * @state: PM transition of the system being carried out.
532  * @async: If true, the device is being resumed asynchronously.
533  *
534  * The driver of @dev will not receive interrupts while this function is being
535  * executed.
536  */
537 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
538 {
539         pm_callback_t callback = NULL;
540         const char *info = NULL;
541         int error = 0;
542
543         TRACE_DEVICE(dev);
544         TRACE_RESUME(0);
545
546         if (dev->power.syscore || dev->power.direct_complete)
547                 goto Out;
548
549         if (!dev->power.is_noirq_suspended)
550                 goto Out;
551
552         dpm_wait_for_superior(dev, async);
553
554         if (dev->pm_domain) {
555                 info = "noirq power domain ";
556                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
557         } else if (dev->type && dev->type->pm) {
558                 info = "noirq type ";
559                 callback = pm_noirq_op(dev->type->pm, state);
560         } else if (dev->class && dev->class->pm) {
561                 info = "noirq class ";
562                 callback = pm_noirq_op(dev->class->pm, state);
563         } else if (dev->bus && dev->bus->pm) {
564                 info = "noirq bus ";
565                 callback = pm_noirq_op(dev->bus->pm, state);
566         }
567
568         if (!callback && dev->driver && dev->driver->pm) {
569                 info = "noirq driver ";
570                 callback = pm_noirq_op(dev->driver->pm, state);
571         }
572
573         error = dpm_run_callback(callback, dev, state, info);
574         dev->power.is_noirq_suspended = false;
575
576  Out:
577         complete_all(&dev->power.completion);
578         TRACE_RESUME(error);
579         return error;
580 }
581
582 static bool is_async(struct device *dev)
583 {
584         return dev->power.async_suspend && pm_async_enabled
585                 && !pm_trace_is_enabled();
586 }
587
588 static void async_resume_noirq(void *data, async_cookie_t cookie)
589 {
590         struct device *dev = (struct device *)data;
591         int error;
592
593         error = device_resume_noirq(dev, pm_transition, true);
594         if (error)
595                 pm_dev_err(dev, pm_transition, " async", error);
596
597         put_device(dev);
598 }
599
600 void dpm_noirq_resume_devices(pm_message_t state)
601 {
602         struct device *dev;
603         ktime_t starttime = ktime_get();
604
605         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
606         mutex_lock(&dpm_list_mtx);
607         pm_transition = state;
608
609         /*
610          * Advance the async threads upfront, in case the start of the
611          * async threads is delayed by devices that are resumed
612          * synchronously (non-async).
613          */
614         list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
615                 reinit_completion(&dev->power.completion);
616                 if (is_async(dev)) {
617                         get_device(dev);
618                         async_schedule(async_resume_noirq, dev);
619                 }
620         }
621
622         while (!list_empty(&dpm_noirq_list)) {
623                 dev = to_device(dpm_noirq_list.next);
624                 get_device(dev);
625                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
626                 mutex_unlock(&dpm_list_mtx);
627
628                 if (!is_async(dev)) {
629                         int error;
630
631                         error = device_resume_noirq(dev, state, false);
632                         if (error) {
633                                 suspend_stats.failed_resume_noirq++;
634                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
635                                 dpm_save_failed_dev(dev_name(dev));
636                                 pm_dev_err(dev, state, " noirq", error);
637                         }
638                 }
639
640                 mutex_lock(&dpm_list_mtx);
641                 put_device(dev);
642         }
643         mutex_unlock(&dpm_list_mtx);
644         async_synchronize_full();
645         dpm_show_time(starttime, state, 0, "noirq");
646         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
647 }
648
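/*
 * dpm_noirq_end() undoes dpm_noirq_begin(): device interrupts are re-enabled,
 * wake IRQs are disarmed and cpuidle is allowed to run again.
 */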
649 void dpm_noirq_end(void)
650 {
651         resume_device_irqs();
652         device_wakeup_disarm_wake_irqs();
653         cpuidle_resume();
654 }
655
656 /**
657  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
658  * @state: PM transition of the system being carried out.
659  *
660  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
661  * allow device drivers' interrupt handlers to be called.
662  */
663 void dpm_resume_noirq(pm_message_t state)
664 {
665         dpm_noirq_resume_devices(state);
666         dpm_noirq_end();
667 }
668
669 /**
670  * device_resume_early - Execute an "early resume" callback for given device.
671  * @dev: Device to handle.
672  * @state: PM transition of the system being carried out.
673  * @async: If true, the device is being resumed asynchronously.
674  *
675  * Runtime PM is disabled for @dev while this function is being executed.
676  */
677 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
678 {
679         pm_callback_t callback = NULL;
680         const char *info = NULL;
681         int error = 0;
682
683         TRACE_DEVICE(dev);
684         TRACE_RESUME(0);
685
686         if (dev->power.syscore || dev->power.direct_complete)
687                 goto Out;
688
689         if (!dev->power.is_late_suspended)
690                 goto Out;
691
692         dpm_wait_for_superior(dev, async);
693
694         if (dev->pm_domain) {
695                 info = "early power domain ";
696                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
697         } else if (dev->type && dev->type->pm) {
698                 info = "early type ";
699                 callback = pm_late_early_op(dev->type->pm, state);
700         } else if (dev->class && dev->class->pm) {
701                 info = "early class ";
702                 callback = pm_late_early_op(dev->class->pm, state);
703         } else if (dev->bus && dev->bus->pm) {
704                 info = "early bus ";
705                 callback = pm_late_early_op(dev->bus->pm, state);
706         }
707
708         if (!callback && dev->driver && dev->driver->pm) {
709                 info = "early driver ";
710                 callback = pm_late_early_op(dev->driver->pm, state);
711         }
712
713         error = dpm_run_callback(callback, dev, state, info);
714         dev->power.is_late_suspended = false;
715
716  Out:
717         TRACE_RESUME(error);
718
719         pm_runtime_enable(dev);
720         complete_all(&dev->power.completion);
721         return error;
722 }
723
724 static void async_resume_early(void *data, async_cookie_t cookie)
725 {
726         struct device *dev = (struct device *)data;
727         int error;
728
729         error = device_resume_early(dev, pm_transition, true);
730         if (error)
731                 pm_dev_err(dev, pm_transition, " async", error);
732
733         put_device(dev);
734 }
735
736 /**
737  * dpm_resume_early - Execute "early resume" callbacks for all devices.
738  * @state: PM transition of the system being carried out.
739  */
740 void dpm_resume_early(pm_message_t state)
741 {
742         struct device *dev;
743         ktime_t starttime = ktime_get();
744
745         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
746         mutex_lock(&dpm_list_mtx);
747         pm_transition = state;
748
749         /*
750          * Advance the async threads upfront, in case the start of the
751          * async threads is delayed by devices that are resumed
752          * synchronously (non-async).
753          */
754         list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
755                 reinit_completion(&dev->power.completion);
756                 if (is_async(dev)) {
757                         get_device(dev);
758                         async_schedule(async_resume_early, dev);
759                 }
760         }
761
762         while (!list_empty(&dpm_late_early_list)) {
763                 dev = to_device(dpm_late_early_list.next);
764                 get_device(dev);
765                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
766                 mutex_unlock(&dpm_list_mtx);
767
768                 if (!is_async(dev)) {
769                         int error;
770
771                         error = device_resume_early(dev, state, false);
772                         if (error) {
773                                 suspend_stats.failed_resume_early++;
774                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
775                                 dpm_save_failed_dev(dev_name(dev));
776                                 pm_dev_err(dev, state, " early", error);
777                         }
778                 }
779                 mutex_lock(&dpm_list_mtx);
780                 put_device(dev);
781         }
782         mutex_unlock(&dpm_list_mtx);
783         async_synchronize_full();
784         dpm_show_time(starttime, state, 0, "early");
785         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
786 }
787
788 /**
789  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
790  * @state: PM transition of the system being carried out.
791  */
792 void dpm_resume_start(pm_message_t state)
793 {
794         dpm_resume_noirq(state);
795         dpm_resume_early(state);
796 }
797 EXPORT_SYMBOL_GPL(dpm_resume_start);
798
799 /**
800  * device_resume - Execute "resume" callbacks for given device.
801  * @dev: Device to handle.
802  * @state: PM transition of the system being carried out.
803  * @async: If true, the device is being resumed asynchronously.
804  */
805 static int device_resume(struct device *dev, pm_message_t state, bool async)
806 {
807         pm_callback_t callback = NULL;
808         const char *info = NULL;
809         int error = 0;
810         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
811
812         TRACE_DEVICE(dev);
813         TRACE_RESUME(0);
814
815         if (dev->power.syscore)
816                 goto Complete;
817
818         if (dev->power.direct_complete) {
819                 /* Match the pm_runtime_disable() in __device_suspend(). */
820                 pm_runtime_enable(dev);
821                 goto Complete;
822         }
823
824         dpm_wait_for_superior(dev, async);
825         dpm_watchdog_set(&wd, dev);
826         device_lock(dev);
827
828         /*
829          * This is a fib.  But we'll allow new children to be added below
830          * a resumed device, even if the device hasn't been completed yet.
831          */
832         dev->power.is_prepared = false;
833
834         if (!dev->power.is_suspended)
835                 goto Unlock;
836
837         if (dev->pm_domain) {
838                 info = "power domain ";
839                 callback = pm_op(&dev->pm_domain->ops, state);
840                 goto Driver;
841         }
842
843         if (dev->type && dev->type->pm) {
844                 info = "type ";
845                 callback = pm_op(dev->type->pm, state);
846                 goto Driver;
847         }
848
849         if (dev->class && dev->class->pm) {
850                 info = "class ";
851                 callback = pm_op(dev->class->pm, state);
852                 goto Driver;
853         }
854
855         if (dev->bus) {
856                 if (dev->bus->pm) {
857                         info = "bus ";
858                         callback = pm_op(dev->bus->pm, state);
859                 } else if (dev->bus->resume) {
860                         info = "legacy bus ";
861                         callback = dev->bus->resume;
862                         goto End;
863                 }
864         }
865
866  Driver:
867         if (!callback && dev->driver && dev->driver->pm) {
868                 info = "driver ";
869                 callback = pm_op(dev->driver->pm, state);
870         }
871
872  End:
873         error = dpm_run_callback(callback, dev, state, info);
874         dev->power.is_suspended = false;
875
876  Unlock:
877         device_unlock(dev);
878         dpm_watchdog_clear(&wd);
879
880  Complete:
881         complete_all(&dev->power.completion);
882
883         TRACE_RESUME(error);
884
885         return error;
886 }
887
888 static void async_resume(void *data, async_cookie_t cookie)
889 {
890         struct device *dev = (struct device *)data;
891         int error;
892
893         error = device_resume(dev, pm_transition, true);
894         if (error)
895                 pm_dev_err(dev, pm_transition, " async", error);
896         put_device(dev);
897 }
898
899 /**
900  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
901  * @state: PM transition of the system being carried out.
902  *
903  * Execute the appropriate "resume" callback for all devices whose status
904  * indicates that they are suspended.
905  */
906 void dpm_resume(pm_message_t state)
907 {
908         struct device *dev;
909         ktime_t starttime = ktime_get();
910
911         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
912         might_sleep();
913
914         mutex_lock(&dpm_list_mtx);
915         pm_transition = state;
916         async_error = 0;
917
918         list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
919                 reinit_completion(&dev->power.completion);
920                 if (is_async(dev)) {
921                         get_device(dev);
922                         async_schedule(async_resume, dev);
923                 }
924         }
925
926         while (!list_empty(&dpm_suspended_list)) {
927                 dev = to_device(dpm_suspended_list.next);
928                 get_device(dev);
929                 if (!is_async(dev)) {
930                         int error;
931
932                         mutex_unlock(&dpm_list_mtx);
933
934                         error = device_resume(dev, state, false);
935                         if (error) {
936                                 suspend_stats.failed_resume++;
937                                 dpm_save_failed_step(SUSPEND_RESUME);
938                                 dpm_save_failed_dev(dev_name(dev));
939                                 pm_dev_err(dev, state, "", error);
940                         }
941
942                         mutex_lock(&dpm_list_mtx);
943                 }
944                 if (!list_empty(&dev->power.entry))
945                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
946                 put_device(dev);
947         }
948         mutex_unlock(&dpm_list_mtx);
949         async_synchronize_full();
950         dpm_show_time(starttime, state, 0, NULL);
951
952         cpufreq_resume();
953         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
954 }
955
956 /**
957  * device_complete - Complete a PM transition for given device.
958  * @dev: Device to handle.
959  * @state: PM transition of the system being carried out.
960  */
961 static void device_complete(struct device *dev, pm_message_t state)
962 {
963         void (*callback)(struct device *) = NULL;
964         const char *info = NULL;
965
966         if (dev->power.syscore)
967                 return;
968
969         device_lock(dev);
970
971         if (dev->pm_domain) {
972                 info = "completing power domain ";
973                 callback = dev->pm_domain->ops.complete;
974         } else if (dev->type && dev->type->pm) {
975                 info = "completing type ";
976                 callback = dev->type->pm->complete;
977         } else if (dev->class && dev->class->pm) {
978                 info = "completing class ";
979                 callback = dev->class->pm->complete;
980         } else if (dev->bus && dev->bus->pm) {
981                 info = "completing bus ";
982                 callback = dev->bus->pm->complete;
983         }
984
985         if (!callback && dev->driver && dev->driver->pm) {
986                 info = "completing driver ";
987                 callback = dev->driver->pm->complete;
988         }
989
990         if (callback) {
991                 pm_dev_dbg(dev, state, info);
992                 callback(dev);
993         }
994
995         device_unlock(dev);
996
997         pm_runtime_put(dev);
998 }
999
1000 /**
1001  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1002  * @state: PM transition of the system being carried out.
1003  *
1004  * Execute the ->complete() callbacks for all devices whose PM status is not
1005  * DPM_ON (this allows new devices to be registered).
1006  */
1007 void dpm_complete(pm_message_t state)
1008 {
1009         struct list_head list;
1010
1011         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1012         might_sleep();
1013
1014         INIT_LIST_HEAD(&list);
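        /*
         * Devices are moved onto the temporary local list so that dpm_list_mtx
         * can be dropped while each ->complete() callback runs; the list is
         * spliced back into dpm_list once everything has completed.
         */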
1015         mutex_lock(&dpm_list_mtx);
1016         while (!list_empty(&dpm_prepared_list)) {
1017                 struct device *dev = to_device(dpm_prepared_list.prev);
1018
1019                 get_device(dev);
1020                 dev->power.is_prepared = false;
1021                 list_move(&dev->power.entry, &list);
1022                 mutex_unlock(&dpm_list_mtx);
1023
1024                 trace_device_pm_callback_start(dev, "", state.event);
1025                 device_complete(dev, state);
1026                 trace_device_pm_callback_end(dev, 0);
1027
1028                 mutex_lock(&dpm_list_mtx);
1029                 put_device(dev);
1030         }
1031         list_splice(&list, &dpm_list);
1032         mutex_unlock(&dpm_list_mtx);
1033
1034         /* Allow device probing and trigger re-probing of deferred devices */
1035         device_unblock_probing();
1036         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1037 }
1038
1039 /**
1040  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1041  * @state: PM transition of the system being carried out.
1042  *
1043  * Execute "resume" callbacks for all devices and complete the PM transition of
1044  * the system.
1045  */
1046 void dpm_resume_end(pm_message_t state)
1047 {
1048         dpm_resume(state);
1049         dpm_complete(state);
1050 }
1051 EXPORT_SYMBOL_GPL(dpm_resume_end);
1052
1053
1054 /*------------------------- Suspend routines -------------------------*/
1055
1056 /**
1057  * resume_event - Return a "resume" message for given "suspend" sleep state.
1058  * @sleep_state: PM message representing a sleep state.
1059  *
1060  * Return a PM message representing the resume event corresponding to given
1061  * sleep state.
1062  */
1063 static pm_message_t resume_event(pm_message_t sleep_state)
1064 {
1065         switch (sleep_state.event) {
1066         case PM_EVENT_SUSPEND:
1067                 return PMSG_RESUME;
1068         case PM_EVENT_FREEZE:
1069         case PM_EVENT_QUIESCE:
1070                 return PMSG_RECOVER;
1071         case PM_EVENT_HIBERNATE:
1072                 return PMSG_RESTORE;
1073         }
1074         return PMSG_ON;
1075 }
1076
1077 /**
1078  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1079  * @dev: Device to handle.
1080  * @state: PM transition of the system being carried out.
1081  * @async: If true, the device is being suspended asynchronously.
1082  *
1083  * The driver of @dev will not receive interrupts while this function is being
1084  * executed.
1085  */
1086 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1087 {
1088         pm_callback_t callback = NULL;
1089         const char *info = NULL;
1090         int error = 0;
1091
1092         TRACE_DEVICE(dev);
1093         TRACE_SUSPEND(0);
1094
1095         dpm_wait_for_subordinate(dev, async);
1096
1097         if (async_error)
1098                 goto Complete;
1099
1100         if (pm_wakeup_pending()) {
1101                 async_error = -EBUSY;
1102                 goto Complete;
1103         }
1104
1105         if (dev->power.syscore || dev->power.direct_complete)
1106                 goto Complete;
1107
1108         if (dev->pm_domain) {
1109                 info = "noirq power domain ";
1110                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1111         } else if (dev->type && dev->type->pm) {
1112                 info = "noirq type ";
1113                 callback = pm_noirq_op(dev->type->pm, state);
1114         } else if (dev->class && dev->class->pm) {
1115                 info = "noirq class ";
1116                 callback = pm_noirq_op(dev->class->pm, state);
1117         } else if (dev->bus && dev->bus->pm) {
1118                 info = "noirq bus ";
1119                 callback = pm_noirq_op(dev->bus->pm, state);
1120         }
1121
1122         if (!callback && dev->driver && dev->driver->pm) {
1123                 info = "noirq driver ";
1124                 callback = pm_noirq_op(dev->driver->pm, state);
1125         }
1126
1127         error = dpm_run_callback(callback, dev, state, info);
1128         if (!error)
1129                 dev->power.is_noirq_suspended = true;
1130         else
1131                 async_error = error;
1132
1133 Complete:
1134         complete_all(&dev->power.completion);
1135         TRACE_SUSPEND(error);
1136         return error;
1137 }
1138
1139 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1140 {
1141         struct device *dev = (struct device *)data;
1142         int error;
1143
1144         error = __device_suspend_noirq(dev, pm_transition, true);
1145         if (error) {
1146                 dpm_save_failed_dev(dev_name(dev));
1147                 pm_dev_err(dev, pm_transition, " async", error);
1148         }
1149
1150         put_device(dev);
1151 }
1152
1153 static int device_suspend_noirq(struct device *dev)
1154 {
1155         reinit_completion(&dev->power.completion);
1156
1157         if (is_async(dev)) {
1158                 get_device(dev);
1159                 async_schedule(async_suspend_noirq, dev);
1160                 return 0;
1161         }
1162         return __device_suspend_noirq(dev, pm_transition, false);
1163 }
1164
1165 void dpm_noirq_begin(void)
1166 {
1167         cpuidle_pause();
1168         device_wakeup_arm_wake_irqs();
1169         suspend_device_irqs();
1170 }
1171
1172 int dpm_noirq_suspend_devices(pm_message_t state)
1173 {
1174         ktime_t starttime = ktime_get();
1175         int error = 0;
1176
1177         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1178         mutex_lock(&dpm_list_mtx);
1179         pm_transition = state;
1180         async_error = 0;
1181
1182         while (!list_empty(&dpm_late_early_list)) {
1183                 struct device *dev = to_device(dpm_late_early_list.prev);
1184
1185                 get_device(dev);
1186                 mutex_unlock(&dpm_list_mtx);
1187
1188                 error = device_suspend_noirq(dev);
1189
1190                 mutex_lock(&dpm_list_mtx);
1191                 if (error) {
1192                         pm_dev_err(dev, state, " noirq", error);
1193                         dpm_save_failed_dev(dev_name(dev));
1194                         put_device(dev);
1195                         break;
1196                 }
1197                 if (!list_empty(&dev->power.entry))
1198                         list_move(&dev->power.entry, &dpm_noirq_list);
1199                 put_device(dev);
1200
1201                 if (async_error)
1202                         break;
1203         }
1204         mutex_unlock(&dpm_list_mtx);
1205         async_synchronize_full();
1206         if (!error)
1207                 error = async_error;
1208
1209         if (error) {
1210                 suspend_stats.failed_suspend_noirq++;
1211                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1212         }
1213         dpm_show_time(starttime, state, error, "noirq");
1214         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1215         return error;
1216 }
1217
1218 /**
1219  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1220  * @state: PM transition of the system being carried out.
1221  *
1222  * Prevent device drivers' interrupt handlers from being called and invoke
1223  * "noirq" suspend callbacks for all non-sysdev devices.
1224  */
1225 int dpm_suspend_noirq(pm_message_t state)
1226 {
1227         int ret;
1228
1229         dpm_noirq_begin();
1230         ret = dpm_noirq_suspend_devices(state);
1231         if (ret)
1232                 dpm_resume_noirq(resume_event(state));
1233
1234         return ret;
1235 }
1236
1237 /**
1238  * __device_suspend_late - Execute a "late suspend" callback for given device.
1239  * @dev: Device to handle.
1240  * @state: PM transition of the system being carried out.
1241  * @async: If true, the device is being suspended asynchronously.
1242  *
1243  * Runtime PM is disabled for @dev while this function is being executed.
1244  */
1245 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1246 {
1247         pm_callback_t callback = NULL;
1248         const char *info = NULL;
1249         int error = 0;
1250
1251         TRACE_DEVICE(dev);
1252         TRACE_SUSPEND(0);
1253
1254         __pm_runtime_disable(dev, false);
1255
1256         dpm_wait_for_subordinate(dev, async);
1257
1258         if (async_error)
1259                 goto Complete;
1260
1261         if (pm_wakeup_pending()) {
1262                 async_error = -EBUSY;
1263                 goto Complete;
1264         }
1265
1266         if (dev->power.syscore || dev->power.direct_complete)
1267                 goto Complete;
1268
1269         if (dev->pm_domain) {
1270                 info = "late power domain ";
1271                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1272         } else if (dev->type && dev->type->pm) {
1273                 info = "late type ";
1274                 callback = pm_late_early_op(dev->type->pm, state);
1275         } else if (dev->class && dev->class->pm) {
1276                 info = "late class ";
1277                 callback = pm_late_early_op(dev->class->pm, state);
1278         } else if (dev->bus && dev->bus->pm) {
1279                 info = "late bus ";
1280                 callback = pm_late_early_op(dev->bus->pm, state);
1281         }
1282
1283         if (!callback && dev->driver && dev->driver->pm) {
1284                 info = "late driver ";
1285                 callback = pm_late_early_op(dev->driver->pm, state);
1286         }
1287
1288         error = dpm_run_callback(callback, dev, state, info);
1289         if (!error)
1290                 dev->power.is_late_suspended = true;
1291         else
1292                 async_error = error;
1293
1294 Complete:
1295         TRACE_SUSPEND(error);
1296         complete_all(&dev->power.completion);
1297         return error;
1298 }
1299
1300 static void async_suspend_late(void *data, async_cookie_t cookie)
1301 {
1302         struct device *dev = (struct device *)data;
1303         int error;
1304
1305         error = __device_suspend_late(dev, pm_transition, true);
1306         if (error) {
1307                 dpm_save_failed_dev(dev_name(dev));
1308                 pm_dev_err(dev, pm_transition, " async", error);
1309         }
1310         put_device(dev);
1311 }
1312
1313 static int device_suspend_late(struct device *dev)
1314 {
1315         reinit_completion(&dev->power.completion);
1316
1317         if (is_async(dev)) {
1318                 get_device(dev);
1319                 async_schedule(async_suspend_late, dev);
1320                 return 0;
1321         }
1322
1323         return __device_suspend_late(dev, pm_transition, false);
1324 }
1325
1326 /**
1327  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1328  * @state: PM transition of the system being carried out.
1329  */
1330 int dpm_suspend_late(pm_message_t state)
1331 {
1332         ktime_t starttime = ktime_get();
1333         int error = 0;
1334
1335         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1336         mutex_lock(&dpm_list_mtx);
1337         pm_transition = state;
1338         async_error = 0;
1339
1340         while (!list_empty(&dpm_suspended_list)) {
1341                 struct device *dev = to_device(dpm_suspended_list.prev);
1342
1343                 get_device(dev);
1344                 mutex_unlock(&dpm_list_mtx);
1345
1346                 error = device_suspend_late(dev);
1347
1348                 mutex_lock(&dpm_list_mtx);
1349                 if (!list_empty(&dev->power.entry))
1350                         list_move(&dev->power.entry, &dpm_late_early_list);
1351
1352                 if (error) {
1353                         pm_dev_err(dev, state, " late", error);
1354                         dpm_save_failed_dev(dev_name(dev));
1355                         put_device(dev);
1356                         break;
1357                 }
1358                 put_device(dev);
1359
1360                 if (async_error)
1361                         break;
1362         }
1363         mutex_unlock(&dpm_list_mtx);
1364         async_synchronize_full();
1365         if (!error)
1366                 error = async_error;
1367         if (error) {
1368                 suspend_stats.failed_suspend_late++;
1369                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1370                 dpm_resume_early(resume_event(state));
1371         }
1372         dpm_show_time(starttime, state, error, "late");
1373         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1374         return error;
1375 }
1376
1377 /**
1378  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1379  * @state: PM transition of the system being carried out.
1380  */
1381 int dpm_suspend_end(pm_message_t state)
1382 {
1383         int error = dpm_suspend_late(state);
1384         if (error)
1385                 return error;
1386
1387         error = dpm_suspend_noirq(state);
1388         if (error) {
1389                 dpm_resume_early(resume_event(state));
1390                 return error;
1391         }
1392
1393         return 0;
1394 }
1395 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1396
1397 /**
1398  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1399  * @dev: Device to suspend.
1400  * @state: PM transition of the system being carried out.
1401  * @cb: Suspend callback to execute.
1402  * @info: string description of caller.
1403  */
1404 static int legacy_suspend(struct device *dev, pm_message_t state,
1405                           int (*cb)(struct device *dev, pm_message_t state),
1406                           const char *info)
1407 {
1408         int error;
1409         ktime_t calltime;
1410
1411         calltime = initcall_debug_start(dev);
1412
1413         trace_device_pm_callback_start(dev, info, state.event);
1414         error = cb(dev, state);
1415         trace_device_pm_callback_end(dev, error);
1416         suspend_report_result(cb, error);
1417
1418         initcall_debug_report(dev, calltime, error, state, info);
1419
1420         return error;
1421 }
1422
1423 static void dpm_clear_suppliers_direct_complete(struct device *dev)
1424 {
1425         struct device_link *link;
1426         int idx;
1427
1428         idx = device_links_read_lock();
1429
1430         list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1431                 spin_lock_irq(&link->supplier->power.lock);
1432                 link->supplier->power.direct_complete = false;
1433                 spin_unlock_irq(&link->supplier->power.lock);
1434         }
1435
1436         device_links_read_unlock(idx);
1437 }
1438
1439 /**
1440  * __device_suspend - Execute "suspend" callbacks for given device.
1441  * @dev: Device to handle.
1442  * @state: PM transition of the system being carried out.
1443  * @async: If true, the device is being suspended asynchronously.
1444  */
1445 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1446 {
1447         pm_callback_t callback = NULL;
1448         const char *info = NULL;
1449         int error = 0;
1450         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1451
1452         TRACE_DEVICE(dev);
1453         TRACE_SUSPEND(0);
1454
1455         dpm_wait_for_subordinate(dev, async);
1456
1457         if (async_error)
1458                 goto Complete;
1459
1460         /*
1461          * If a device configured to wake up the system from sleep states
1462          * has been suspended at run time and there's a resume request pending
1463          * for it, this is equivalent to the device signaling wakeup, so the
1464          * system suspend operation should be aborted.
1465          */
1466         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1467                 pm_wakeup_event(dev, 0);
1468
1469         if (pm_wakeup_pending()) {
1470                 async_error = -EBUSY;
1471                 goto Complete;
1472         }
1473
1474         if (dev->power.syscore)
1475                 goto Complete;
1476
1477         if (dev->power.direct_complete) {
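                /*
                 * If the device is still runtime-suspended once runtime PM has
                 * been disabled (so its status can no longer change), leave it
                 * alone and skip the suspend callbacks; otherwise clear
                 * direct_complete and suspend it normally.
                 */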
1478                 if (pm_runtime_status_suspended(dev)) {
1479                         pm_runtime_disable(dev);
1480                         if (pm_runtime_status_suspended(dev))
1481                                 goto Complete;
1482
1483                         pm_runtime_enable(dev);
1484                 }
1485                 dev->power.direct_complete = false;
1486         }
1487
1488         dpm_watchdog_set(&wd, dev);
1489         device_lock(dev);
1490
1491         if (dev->pm_domain) {
1492                 info = "power domain ";
1493                 callback = pm_op(&dev->pm_domain->ops, state);
1494                 goto Run;
1495         }
1496
1497         if (dev->type && dev->type->pm) {
1498                 info = "type ";
1499                 callback = pm_op(dev->type->pm, state);
1500                 goto Run;
1501         }
1502
1503         if (dev->class && dev->class->pm) {
1504                 info = "class ";
1505                 callback = pm_op(dev->class->pm, state);
1506                 goto Run;
1507         }
1508
1509         if (dev->bus) {
1510                 if (dev->bus->pm) {
1511                         info = "bus ";
1512                         callback = pm_op(dev->bus->pm, state);
1513                 } else if (dev->bus->suspend) {
1514                         pm_dev_dbg(dev, state, "legacy bus ");
1515                         error = legacy_suspend(dev, state, dev->bus->suspend,
1516                                                 "legacy bus ");
1517                         goto End;
1518                 }
1519         }
1520
1521  Run:
1522         if (!callback && dev->driver && dev->driver->pm) {
1523                 info = "driver ";
1524                 callback = pm_op(dev->driver->pm, state);
1525         }
1526
1527         error = dpm_run_callback(callback, dev, state, info);
1528
1529  End:
1530         if (!error) {
1531                 struct device *parent = dev->parent;
1532
1533                 dev->power.is_suspended = true;
1534                 if (parent) {
1535                         spin_lock_irq(&parent->power.lock);
1536
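                        /*
                         * A device that has been suspended with callbacks
                         * prevents its parent from using direct_complete;
                         * wakeup_path is propagated to the parent unless the
                         * parent ignores its children.
                         */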
1537                         dev->parent->power.direct_complete = false;
1538                         if (dev->power.wakeup_path
1539                             && !dev->parent->power.ignore_children)
1540                                 dev->parent->power.wakeup_path = true;
1541
1542                         spin_unlock_irq(&parent->power.lock);
1543                 }
1544                 dpm_clear_suppliers_direct_complete(dev);
1545         }
1546
1547         device_unlock(dev);
1548         dpm_watchdog_clear(&wd);
1549
1550  Complete:
1551         if (error)
1552                 async_error = error;
1553
1554         complete_all(&dev->power.completion);
1555         TRACE_SUSPEND(error);
1556         return error;
1557 }
1558
1559 static void async_suspend(void *data, async_cookie_t cookie)
1560 {
1561         struct device *dev = (struct device *)data;
1562         int error;
1563
1564         error = __device_suspend(dev, pm_transition, true);
1565         if (error) {
1566                 dpm_save_failed_dev(dev_name(dev));
1567                 pm_dev_err(dev, pm_transition, " async", error);
1568         }
1569
1570         put_device(dev);
1571 }
1572
1573 static int device_suspend(struct device *dev)
1574 {
1575         reinit_completion(&dev->power.completion);
1576
1577         if (is_async(dev)) {
1578                 get_device(dev);
1579                 async_schedule(async_suspend, dev);
1580                 return 0;
1581         }
1582
1583         return __device_suspend(dev, pm_transition, false);
1584 }
1585
1586 /**
1587  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1588  * @state: PM transition of the system being carried out.
1589  */
1590 int dpm_suspend(pm_message_t state)
1591 {
1592         ktime_t starttime = ktime_get();
1593         int error = 0;
1594
1595         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1596         might_sleep();
1597
1598         cpufreq_suspend();
1599
1600         mutex_lock(&dpm_list_mtx);
1601         pm_transition = state;
1602         async_error = 0;
1603         while (!list_empty(&dpm_prepared_list)) {
1604                 struct device *dev = to_device(dpm_prepared_list.prev);
1605
1606                 get_device(dev);
1607                 mutex_unlock(&dpm_list_mtx);
1608
1609                 error = device_suspend(dev);
1610
1611                 mutex_lock(&dpm_list_mtx);
1612                 if (error) {
1613                         pm_dev_err(dev, state, "", error);
1614                         dpm_save_failed_dev(dev_name(dev));
1615                         put_device(dev);
1616                         break;
1617                 }
1618                 if (!list_empty(&dev->power.entry))
1619                         list_move(&dev->power.entry, &dpm_suspended_list);
1620                 put_device(dev);
1621                 if (async_error)
1622                         break;
1623         }
1624         mutex_unlock(&dpm_list_mtx);
1625         async_synchronize_full();
1626         if (!error)
1627                 error = async_error;
1628         if (error) {
1629                 suspend_stats.failed_suspend++;
1630                 dpm_save_failed_step(SUSPEND_SUSPEND);
1631         }
1632         dpm_show_time(starttime, state, error, NULL);
1633         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1634         return error;
1635 }
1636
1637 /**
1638  * device_prepare - Prepare a device for system power transition.
1639  * @dev: Device to handle.
1640  * @state: PM transition of the system being carried out.
1641  *
1642  * Execute the ->prepare() callback(s) for the given device.  No new children
1643  * of the device may be registered after this function has returned.
1644  */
1645 static int device_prepare(struct device *dev, pm_message_t state)
1646 {
1647         int (*callback)(struct device *) = NULL;
1648         int ret = 0;
1649
1650         if (dev->power.syscore)
1651                 return 0;
1652
1653         WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
1654                 !pm_runtime_enabled(dev));
1655
1656         /*
1657          * If a device's parent goes into runtime suspend at the wrong time,
1658          * it won't be possible to resume the device.  To prevent this we
1659          * block runtime suspend here, during the prepare phase, and allow
1660          * it again during the complete phase.
1661          */
1662         pm_runtime_get_noresume(dev);
1663
1664         device_lock(dev);
1665
1666         dev->power.wakeup_path = device_may_wakeup(dev);
1667
1668         if (dev->power.no_pm_callbacks) {
1669                 ret = 1;        /* Let device go direct_complete */
1670                 goto unlock;
1671         }
1672
1673         if (dev->pm_domain)
1674                 callback = dev->pm_domain->ops.prepare;
1675         else if (dev->type && dev->type->pm)
1676                 callback = dev->type->pm->prepare;
1677         else if (dev->class && dev->class->pm)
1678                 callback = dev->class->pm->prepare;
1679         else if (dev->bus && dev->bus->pm)
1680                 callback = dev->bus->pm->prepare;
1681
1682         if (!callback && dev->driver && dev->driver->pm)
1683                 callback = dev->driver->pm->prepare;
1684
1685         if (callback)
1686                 ret = callback(dev);
1687
1688 unlock:
1689         device_unlock(dev);
1690
1691         if (ret < 0) {
1692                 suspend_report_result(callback, ret);
1693                 pm_runtime_put(dev);
1694                 return ret;
1695         }
1696         /*
1697          * A positive return value from ->prepare() means "this device appears
1698          * to be runtime-suspended and its state is fine, so if it really is
1699          * runtime-suspended, you can leave it in that state provided that you
1700          * will do the same thing with all of its descendants".  This only
1701          * applies to suspend transitions, however.
1702          */
1703         spin_lock_irq(&dev->power.lock);
1704         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1705                 pm_runtime_suspended(dev) && ret > 0 &&
1706                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1707         spin_unlock_irq(&dev->power.lock);
1708         return 0;
1709 }
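
/*
 * Illustrative sketch (hypothetical driver code, not part of the PM core):
 * a ->prepare() callback that opts into the direct_complete optimization
 * described above by returning a positive value when the device looks
 * runtime-suspended and its state is adequate for the transition.
 */
static int __maybe_unused foo_prepare(struct device *dev)
{
        /*
         * Returning 1 tells the PM core that, if the device really is
         * runtime-suspended, it (and its descendants, under the same
         * condition) may be left in that state across the transition.
         */
        return pm_runtime_suspended(dev) ? 1 : 0;
}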
1710
1711 /**
1712  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1713  * @state: PM transition of the system being carried out.
1714  *
1715  * Execute the ->prepare() callback(s) for all devices.
1716  */
1717 int dpm_prepare(pm_message_t state)
1718 {
1719         int error = 0;
1720
1721         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1722         might_sleep();
1723
1724         /*
1725          * Give the known devices a chance to complete their probes before
1726          * probing of new devices is disabled below.  This sync point
1727          * matters at least at boot time and during hibernation restore.
1728          */
1729         wait_for_device_probe();
1730         /*
1731          * Probing a device during suspend or hibernation is unsafe and
1732          * would make system behavior unpredictable, so prohibit device
1733          * probing here and defer any probe attempts instead.  The normal
1734          * behavior will be restored in dpm_complete().
1735          */
1736         device_block_probing();
1737
1738         mutex_lock(&dpm_list_mtx);
1739         while (!list_empty(&dpm_list)) {
1740                 struct device *dev = to_device(dpm_list.next);
1741
1742                 get_device(dev);
1743                 mutex_unlock(&dpm_list_mtx);
1744
1745                 trace_device_pm_callback_start(dev, "", state.event);
1746                 error = device_prepare(dev, state);
1747                 trace_device_pm_callback_end(dev, error);
1748
1749                 mutex_lock(&dpm_list_mtx);
1750                 if (error) {
1751                         if (error == -EAGAIN) {
1752                                 put_device(dev);
1753                                 error = 0;
1754                                 continue;
1755                         }
1756                         printk(KERN_INFO
1757                                "PM: Device %s not prepared for power transition: code %d\n",
1758                                dev_name(dev), error);
1759                         put_device(dev);
1760                         break;
1761                 }
1762                 dev->power.is_prepared = true;
1763                 if (!list_empty(&dev->power.entry))
1764                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1765                 put_device(dev);
1766         }
1767         mutex_unlock(&dpm_list_mtx);
1768         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1769         return error;
1770 }
1771
1772 /**
1773  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1774  * @state: PM transition of the system being carried out.
1775  *
1776  * Prepare all non-sysdev devices for a system PM transition and execute "suspend"
1777  * callbacks for them.
1778  */
1779 int dpm_suspend_start(pm_message_t state)
1780 {
1781         int error;
1782
1783         error = dpm_prepare(state);
1784         if (error) {
1785                 suspend_stats.failed_prepare++;
1786                 dpm_save_failed_step(SUSPEND_PREPARE);
1787         } else
1788                 error = dpm_suspend(state);
1789         return error;
1790 }
1791 EXPORT_SYMBOL_GPL(dpm_suspend_start);
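
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * minimal platform suspend path would call dpm_suspend_start() and, both on
 * failure and after leaving the low-power state, bring the devices back with
 * dpm_resume_end(), roughly mirroring what suspend_devices_and_enter() in
 * kernel/power/suspend.c does.
 */
static int __maybe_unused example_suspend_devices(void)
{
        int error;

        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
                /* Resume whatever was prepared and/or suspended so far. */
                dpm_resume_end(PMSG_RESUME);
                return error;
        }

        /* ... enter the low-power state and wake up again ... */

        dpm_resume_end(PMSG_RESUME);
        return 0;
}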
1792
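/*
 * __suspend_report_result - Report a device callback failure during suspend.
 *
 * Normally reached through the suspend_report_result() macro, which passes
 * the caller's function name as @function.
 */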
1793 void __suspend_report_result(const char *function, void *fn, int ret)
1794 {
1795         if (ret)
1796                 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1797 }
1798 EXPORT_SYMBOL_GPL(__suspend_report_result);
1799
1800 /**
1801  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1802  * @dev: Device to wait for.
1803  * @subordinate: Device that needs to wait for @dev.
1804  */
1805 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1806 {
1807         dpm_wait(dev, subordinate->power.async_suspend);
1808         return async_error;
1809 }
1810 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
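
/*
 * Illustrative sketch (hypothetical driver code with a made-up foo_priv
 * structure; not part of this file): a resume callback that uses
 * device_pm_wait_for_dev() so that, with asynchronous resume, the hardware
 * is not touched before a device it functionally depends on has resumed.
 */
struct foo_priv {
        struct device *supplier;        /* device that must resume before ours */
};

static int __maybe_unused foo_resume(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        /* Block until the supplier has completed its resume phase. */
        device_pm_wait_for_dev(dev, priv->supplier);

        /* ... reinitialize the hardware ... */
        return 0;
}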
1811
1812 /**
1813  * dpm_for_each_dev - device iterator.
1814  * @data: data for the callback.
1815  * @fn: function to be called for each device.
1816  *
1817  * Iterate over devices in dpm_list, and call @fn for each device,
1818  * passing it @data.
1819  */
1820 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1821 {
1822         struct device *dev;
1823
1824         if (!fn)
1825                 return;
1826
1827         device_pm_lock();
1828         list_for_each_entry(dev, &dpm_list, power.entry)
1829                 fn(dev, data);
1830         device_pm_unlock();
1831 }
1832 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
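
/*
 * Illustrative sketch (hypothetical helpers, not part of this file): using
 * dpm_for_each_dev() to count the devices currently on dpm_list.
 */
static void __maybe_unused example_count_one(struct device *dev, void *data)
{
        unsigned int *count = data;

        (*count)++;
}

static unsigned int __maybe_unused example_count_devices(void)
{
        unsigned int count = 0;

        dpm_for_each_dev(&count, example_count_one);
        return count;
}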
1833
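/*
 * pm_ops_is_empty - Return true if @ops is NULL or provides none of the
 * prepare, suspend, suspend_late, suspend_noirq, resume_noirq, resume_early,
 * resume and complete callbacks.
 */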
1834 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1835 {
1836         if (!ops)
1837                 return true;
1838
1839         return !ops->prepare &&
1840                !ops->suspend &&
1841                !ops->suspend_late &&
1842                !ops->suspend_noirq &&
1843                !ops->resume_noirq &&
1844                !ops->resume_early &&
1845                !ops->resume &&
1846                !ops->complete;
1847 }
1848
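/*
 * device_pm_check_callbacks - Cache whether a device has any PM callbacks.
 *
 * Set power.no_pm_callbacks if neither the device's PM domain, type, class,
 * bus nor driver provides system sleep callbacks, so that the PM core can
 * skip the device quickly during transitions.
 */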
1849 void device_pm_check_callbacks(struct device *dev)
1850 {
1851         spin_lock_irq(&dev->power.lock);
1852         dev->power.no_pm_callbacks =
1853                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1854                  !dev->bus->suspend && !dev->bus->resume)) &&
1855                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
1856                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1857                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1858                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1859                  !dev->driver->suspend && !dev->driver->resume));
1860         spin_unlock_irq(&dev->power.lock);
1861 }
1862
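/*
 * dev_pm_smart_suspend_and_suspended - Return true if the device's driver has
 * set DPM_FLAG_SMART_SUSPEND and the device is runtime-suspended, in which
 * case its subsystem may avoid resuming it only to suspend it again.
 */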
1863 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
1864 {
1865         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
1866                 pm_runtime_status_suspended(dev);
1867 }