1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17
18 #define pr_fmt(fmt) "PM: " fmt
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44         list_for_each_entry_rcu(pos, head, member, \
45                         device_links_read_lock_held())
46
47 /*
48  * The entries in the dpm_list list are in a depth first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mtx.
55  */
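/*
 * Illustrative sketch only (not part of the driver core): the safe ordering
 * is to take the device lock first and let device_pm_add() take dpm_list_mtx
 * internally, e.g.
 *
 *	device_lock(dev);
 *	device_pm_add(dev);
 *	device_unlock(dev);
 *
 * whereas calling device_lock() while already holding dpm_list_mtx may
 * deadlock against such a caller.
 */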
56
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62
63 struct suspend_stats suspend_stats;
64 static DEFINE_MUTEX(dpm_list_mtx);
65 static pm_message_t pm_transition;
66
67 static int async_error;
68
69 static const char *pm_verb(int event)
70 {
71         switch (event) {
72         case PM_EVENT_SUSPEND:
73                 return "suspend";
74         case PM_EVENT_RESUME:
75                 return "resume";
76         case PM_EVENT_FREEZE:
77                 return "freeze";
78         case PM_EVENT_QUIESCE:
79                 return "quiesce";
80         case PM_EVENT_HIBERNATE:
81                 return "hibernate";
82         case PM_EVENT_THAW:
83                 return "thaw";
84         case PM_EVENT_RESTORE:
85                 return "restore";
86         case PM_EVENT_RECOVER:
87                 return "recover";
88         default:
89                 return "(unknown PM event)";
90         }
91 }
92
93 /**
94  * device_pm_sleep_init - Initialize system suspend-related device fields.
95  * @dev: Device object being initialized.
96  */
97 void device_pm_sleep_init(struct device *dev)
98 {
99         dev->power.is_prepared = false;
100         dev->power.is_suspended = false;
101         dev->power.is_noirq_suspended = false;
102         dev->power.is_late_suspended = false;
103         init_completion(&dev->power.completion);
104         complete_all(&dev->power.completion);
105         dev->power.wakeup = NULL;
106         INIT_LIST_HEAD(&dev->power.entry);
107 }
108
109 /**
110  * device_pm_lock - Lock the list of active devices used by the PM core.
111  */
112 void device_pm_lock(void)
113 {
114         mutex_lock(&dpm_list_mtx);
115 }
116
117 /**
118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
119  */
120 void device_pm_unlock(void)
121 {
122         mutex_unlock(&dpm_list_mtx);
123 }
124
125 /**
126  * device_pm_add - Add a device to the PM core's list of active devices.
127  * @dev: Device to add to the list.
128  */
129 void device_pm_add(struct device *dev)
130 {
131         /* Skip PM setup/initialization. */
132         if (device_pm_not_required(dev))
133                 return;
134
135         pr_debug("Adding info for %s:%s\n",
136                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137         device_pm_check_callbacks(dev);
138         mutex_lock(&dpm_list_mtx);
139         if (dev->parent && dev->parent->power.is_prepared)
140                 dev_warn(dev, "parent %s should not be sleeping\n",
141                         dev_name(dev->parent));
142         list_add_tail(&dev->power.entry, &dpm_list);
143         dev->power.in_dpm_list = true;
144         mutex_unlock(&dpm_list_mtx);
145 }
146
147 /**
148  * device_pm_remove - Remove a device from the PM core's list of active devices.
149  * @dev: Device to be removed from the list.
150  */
151 void device_pm_remove(struct device *dev)
152 {
153         if (device_pm_not_required(dev))
154                 return;
155
156         pr_debug("Removing info for %s:%s\n",
157                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158         complete_all(&dev->power.completion);
159         mutex_lock(&dpm_list_mtx);
160         list_del_init(&dev->power.entry);
161         dev->power.in_dpm_list = false;
162         mutex_unlock(&dpm_list_mtx);
163         device_wakeup_disable(dev);
164         pm_runtime_remove(dev);
165         device_pm_check_callbacks(dev);
166 }
167
168 /**
169  * device_pm_move_before - Move device in the PM core's list of active devices.
170  * @deva: Device to move in dpm_list.
171  * @devb: Device @deva should come before.
172  */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175         pr_debug("Moving %s:%s before %s:%s\n",
176                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178         /* Delete deva from dpm_list and reinsert before devb. */
179         list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181
182 /**
183  * device_pm_move_after - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come after.
186  */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189         pr_debug("Moving %s:%s after %s:%s\n",
190                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192         /* Delete deva from dpm_list and reinsert after devb. */
193         list_move(&deva->power.entry, &devb->power.entry);
194 }
195
196 /**
197  * device_pm_move_last - Move device to end of the PM core's list of devices.
198  * @dev: Device to move in dpm_list.
199  */
200 void device_pm_move_last(struct device *dev)
201 {
202         pr_debug("Moving %s:%s to end of list\n",
203                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204         list_move_tail(&dev->power.entry, &dpm_list);
205 }
206
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209         if (!pm_print_times_enabled)
210                 return 0;
211
212         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
213                  task_pid_nr(current),
214                  dev->parent ? dev_name(dev->parent) : "none");
215         return ktime_get();
216 }
217
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219                                   void *cb, int error)
220 {
221         ktime_t rettime;
222         s64 nsecs;
223
224         if (!pm_print_times_enabled)
225                 return;
226
227         rettime = ktime_get();
228         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
229
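        /* The "usecs" value is approximate: ">> 10" divides nanoseconds by 1024. */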
230         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
231                  (unsigned long long)nsecs >> 10);
232 }
233
234 /**
235  * dpm_wait - Wait for a PM operation to complete.
236  * @dev: Device to wait for.
237  * @async: If unset, wait only if the device's power.async_suspend flag is set.
238  */
239 static void dpm_wait(struct device *dev, bool async)
240 {
241         if (!dev)
242                 return;
243
244         if (async || (pm_async_enabled && dev->power.async_suspend))
245                 wait_for_completion(&dev->power.completion);
246 }
247
248 static int dpm_wait_fn(struct device *dev, void *async_ptr)
249 {
250         dpm_wait(dev, *((bool *)async_ptr));
251         return 0;
252 }
253
254 static void dpm_wait_for_children(struct device *dev, bool async)
255 {
256         device_for_each_child(dev, &async, dpm_wait_fn);
257 }
258
259 static void dpm_wait_for_suppliers(struct device *dev, bool async)
260 {
261         struct device_link *link;
262         int idx;
263
264         idx = device_links_read_lock();
265
266         /*
267          * If the supplier goes away right after we've checked the link to it,
268          * we'll wait for its completion to change the state, but that's fine,
269          * because the only things that will block as a result are the SRCU
270          * callbacks freeing the link objects for the links in the list we're
271          * walking.
272          */
273         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
274                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
275                         dpm_wait(link->supplier, async);
276
277         device_links_read_unlock(idx);
278 }
279
280 static bool dpm_wait_for_superior(struct device *dev, bool async)
281 {
282         struct device *parent;
283
284         /*
285          * If the device is resumed asynchronously and the parent's callback
286          * deletes both the device and the parent itself, the parent object may
287          * be freed while this function is running, so avoid that by reference
288          * counting the parent once more unless the device has been deleted
289          * already (in which case return right away).
290          */
291         mutex_lock(&dpm_list_mtx);
292
293         if (!device_pm_initialized(dev)) {
294                 mutex_unlock(&dpm_list_mtx);
295                 return false;
296         }
297
298         parent = get_device(dev->parent);
299
300         mutex_unlock(&dpm_list_mtx);
301
302         dpm_wait(parent, async);
303         put_device(parent);
304
305         dpm_wait_for_suppliers(dev, async);
306
307         /*
308          * If the parent's callback has deleted the device, attempting to resume
309          * it would be invalid, so avoid doing that then.
310          */
311         return device_pm_initialized(dev);
312 }
313
314 static void dpm_wait_for_consumers(struct device *dev, bool async)
315 {
316         struct device_link *link;
317         int idx;
318
319         idx = device_links_read_lock();
320
321         /*
322          * The status of a device link can only be changed from "dormant" by a
323          * probe, but that cannot happen during system suspend/resume.  In
324          * theory it can change to "dormant" at that time, but then it is
325          * reasonable to wait for the target device anyway (eg. if it goes
326          * away, it's better to wait for it to go away completely and then
327          * continue instead of trying to continue in parallel with its
328          * unregistration).
329          */
330         list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
331                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
332                         dpm_wait(link->consumer, async);
333
334         device_links_read_unlock(idx);
335 }
336
337 static void dpm_wait_for_subordinate(struct device *dev, bool async)
338 {
339         dpm_wait_for_children(dev, async);
340         dpm_wait_for_consumers(dev, async);
341 }
342
343 /**
344  * pm_op - Return the PM operation appropriate for given PM event.
345  * @ops: PM operations to choose from.
346  * @state: PM transition of the system being carried out.
347  */
348 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
349 {
350         switch (state.event) {
351 #ifdef CONFIG_SUSPEND
352         case PM_EVENT_SUSPEND:
353                 return ops->suspend;
354         case PM_EVENT_RESUME:
355                 return ops->resume;
356 #endif /* CONFIG_SUSPEND */
357 #ifdef CONFIG_HIBERNATE_CALLBACKS
358         case PM_EVENT_FREEZE:
359         case PM_EVENT_QUIESCE:
360                 return ops->freeze;
361         case PM_EVENT_HIBERNATE:
362                 return ops->poweroff;
363         case PM_EVENT_THAW:
364         case PM_EVENT_RECOVER:
365                 return ops->thaw;
367         case PM_EVENT_RESTORE:
368                 return ops->restore;
369 #endif /* CONFIG_HIBERNATE_CALLBACKS */
370         }
371
372         return NULL;
373 }
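/*
 * For illustration only (hypothetical driver, not part of this file): pm_op()
 * selects one of the following dev_pm_ops members depending on the event:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend  = foo_suspend,	// PM_EVENT_SUSPEND
 *		.resume   = foo_resume,		// PM_EVENT_RESUME
 *		.freeze   = foo_freeze,		// PM_EVENT_FREEZE / QUIESCE
 *		.poweroff = foo_poweroff,	// PM_EVENT_HIBERNATE
 *		.thaw     = foo_thaw,		// PM_EVENT_THAW / RECOVER
 *		.restore  = foo_restore,	// PM_EVENT_RESTORE
 *	};
 */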
374
375 /**
376  * pm_late_early_op - Return the PM operation appropriate for given PM event.
377  * @ops: PM operations to choose from.
378  * @state: PM transition of the system being carried out.
379  *
380  * Runtime PM is disabled for @dev while this function is being executed.
381  */
382 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
383                                       pm_message_t state)
384 {
385         switch (state.event) {
386 #ifdef CONFIG_SUSPEND
387         case PM_EVENT_SUSPEND:
388                 return ops->suspend_late;
389         case PM_EVENT_RESUME:
390                 return ops->resume_early;
391 #endif /* CONFIG_SUSPEND */
392 #ifdef CONFIG_HIBERNATE_CALLBACKS
393         case PM_EVENT_FREEZE:
394         case PM_EVENT_QUIESCE:
395                 return ops->freeze_late;
396         case PM_EVENT_HIBERNATE:
397                 return ops->poweroff_late;
398         case PM_EVENT_THAW:
399         case PM_EVENT_RECOVER:
400                 return ops->thaw_early;
401         case PM_EVENT_RESTORE:
402                 return ops->restore_early;
403 #endif /* CONFIG_HIBERNATE_CALLBACKS */
404         }
405
406         return NULL;
407 }
408
409 /**
410  * pm_noirq_op - Return the PM operation appropriate for given PM event.
411  * @ops: PM operations to choose from.
412  * @state: PM transition of the system being carried out.
413  *
414  * The driver of @dev will not receive interrupts while this function is being
415  * executed.
416  */
417 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
418 {
419         switch (state.event) {
420 #ifdef CONFIG_SUSPEND
421         case PM_EVENT_SUSPEND:
422                 return ops->suspend_noirq;
423         case PM_EVENT_RESUME:
424                 return ops->resume_noirq;
425 #endif /* CONFIG_SUSPEND */
426 #ifdef CONFIG_HIBERNATE_CALLBACKS
427         case PM_EVENT_FREEZE:
428         case PM_EVENT_QUIESCE:
429                 return ops->freeze_noirq;
430         case PM_EVENT_HIBERNATE:
431                 return ops->poweroff_noirq;
432         case PM_EVENT_THAW:
433         case PM_EVENT_RECOVER:
434                 return ops->thaw_noirq;
435         case PM_EVENT_RESTORE:
436                 return ops->restore_noirq;
437 #endif /* CONFIG_HIBERNATE_CALLBACKS */
438         }
439
440         return NULL;
441 }
442
443 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
444 {
445         dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
446                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
447                 ", may wakeup" : "");
448 }
449
450 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
451                         int error)
452 {
453         pr_err("Device %s failed to %s%s: error %d\n",
454                dev_name(dev), pm_verb(state.event), info, error);
455 }
456
457 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
458                           const char *info)
459 {
460         ktime_t calltime;
461         u64 usecs64;
462         int usecs;
463
464         calltime = ktime_get();
465         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
466         do_div(usecs64, NSEC_PER_USEC);
467         usecs = usecs64;
468         if (usecs == 0)
469                 usecs = 1;
470
471         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
472                   info ?: "", info ? " " : "", pm_verb(state.event),
473                   error ? "aborted" : "complete",
474                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
475 }
476
477 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
478                             pm_message_t state, const char *info)
479 {
480         ktime_t calltime;
481         int error;
482
483         if (!cb)
484                 return 0;
485
486         calltime = initcall_debug_start(dev, cb);
487
488         pm_dev_dbg(dev, state, info);
489         trace_device_pm_callback_start(dev, info, state.event);
490         error = cb(dev);
491         trace_device_pm_callback_end(dev, error);
492         suspend_report_result(cb, error);
493
494         initcall_debug_report(dev, calltime, cb, error);
495
496         return error;
497 }
498
499 #ifdef CONFIG_DPM_WATCHDOG
500 struct dpm_watchdog {
501         struct device           *dev;
502         struct task_struct      *tsk;
503         struct timer_list       timer;
504 };
505
506 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
507         struct dpm_watchdog wd
508
509 /**
510  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
511  * @t: The timer that PM watchdog depends on.
512  *
513  * Called when a driver has timed out suspending or resuming.
514  * There's not much we can do here to recover so panic() to
515  * capture a crash-dump in pstore.
516  */
517 static void dpm_watchdog_handler(struct timer_list *t)
518 {
519         struct dpm_watchdog *wd = from_timer(wd, t, timer);
520
521         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
522         show_stack(wd->tsk, NULL, KERN_EMERG);
523         panic("%s %s: unrecoverable failure\n",
524                 dev_driver_string(wd->dev), dev_name(wd->dev));
525 }
526
527 /**
528  * dpm_watchdog_set - Enable pm watchdog for given device.
529  * @wd: Watchdog. Must be allocated on the stack.
530  * @dev: Device to handle.
531  */
532 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
533 {
534         struct timer_list *timer = &wd->timer;
535
536         wd->dev = dev;
537         wd->tsk = current;
538
539         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
540         /* use same timeout value for both suspend and resume */
541         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
542         add_timer(timer);
543 }
544
545 /**
546  * dpm_watchdog_clear - Disable suspend/resume watchdog.
547  * @wd: Watchdog to disable.
548  */
549 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
550 {
551         struct timer_list *timer = &wd->timer;
552
553         del_timer_sync(timer);
554         destroy_timer_on_stack(timer);
555 }
556 #else
557 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
558 #define dpm_watchdog_set(x, y)
559 #define dpm_watchdog_clear(x)
560 #endif
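/*
 * Typical use of the watchdog helpers above, as done by device_resume() and
 * __device_suspend() later in this file:
 *
 *	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 *
 *	dpm_watchdog_set(&wd, dev);
 *	... invoke the device's suspend or resume callback ...
 *	dpm_watchdog_clear(&wd);
 */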
561
562 /*------------------------- Resume routines -------------------------*/
563
564 /**
565  * dev_pm_skip_resume - System-wide device resume optimization check.
566  * @dev: Target device.
567  *
568  * Return:
569  * - %false if the transition under way is RESTORE.
570  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
571  * - The logical negation of %power.must_resume otherwise (that is, when the
572  *   transition under way is RESUME).
573  */
574 bool dev_pm_skip_resume(struct device *dev)
575 {
576         if (pm_transition.event == PM_EVENT_RESTORE)
577                 return false;
578
579         if (pm_transition.event == PM_EVENT_THAW)
580                 return dev_pm_skip_suspend(dev);
581
582         return !dev->power.must_resume;
583 }
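/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver typically opts into these optimizations from its probe routine,
 * e.g.
 *
 *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *				     DPM_FLAG_MAY_SKIP_RESUME);
 *
 * after which the checks here and in __device_suspend_noirq() decide whether
 * the driver callbacks may be skipped for a given transition.
 */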
584
585 /**
586  * device_resume_noirq - Execute a "noirq resume" callback for given device.
587  * @dev: Device to handle.
588  * @state: PM transition of the system being carried out.
589  * @async: If true, the device is being resumed asynchronously.
590  *
591  * The driver of @dev will not receive interrupts while this function is being
592  * executed.
593  */
594 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
595 {
596         pm_callback_t callback = NULL;
597         const char *info = NULL;
598         bool skip_resume;
599         int error = 0;
600
601         TRACE_DEVICE(dev);
602         TRACE_RESUME(0);
603
604         if (dev->power.syscore || dev->power.direct_complete)
605                 goto Out;
606
607         if (!dev->power.is_noirq_suspended)
608                 goto Out;
609
610         if (!dpm_wait_for_superior(dev, async))
611                 goto Out;
612
613         skip_resume = dev_pm_skip_resume(dev);
614         /*
615          * If the driver callback is skipped below or by the middle layer
616          * callback and device_resume_early() also skips the driver callback for
617          * this device later, it needs to appear as "suspended" to PM-runtime,
618          * so change its status accordingly.
619          *
620          * Otherwise, the device is going to be resumed, so set its PM-runtime
621          * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
622          * to avoid confusing drivers that don't use it.
623          */
624         if (skip_resume)
625                 pm_runtime_set_suspended(dev);
626         else if (dev_pm_skip_suspend(dev))
627                 pm_runtime_set_active(dev);
628
629         if (dev->pm_domain) {
630                 info = "noirq power domain ";
631                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
632         } else if (dev->type && dev->type->pm) {
633                 info = "noirq type ";
634                 callback = pm_noirq_op(dev->type->pm, state);
635         } else if (dev->class && dev->class->pm) {
636                 info = "noirq class ";
637                 callback = pm_noirq_op(dev->class->pm, state);
638         } else if (dev->bus && dev->bus->pm) {
639                 info = "noirq bus ";
640                 callback = pm_noirq_op(dev->bus->pm, state);
641         }
642         if (callback)
643                 goto Run;
644
645         if (skip_resume)
646                 goto Skip;
647
648         if (dev->driver && dev->driver->pm) {
649                 info = "noirq driver ";
650                 callback = pm_noirq_op(dev->driver->pm, state);
651         }
652
653 Run:
654         error = dpm_run_callback(callback, dev, state, info);
655
656 Skip:
657         dev->power.is_noirq_suspended = false;
658
659 Out:
660         complete_all(&dev->power.completion);
661         TRACE_RESUME(error);
662         return error;
663 }
664
665 static bool is_async(struct device *dev)
666 {
667         return dev->power.async_suspend && pm_async_enabled
668                 && !pm_trace_is_enabled();
669 }
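/*
 * Note: dev->power.async_suspend is normally set via
 * device_enable_async_suspend(dev) by the device's subsystem or driver, and
 * pm_async_enabled is controlled through /sys/power/pm_async.
 */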
670
671 static bool dpm_async_fn(struct device *dev, async_func_t func)
672 {
673         reinit_completion(&dev->power.completion);
674
675         if (is_async(dev)) {
676                 get_device(dev);
677                 async_schedule_dev(func, dev);
678                 return true;
679         }
680
681         return false;
682 }
683
684 static void async_resume_noirq(void *data, async_cookie_t cookie)
685 {
686         struct device *dev = (struct device *)data;
687         int error;
688
689         error = device_resume_noirq(dev, pm_transition, true);
690         if (error)
691                 pm_dev_err(dev, pm_transition, " async", error);
692
693         put_device(dev);
694 }
695
696 static void dpm_noirq_resume_devices(pm_message_t state)
697 {
698         struct device *dev;
699         ktime_t starttime = ktime_get();
700
701         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
702         mutex_lock(&dpm_list_mtx);
703         pm_transition = state;
704
705         /*
706          * Start the async threads upfront, so that they are not delayed
707          * by devices that are resumed synchronously (non-async) ahead of
708          * them in the list.
709          */
710         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
711                 dpm_async_fn(dev, async_resume_noirq);
712
713         while (!list_empty(&dpm_noirq_list)) {
714                 dev = to_device(dpm_noirq_list.next);
715                 get_device(dev);
716                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
717                 mutex_unlock(&dpm_list_mtx);
718
719                 if (!is_async(dev)) {
720                         int error;
721
722                         error = device_resume_noirq(dev, state, false);
723                         if (error) {
724                                 suspend_stats.failed_resume_noirq++;
725                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
726                                 dpm_save_failed_dev(dev_name(dev));
727                                 pm_dev_err(dev, state, " noirq", error);
728                         }
729                 }
730
731                 mutex_lock(&dpm_list_mtx);
732                 put_device(dev);
733         }
734         mutex_unlock(&dpm_list_mtx);
735         async_synchronize_full();
736         dpm_show_time(starttime, state, 0, "noirq");
737         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
738 }
739
740 /**
741  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
742  * @state: PM transition of the system being carried out.
743  *
744  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
745  * allow device drivers' interrupt handlers to be called.
746  */
747 void dpm_resume_noirq(pm_message_t state)
748 {
749         dpm_noirq_resume_devices(state);
750
751         resume_device_irqs();
752         device_wakeup_disarm_wake_irqs();
753
754         cpuidle_resume();
755 }
756
757 /**
758  * device_resume_early - Execute an "early resume" callback for given device.
759  * @dev: Device to handle.
760  * @state: PM transition of the system being carried out.
761  * @async: If true, the device is being resumed asynchronously.
762  *
763  * Runtime PM is disabled for @dev while this function is being executed.
764  */
765 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
766 {
767         pm_callback_t callback = NULL;
768         const char *info = NULL;
769         int error = 0;
770
771         TRACE_DEVICE(dev);
772         TRACE_RESUME(0);
773
774         if (dev->power.syscore || dev->power.direct_complete)
775                 goto Out;
776
777         if (!dev->power.is_late_suspended)
778                 goto Out;
779
780         if (!dpm_wait_for_superior(dev, async))
781                 goto Out;
782
783         if (dev->pm_domain) {
784                 info = "early power domain ";
785                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
786         } else if (dev->type && dev->type->pm) {
787                 info = "early type ";
788                 callback = pm_late_early_op(dev->type->pm, state);
789         } else if (dev->class && dev->class->pm) {
790                 info = "early class ";
791                 callback = pm_late_early_op(dev->class->pm, state);
792         } else if (dev->bus && dev->bus->pm) {
793                 info = "early bus ";
794                 callback = pm_late_early_op(dev->bus->pm, state);
795         }
796         if (callback)
797                 goto Run;
798
799         if (dev_pm_skip_resume(dev))
800                 goto Skip;
801
802         if (dev->driver && dev->driver->pm) {
803                 info = "early driver ";
804                 callback = pm_late_early_op(dev->driver->pm, state);
805         }
806
807 Run:
808         error = dpm_run_callback(callback, dev, state, info);
809
810 Skip:
811         dev->power.is_late_suspended = false;
812
813 Out:
814         TRACE_RESUME(error);
815
816         pm_runtime_enable(dev);
817         complete_all(&dev->power.completion);
818         return error;
819 }
820
821 static void async_resume_early(void *data, async_cookie_t cookie)
822 {
823         struct device *dev = (struct device *)data;
824         int error;
825
826         error = device_resume_early(dev, pm_transition, true);
827         if (error)
828                 pm_dev_err(dev, pm_transition, " async", error);
829
830         put_device(dev);
831 }
832
833 /**
834  * dpm_resume_early - Execute "early resume" callbacks for all devices.
835  * @state: PM transition of the system being carried out.
836  */
837 void dpm_resume_early(pm_message_t state)
838 {
839         struct device *dev;
840         ktime_t starttime = ktime_get();
841
842         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
843         mutex_lock(&dpm_list_mtx);
844         pm_transition = state;
845
846         /*
847          * Start the async threads upfront, so that they are not delayed
848          * by devices that are resumed synchronously (non-async) ahead of
849          * them in the list.
850          */
851         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
852                 dpm_async_fn(dev, async_resume_early);
853
854         while (!list_empty(&dpm_late_early_list)) {
855                 dev = to_device(dpm_late_early_list.next);
856                 get_device(dev);
857                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
858                 mutex_unlock(&dpm_list_mtx);
859
860                 if (!is_async(dev)) {
861                         int error;
862
863                         error = device_resume_early(dev, state, false);
864                         if (error) {
865                                 suspend_stats.failed_resume_early++;
866                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
867                                 dpm_save_failed_dev(dev_name(dev));
868                                 pm_dev_err(dev, state, " early", error);
869                         }
870                 }
871                 mutex_lock(&dpm_list_mtx);
872                 put_device(dev);
873         }
874         mutex_unlock(&dpm_list_mtx);
875         async_synchronize_full();
876         dpm_show_time(starttime, state, 0, "early");
877         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
878 }
879
880 /**
881  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
882  * @state: PM transition of the system being carried out.
883  */
884 void dpm_resume_start(pm_message_t state)
885 {
886         dpm_resume_noirq(state);
887         dpm_resume_early(state);
888 }
889 EXPORT_SYMBOL_GPL(dpm_resume_start);
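/*
 * dpm_resume_start() is the counterpart of dpm_suspend_end() below: it undoes
 * the "late" and "noirq" suspend phases, in reverse order.
 */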
890
891 /**
892  * device_resume - Execute "resume" callbacks for given device.
893  * @dev: Device to handle.
894  * @state: PM transition of the system being carried out.
895  * @async: If true, the device is being resumed asynchronously.
896  */
897 static int device_resume(struct device *dev, pm_message_t state, bool async)
898 {
899         pm_callback_t callback = NULL;
900         const char *info = NULL;
901         int error = 0;
902         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
903
904         TRACE_DEVICE(dev);
905         TRACE_RESUME(0);
906
907         if (dev->power.syscore)
908                 goto Complete;
909
910         if (dev->power.direct_complete) {
911                 /* Match the pm_runtime_disable() in __device_suspend(). */
912                 pm_runtime_enable(dev);
913                 goto Complete;
914         }
915
916         if (!dpm_wait_for_superior(dev, async))
917                 goto Complete;
918
919         dpm_watchdog_set(&wd, dev);
920         device_lock(dev);
921
922         /*
923          * This is a fib.  But we'll allow new children to be added below
924          * a resumed device, even if the device hasn't been completed yet.
925          */
926         dev->power.is_prepared = false;
927
928         if (!dev->power.is_suspended)
929                 goto Unlock;
930
931         if (dev->pm_domain) {
932                 info = "power domain ";
933                 callback = pm_op(&dev->pm_domain->ops, state);
934                 goto Driver;
935         }
936
937         if (dev->type && dev->type->pm) {
938                 info = "type ";
939                 callback = pm_op(dev->type->pm, state);
940                 goto Driver;
941         }
942
943         if (dev->class && dev->class->pm) {
944                 info = "class ";
945                 callback = pm_op(dev->class->pm, state);
946                 goto Driver;
947         }
948
949         if (dev->bus) {
950                 if (dev->bus->pm) {
951                         info = "bus ";
952                         callback = pm_op(dev->bus->pm, state);
953                 } else if (dev->bus->resume) {
954                         info = "legacy bus ";
955                         callback = dev->bus->resume;
956                         goto End;
957                 }
958         }
959
960  Driver:
961         if (!callback && dev->driver && dev->driver->pm) {
962                 info = "driver ";
963                 callback = pm_op(dev->driver->pm, state);
964         }
965
966  End:
967         error = dpm_run_callback(callback, dev, state, info);
968         dev->power.is_suspended = false;
969
970  Unlock:
971         device_unlock(dev);
972         dpm_watchdog_clear(&wd);
973
974  Complete:
975         complete_all(&dev->power.completion);
976
977         TRACE_RESUME(error);
978
979         return error;
980 }
981
982 static void async_resume(void *data, async_cookie_t cookie)
983 {
984         struct device *dev = (struct device *)data;
985         int error;
986
987         error = device_resume(dev, pm_transition, true);
988         if (error)
989                 pm_dev_err(dev, pm_transition, " async", error);
990         put_device(dev);
991 }
992
993 /**
994  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
995  * @state: PM transition of the system being carried out.
996  *
997  * Execute the appropriate "resume" callback for all devices whose status
998  * indicates that they are suspended.
999  */
1000 void dpm_resume(pm_message_t state)
1001 {
1002         struct device *dev;
1003         ktime_t starttime = ktime_get();
1004
1005         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1006         might_sleep();
1007
1008         mutex_lock(&dpm_list_mtx);
1009         pm_transition = state;
1010         async_error = 0;
1011
1012         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1013                 dpm_async_fn(dev, async_resume);
1014
1015         while (!list_empty(&dpm_suspended_list)) {
1016                 dev = to_device(dpm_suspended_list.next);
1017                 get_device(dev);
1018                 if (!is_async(dev)) {
1019                         int error;
1020
1021                         mutex_unlock(&dpm_list_mtx);
1022
1023                         error = device_resume(dev, state, false);
1024                         if (error) {
1025                                 suspend_stats.failed_resume++;
1026                                 dpm_save_failed_step(SUSPEND_RESUME);
1027                                 dpm_save_failed_dev(dev_name(dev));
1028                                 pm_dev_err(dev, state, "", error);
1029                         }
1030
1031                         mutex_lock(&dpm_list_mtx);
1032                 }
1033                 if (!list_empty(&dev->power.entry))
1034                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1035                 put_device(dev);
1036         }
1037         mutex_unlock(&dpm_list_mtx);
1038         async_synchronize_full();
1039         dpm_show_time(starttime, state, 0, NULL);
1040
1041         cpufreq_resume();
1042         devfreq_resume();
1043         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1044 }
1045
1046 /**
1047  * device_complete - Complete a PM transition for given device.
1048  * @dev: Device to handle.
1049  * @state: PM transition of the system being carried out.
1050  */
1051 static void device_complete(struct device *dev, pm_message_t state)
1052 {
1053         void (*callback)(struct device *) = NULL;
1054         const char *info = NULL;
1055
1056         if (dev->power.syscore)
1057                 return;
1058
1059         device_lock(dev);
1060
1061         if (dev->pm_domain) {
1062                 info = "completing power domain ";
1063                 callback = dev->pm_domain->ops.complete;
1064         } else if (dev->type && dev->type->pm) {
1065                 info = "completing type ";
1066                 callback = dev->type->pm->complete;
1067         } else if (dev->class && dev->class->pm) {
1068                 info = "completing class ";
1069                 callback = dev->class->pm->complete;
1070         } else if (dev->bus && dev->bus->pm) {
1071                 info = "completing bus ";
1072                 callback = dev->bus->pm->complete;
1073         }
1074
1075         if (!callback && dev->driver && dev->driver->pm) {
1076                 info = "completing driver ";
1077                 callback = dev->driver->pm->complete;
1078         }
1079
1080         if (callback) {
1081                 pm_dev_dbg(dev, state, info);
1082                 callback(dev);
1083         }
1084
1085         device_unlock(dev);
1086
1087         pm_runtime_put(dev);
1088 }
1089
1090 /**
1091  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1092  * @state: PM transition of the system being carried out.
1093  *
1094  * Execute the ->complete() callbacks for all devices whose PM status is not
1095  * DPM_ON (this allows new devices to be registered).
1096  */
1097 void dpm_complete(pm_message_t state)
1098 {
1099         struct list_head list;
1100
1101         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1102         might_sleep();
1103
1104         INIT_LIST_HEAD(&list);
1105         mutex_lock(&dpm_list_mtx);
1106         while (!list_empty(&dpm_prepared_list)) {
1107                 struct device *dev = to_device(dpm_prepared_list.prev);
1108
1109                 get_device(dev);
1110                 dev->power.is_prepared = false;
1111                 list_move(&dev->power.entry, &list);
1112                 mutex_unlock(&dpm_list_mtx);
1113
1114                 trace_device_pm_callback_start(dev, "", state.event);
1115                 device_complete(dev, state);
1116                 trace_device_pm_callback_end(dev, 0);
1117
1118                 mutex_lock(&dpm_list_mtx);
1119                 put_device(dev);
1120         }
1121         list_splice(&list, &dpm_list);
1122         mutex_unlock(&dpm_list_mtx);
1123
1124         /* Allow device probing and trigger re-probing of deferred devices */
1125         device_unblock_probing();
1126         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1127 }
1128
1129 /**
1130  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1131  * @state: PM transition of the system being carried out.
1132  *
1133  * Execute "resume" callbacks for all devices and complete the PM transition of
1134  * the system.
1135  */
1136 void dpm_resume_end(pm_message_t state)
1137 {
1138         dpm_resume(state);
1139         dpm_complete(state);
1140 }
1141 EXPORT_SYMBOL_GPL(dpm_resume_end);
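/*
 * Taken together, the resume-side entry points above run in this order during
 * a normal system resume: dpm_resume_noirq(), dpm_resume_early(), dpm_resume()
 * and finally dpm_complete(), i.e. the suspend phases in reverse.
 */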
1142
1143
1144 /*------------------------- Suspend routines -------------------------*/
1145
1146 /**
1147  * resume_event - Return a "resume" message for given "suspend" sleep state.
1148  * @sleep_state: PM message representing a sleep state.
1149  *
1150  * Return a PM message representing the resume event corresponding to given
1151  * sleep state.
1152  */
1153 static pm_message_t resume_event(pm_message_t sleep_state)
1154 {
1155         switch (sleep_state.event) {
1156         case PM_EVENT_SUSPEND:
1157                 return PMSG_RESUME;
1158         case PM_EVENT_FREEZE:
1159         case PM_EVENT_QUIESCE:
1160                 return PMSG_RECOVER;
1161         case PM_EVENT_HIBERNATE:
1162                 return PMSG_RESTORE;
1163         }
1164         return PMSG_ON;
1165 }
1166
1167 static void dpm_superior_set_must_resume(struct device *dev)
1168 {
1169         struct device_link *link;
1170         int idx;
1171
1172         if (dev->parent)
1173                 dev->parent->power.must_resume = true;
1174
1175         idx = device_links_read_lock();
1176
1177         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1178                 link->supplier->power.must_resume = true;
1179
1180         device_links_read_unlock(idx);
1181 }
1182
1183 /**
1184  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1185  * @dev: Device to handle.
1186  * @state: PM transition of the system being carried out.
1187  * @async: If true, the device is being suspended asynchronously.
1188  *
1189  * The driver of @dev will not receive interrupts while this function is being
1190  * executed.
1191  */
1192 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1193 {
1194         pm_callback_t callback = NULL;
1195         const char *info = NULL;
1196         int error = 0;
1197
1198         TRACE_DEVICE(dev);
1199         TRACE_SUSPEND(0);
1200
1201         dpm_wait_for_subordinate(dev, async);
1202
1203         if (async_error)
1204                 goto Complete;
1205
1206         if (dev->power.syscore || dev->power.direct_complete)
1207                 goto Complete;
1208
1209         if (dev->pm_domain) {
1210                 info = "noirq power domain ";
1211                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1212         } else if (dev->type && dev->type->pm) {
1213                 info = "noirq type ";
1214                 callback = pm_noirq_op(dev->type->pm, state);
1215         } else if (dev->class && dev->class->pm) {
1216                 info = "noirq class ";
1217                 callback = pm_noirq_op(dev->class->pm, state);
1218         } else if (dev->bus && dev->bus->pm) {
1219                 info = "noirq bus ";
1220                 callback = pm_noirq_op(dev->bus->pm, state);
1221         }
1222         if (callback)
1223                 goto Run;
1224
1225         if (dev_pm_skip_suspend(dev))
1226                 goto Skip;
1227
1228         if (dev->driver && dev->driver->pm) {
1229                 info = "noirq driver ";
1230                 callback = pm_noirq_op(dev->driver->pm, state);
1231         }
1232
1233 Run:
1234         error = dpm_run_callback(callback, dev, state, info);
1235         if (error) {
1236                 async_error = error;
1237                 goto Complete;
1238         }
1239
1240 Skip:
1241         dev->power.is_noirq_suspended = true;
1242
1243         /*
1244          * Skipping the resume of devices that were in use right before the
1245          * system suspend (as indicated by their PM-runtime usage counters)
1246          * would be suboptimal.  Also resume them if skipping their resume
1247          * is not allowed.
1248          */
1249         if (atomic_read(&dev->power.usage_count) > 1 ||
1250             !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1251               dev->power.may_skip_resume))
1252                 dev->power.must_resume = true;
1253
1254         if (dev->power.must_resume)
1255                 dpm_superior_set_must_resume(dev);
1256
1257 Complete:
1258         complete_all(&dev->power.completion);
1259         TRACE_SUSPEND(error);
1260         return error;
1261 }
1262
1263 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1264 {
1265         struct device *dev = (struct device *)data;
1266         int error;
1267
1268         error = __device_suspend_noirq(dev, pm_transition, true);
1269         if (error) {
1270                 dpm_save_failed_dev(dev_name(dev));
1271                 pm_dev_err(dev, pm_transition, " async", error);
1272         }
1273
1274         put_device(dev);
1275 }
1276
1277 static int device_suspend_noirq(struct device *dev)
1278 {
1279         if (dpm_async_fn(dev, async_suspend_noirq))
1280                 return 0;
1281
1282         return __device_suspend_noirq(dev, pm_transition, false);
1283 }
1284
1285 static int dpm_noirq_suspend_devices(pm_message_t state)
1286 {
1287         ktime_t starttime = ktime_get();
1288         int error = 0;
1289
1290         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1291         mutex_lock(&dpm_list_mtx);
1292         pm_transition = state;
1293         async_error = 0;
1294
1295         while (!list_empty(&dpm_late_early_list)) {
1296                 struct device *dev = to_device(dpm_late_early_list.prev);
1297
1298                 get_device(dev);
1299                 mutex_unlock(&dpm_list_mtx);
1300
1301                 error = device_suspend_noirq(dev);
1302
1303                 mutex_lock(&dpm_list_mtx);
1304                 if (error) {
1305                         pm_dev_err(dev, state, " noirq", error);
1306                         dpm_save_failed_dev(dev_name(dev));
1307                         put_device(dev);
1308                         break;
1309                 }
1310                 if (!list_empty(&dev->power.entry))
1311                         list_move(&dev->power.entry, &dpm_noirq_list);
1312                 put_device(dev);
1313
1314                 if (async_error)
1315                         break;
1316         }
1317         mutex_unlock(&dpm_list_mtx);
1318         async_synchronize_full();
1319         if (!error)
1320                 error = async_error;
1321
1322         if (error) {
1323                 suspend_stats.failed_suspend_noirq++;
1324                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1325         }
1326         dpm_show_time(starttime, state, error, "noirq");
1327         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1328         return error;
1329 }
1330
1331 /**
1332  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1333  * @state: PM transition of the system being carried out.
1334  *
1335  * Prevent device drivers' interrupt handlers from being called and invoke
1336  * "noirq" suspend callbacks for all non-sysdev devices.
1337  */
1338 int dpm_suspend_noirq(pm_message_t state)
1339 {
1340         int ret;
1341
1342         cpuidle_pause();
1343
1344         device_wakeup_arm_wake_irqs();
1345         suspend_device_irqs();
1346
1347         ret = dpm_noirq_suspend_devices(state);
1348         if (ret)
1349                 dpm_resume_noirq(resume_event(state));
1350
1351         return ret;
1352 }
1353
1354 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1355 {
1356         struct device *parent = dev->parent;
1357
1358         if (!parent)
1359                 return;
1360
1361         spin_lock_irq(&parent->power.lock);
1362
1363         if (dev->power.wakeup_path && !parent->power.ignore_children)
1364                 parent->power.wakeup_path = true;
1365
1366         spin_unlock_irq(&parent->power.lock);
1367 }
1368
1369 /**
1370  * __device_suspend_late - Execute a "late suspend" callback for given device.
1371  * @dev: Device to handle.
1372  * @state: PM transition of the system being carried out.
1373  * @async: If true, the device is being suspended asynchronously.
1374  *
1375  * Runtime PM is disabled for @dev while this function is being executed.
1376  */
1377 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1378 {
1379         pm_callback_t callback = NULL;
1380         const char *info = NULL;
1381         int error = 0;
1382
1383         TRACE_DEVICE(dev);
1384         TRACE_SUSPEND(0);
1385
1386         __pm_runtime_disable(dev, false);
1387
1388         dpm_wait_for_subordinate(dev, async);
1389
1390         if (async_error)
1391                 goto Complete;
1392
1393         if (pm_wakeup_pending()) {
1394                 async_error = -EBUSY;
1395                 goto Complete;
1396         }
1397
1398         if (dev->power.syscore || dev->power.direct_complete)
1399                 goto Complete;
1400
1401         if (dev->pm_domain) {
1402                 info = "late power domain ";
1403                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1404         } else if (dev->type && dev->type->pm) {
1405                 info = "late type ";
1406                 callback = pm_late_early_op(dev->type->pm, state);
1407         } else if (dev->class && dev->class->pm) {
1408                 info = "late class ";
1409                 callback = pm_late_early_op(dev->class->pm, state);
1410         } else if (dev->bus && dev->bus->pm) {
1411                 info = "late bus ";
1412                 callback = pm_late_early_op(dev->bus->pm, state);
1413         }
1414         if (callback)
1415                 goto Run;
1416
1417         if (dev_pm_skip_suspend(dev))
1418                 goto Skip;
1419
1420         if (dev->driver && dev->driver->pm) {
1421                 info = "late driver ";
1422                 callback = pm_late_early_op(dev->driver->pm, state);
1423         }
1424
1425 Run:
1426         error = dpm_run_callback(callback, dev, state, info);
1427         if (error) {
1428                 async_error = error;
1429                 goto Complete;
1430         }
1431         dpm_propagate_wakeup_to_parent(dev);
1432
1433 Skip:
1434         dev->power.is_late_suspended = true;
1435
1436 Complete:
1437         TRACE_SUSPEND(error);
1438         complete_all(&dev->power.completion);
1439         return error;
1440 }
1441
1442 static void async_suspend_late(void *data, async_cookie_t cookie)
1443 {
1444         struct device *dev = (struct device *)data;
1445         int error;
1446
1447         error = __device_suspend_late(dev, pm_transition, true);
1448         if (error) {
1449                 dpm_save_failed_dev(dev_name(dev));
1450                 pm_dev_err(dev, pm_transition, " async", error);
1451         }
1452         put_device(dev);
1453 }
1454
1455 static int device_suspend_late(struct device *dev)
1456 {
1457         if (dpm_async_fn(dev, async_suspend_late))
1458                 return 0;
1459
1460         return __device_suspend_late(dev, pm_transition, false);
1461 }
1462
1463 /**
1464  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1465  * @state: PM transition of the system being carried out.
1466  */
1467 int dpm_suspend_late(pm_message_t state)
1468 {
1469         ktime_t starttime = ktime_get();
1470         int error = 0;
1471
1472         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1473         mutex_lock(&dpm_list_mtx);
1474         pm_transition = state;
1475         async_error = 0;
1476
1477         while (!list_empty(&dpm_suspended_list)) {
1478                 struct device *dev = to_device(dpm_suspended_list.prev);
1479
1480                 get_device(dev);
1481                 mutex_unlock(&dpm_list_mtx);
1482
1483                 error = device_suspend_late(dev);
1484
1485                 mutex_lock(&dpm_list_mtx);
1486                 if (!list_empty(&dev->power.entry))
1487                         list_move(&dev->power.entry, &dpm_late_early_list);
1488
1489                 if (error) {
1490                         pm_dev_err(dev, state, " late", error);
1491                         dpm_save_failed_dev(dev_name(dev));
1492                         put_device(dev);
1493                         break;
1494                 }
1495                 put_device(dev);
1496
1497                 if (async_error)
1498                         break;
1499         }
1500         mutex_unlock(&dpm_list_mtx);
1501         async_synchronize_full();
1502         if (!error)
1503                 error = async_error;
1504         if (error) {
1505                 suspend_stats.failed_suspend_late++;
1506                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1507                 dpm_resume_early(resume_event(state));
1508         }
1509         dpm_show_time(starttime, state, error, "late");
1510         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1511         return error;
1512 }
1513
1514 /**
1515  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1516  * @state: PM transition of the system being carried out.
1517  */
1518 int dpm_suspend_end(pm_message_t state)
1519 {
1520         ktime_t starttime = ktime_get();
1521         int error;
1522
1523         error = dpm_suspend_late(state);
1524         if (error)
1525                 goto out;
1526
1527         error = dpm_suspend_noirq(state);
1528         if (error)
1529                 dpm_resume_early(resume_event(state));
1530
1531 out:
1532         dpm_show_time(starttime, state, error, "end");
1533         return error;
1534 }
1535 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1536
1537 /**
1538  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1539  * @dev: Device to suspend.
1540  * @state: PM transition of the system being carried out.
1541  * @cb: Suspend callback to execute.
1542  * @info: string description of caller.
1543  */
1544 static int legacy_suspend(struct device *dev, pm_message_t state,
1545                           int (*cb)(struct device *dev, pm_message_t state),
1546                           const char *info)
1547 {
1548         int error;
1549         ktime_t calltime;
1550
1551         calltime = initcall_debug_start(dev, cb);
1552
1553         trace_device_pm_callback_start(dev, info, state.event);
1554         error = cb(dev, state);
1555         trace_device_pm_callback_end(dev, error);
1556         suspend_report_result(cb, error);
1557
1558         initcall_debug_report(dev, calltime, cb, error);
1559
1560         return error;
1561 }
1562
1563 static void dpm_clear_superiors_direct_complete(struct device *dev)
1564 {
1565         struct device_link *link;
1566         int idx;
1567
1568         if (dev->parent) {
1569                 spin_lock_irq(&dev->parent->power.lock);
1570                 dev->parent->power.direct_complete = false;
1571                 spin_unlock_irq(&dev->parent->power.lock);
1572         }
1573
1574         idx = device_links_read_lock();
1575
1576         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1577                 spin_lock_irq(&link->supplier->power.lock);
1578                 link->supplier->power.direct_complete = false;
1579                 spin_unlock_irq(&link->supplier->power.lock);
1580         }
1581
1582         device_links_read_unlock(idx);
1583 }
1584
1585 /**
1586  * __device_suspend - Execute "suspend" callbacks for given device.
1587  * @dev: Device to handle.
1588  * @state: PM transition of the system being carried out.
1589  * @async: If true, the device is being suspended asynchronously.
1590  */
1591 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1592 {
1593         pm_callback_t callback = NULL;
1594         const char *info = NULL;
1595         int error = 0;
1596         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1597
1598         TRACE_DEVICE(dev);
1599         TRACE_SUSPEND(0);
1600
1601         dpm_wait_for_subordinate(dev, async);
1602
1603         if (async_error) {
1604                 dev->power.direct_complete = false;
1605                 goto Complete;
1606         }
1607
1608         /*
1609          * Wait for possible runtime PM transitions of the device in progress
1610          * to complete and if there's a runtime resume request pending for it,
1611          * resume it before proceeding with invoking the system-wide suspend
1612          * callbacks for it.
1613          *
1614          * If the system-wide suspend callbacks below change the configuration
1615          * of the device, they must disable runtime PM for it or otherwise
1616          * ensure that its runtime-resume callbacks will not be confused by that
1617          * change in case they are invoked going forward.
1618          */
1619         pm_runtime_barrier(dev);
1620
1621         if (pm_wakeup_pending()) {
1622                 dev->power.direct_complete = false;
1623                 async_error = -EBUSY;
1624                 goto Complete;
1625         }
1626
1627         if (dev->power.syscore)
1628                 goto Complete;
1629
1630         /* Avoid direct_complete to let wakeup_path propagate. */
1631         if (device_may_wakeup(dev) || dev->power.wakeup_path)
1632                 dev->power.direct_complete = false;
1633
1634         if (dev->power.direct_complete) {
1635                 if (pm_runtime_status_suspended(dev)) {
1636                         pm_runtime_disable(dev);
1637                         if (pm_runtime_status_suspended(dev)) {
1638                                 pm_dev_dbg(dev, state, "direct-complete ");
1639                                 goto Complete;
1640                         }
1641
1642                         pm_runtime_enable(dev);
1643                 }
1644                 dev->power.direct_complete = false;
1645         }
1646
1647         dev->power.may_skip_resume = true;
1648         dev->power.must_resume = false;
1649
1650         dpm_watchdog_set(&wd, dev);
1651         device_lock(dev);
1652
1653         if (dev->pm_domain) {
1654                 info = "power domain ";
1655                 callback = pm_op(&dev->pm_domain->ops, state);
1656                 goto Run;
1657         }
1658
1659         if (dev->type && dev->type->pm) {
1660                 info = "type ";
1661                 callback = pm_op(dev->type->pm, state);
1662                 goto Run;
1663         }
1664
1665         if (dev->class && dev->class->pm) {
1666                 info = "class ";
1667                 callback = pm_op(dev->class->pm, state);
1668                 goto Run;
1669         }
1670
1671         if (dev->bus) {
1672                 if (dev->bus->pm) {
1673                         info = "bus ";
1674                         callback = pm_op(dev->bus->pm, state);
1675                 } else if (dev->bus->suspend) {
1676                         pm_dev_dbg(dev, state, "legacy bus ");
1677                         error = legacy_suspend(dev, state, dev->bus->suspend,
1678                                                 "legacy bus ");
1679                         goto End;
1680                 }
1681         }
1682
1683  Run:
1684         if (!callback && dev->driver && dev->driver->pm) {
1685                 info = "driver ";
1686                 callback = pm_op(dev->driver->pm, state);
1687         }
1688
1689         error = dpm_run_callback(callback, dev, state, info);
1690
1691  End:
1692         if (!error) {
1693                 dev->power.is_suspended = true;
1694                 if (device_may_wakeup(dev))
1695                         dev->power.wakeup_path = true;
1696
1697                 dpm_propagate_wakeup_to_parent(dev);
1698                 dpm_clear_superiors_direct_complete(dev);
1699         }
1700
1701         device_unlock(dev);
1702         dpm_watchdog_clear(&wd);
1703
1704  Complete:
1705         if (error)
1706                 async_error = error;
1707
1708         complete_all(&dev->power.completion);
1709         TRACE_SUSPEND(error);
1710         return error;
1711 }
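
/*
 * Illustrative sketch for the comment above pm_runtime_barrier(): a
 * hypothetical driver whose system-wide ->suspend() reconfigures the
 * device and therefore disables runtime PM first.  bar_chip and the
 * bar_chip_*() helpers are made-up names.
 */
#if 0
static int bar_suspend(struct device *dev)
{
        struct bar_chip *chip = dev_get_drvdata(dev);

        pm_runtime_disable(dev);        /* no runtime callbacks past this point */
        bar_chip_enter_low_power(chip); /* change the device configuration */
        return 0;
}

static int bar_resume(struct device *dev)
{
        struct bar_chip *chip = dev_get_drvdata(dev);

        bar_chip_leave_low_power(chip);
        pm_runtime_enable(dev);
        return 0;
}

static const struct dev_pm_ops bar_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(bar_suspend, bar_resume)
};
#endif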
1712
1713 static void async_suspend(void *data, async_cookie_t cookie)
1714 {
1715         struct device *dev = (struct device *)data;
1716         int error;
1717
1718         error = __device_suspend(dev, pm_transition, true);
1719         if (error) {
1720                 dpm_save_failed_dev(dev_name(dev));
1721                 pm_dev_err(dev, pm_transition, " async", error);
1722         }
1723
1724         put_device(dev);
1725 }
1726
1727 static int device_suspend(struct device *dev)
1728 {
1729         if (dpm_async_fn(dev, async_suspend))
1730                 return 0;
1731
1732         return __device_suspend(dev, pm_transition, false);
1733 }
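
/*
 * Illustrative sketch: a driver (or subsystem) can opt a device into the
 * asynchronous path taken by device_suspend()/async_suspend() above by
 * enabling async suspend once it knows the device has no ordering
 * requirements beyond parent/child and device links.  baz_probe() is a
 * hypothetical probe routine.
 */
#if 0
static int baz_probe(struct device *dev)
{
        /* Suspend/resume this device concurrently with unrelated devices. */
        device_enable_async_suspend(dev);
        return 0;
}
#endif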
1734
1735 /**
1736  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1737  * @state: PM transition of the system being carried out.
1738  */
1739 int dpm_suspend(pm_message_t state)
1740 {
1741         ktime_t starttime = ktime_get();
1742         int error = 0;
1743
1744         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1745         might_sleep();
1746
1747         devfreq_suspend();
1748         cpufreq_suspend();
1749
1750         mutex_lock(&dpm_list_mtx);
1751         pm_transition = state;
1752         async_error = 0;
1753         while (!list_empty(&dpm_prepared_list)) {
1754                 struct device *dev = to_device(dpm_prepared_list.prev);
1755
1756                 get_device(dev);
1757                 mutex_unlock(&dpm_list_mtx);
1758
1759                 error = device_suspend(dev);
1760
1761                 mutex_lock(&dpm_list_mtx);
1762                 if (error) {
1763                         pm_dev_err(dev, state, "", error);
1764                         dpm_save_failed_dev(dev_name(dev));
1765                         put_device(dev);
1766                         break;
1767                 }
1768                 if (!list_empty(&dev->power.entry))
1769                         list_move(&dev->power.entry, &dpm_suspended_list);
1770                 put_device(dev);
1771                 if (async_error)
1772                         break;
1773         }
1774         mutex_unlock(&dpm_list_mtx);
1775         async_synchronize_full();
1776         if (!error)
1777                 error = async_error;
1778         if (error) {
1779                 suspend_stats.failed_suspend++;
1780                 dpm_save_failed_step(SUSPEND_SUSPEND);
1781         }
1782         dpm_show_time(starttime, state, error, NULL);
1783         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1784         return error;
1785 }
1786
1787 /**
1788  * device_prepare - Prepare a device for system power transition.
1789  * @dev: Device to handle.
1790  * @state: PM transition of the system being carried out.
1791  *
1792  * Execute the ->prepare() callback(s) for the given device.  No new children of the
1793  * device may be registered after this function has returned.
1794  */
1795 static int device_prepare(struct device *dev, pm_message_t state)
1796 {
1797         int (*callback)(struct device *) = NULL;
1798         int ret = 0;
1799
1800         if (dev->power.syscore)
1801                 return 0;
1802
1803         /*
1804          * If a device's parent goes into runtime suspend at the wrong time,
1805          * it won't be possible to resume the device.  To prevent this we
1806          * block runtime suspend here, during the prepare phase, and allow
1807          * it again during the complete phase.
1808          */
1809         pm_runtime_get_noresume(dev);
1810
1811         device_lock(dev);
1812
1813         dev->power.wakeup_path = false;
1814
1815         if (dev->power.no_pm_callbacks)
1816                 goto unlock;
1817
1818         if (dev->pm_domain)
1819                 callback = dev->pm_domain->ops.prepare;
1820         else if (dev->type && dev->type->pm)
1821                 callback = dev->type->pm->prepare;
1822         else if (dev->class && dev->class->pm)
1823                 callback = dev->class->pm->prepare;
1824         else if (dev->bus && dev->bus->pm)
1825                 callback = dev->bus->pm->prepare;
1826
1827         if (!callback && dev->driver && dev->driver->pm)
1828                 callback = dev->driver->pm->prepare;
1829
1830         if (callback)
1831                 ret = callback(dev);
1832
1833 unlock:
1834         device_unlock(dev);
1835
1836         if (ret < 0) {
1837                 suspend_report_result(callback, ret);
1838                 pm_runtime_put(dev);
1839                 return ret;
1840         }
1841         /*
1842          * A positive return value from ->prepare() means "this device appears
1843          * to be runtime-suspended and its state is fine, so if it really is
1844          * runtime-suspended, you can leave it in that state provided that you
1845          * will do the same thing with all of its descendants".  This only
1846          * applies to suspend transitions, however; see the sketch after this function.
1847          */
1848         spin_lock_irq(&dev->power.lock);
1849         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1850                 (ret > 0 || dev->power.no_pm_callbacks) &&
1851                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1852         spin_unlock_irq(&dev->power.lock);
1853         return 0;
1854 }
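
/*
 * Illustrative sketch for the comment above: a hypothetical driver whose
 * ->prepare() returns a positive value when the device may be left
 * runtime-suspended, and an alternative that opts out of the
 * direct-complete optimization altogether.  qux_prepare() and qux_probe()
 * are made-up names.
 */
#if 0
static int qux_prepare(struct device *dev)
{
        /* Positive return: the runtime-suspended state may be kept. */
        return pm_runtime_suspended(dev);
}

static int qux_probe(struct device *dev)
{
        /* Never skip the full suspend/resume callbacks for this device. */
        dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
        return 0;
}
#endif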
1855
1856 /**
1857  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1858  * @state: PM transition of the system being carried out.
1859  *
1860  * Execute the ->prepare() callback(s) for all devices.
1861  */
1862 int dpm_prepare(pm_message_t state)
1863 {
1864         int error = 0;
1865
1866         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1867         might_sleep();
1868
1869         /*
1870          * Give the known devices a chance to complete their probes before
1871          * probing is disabled below.  This sync point is important at least
1872          * at boot time and during hibernation restore.
1873          */
1874         wait_for_device_probe();
1875         /*
1876          * Probing a device during suspend or hibernation is unsafe and would
1877          * make system behavior unpredictable, so prohibit device probing here
1878          * and defer any probes that come in instead.  Normal behavior will be
1879          * restored in dpm_complete().
1880          */
1881         device_block_probing();
1882
1883         mutex_lock(&dpm_list_mtx);
1884         while (!list_empty(&dpm_list)) {
1885                 struct device *dev = to_device(dpm_list.next);
1886
1887                 get_device(dev);
1888                 mutex_unlock(&dpm_list_mtx);
1889
1890                 trace_device_pm_callback_start(dev, "", state.event);
1891                 error = device_prepare(dev, state);
1892                 trace_device_pm_callback_end(dev, error);
1893
1894                 mutex_lock(&dpm_list_mtx);
1895                 if (error) {
1896                         if (error == -EAGAIN) {
1897                                 put_device(dev);
1898                                 error = 0;
1899                                 continue;
1900                         }
1901                         pr_info("Device %s not prepared for power transition: code %d\n",
1902                                 dev_name(dev), error);
1903                         put_device(dev);
1904                         break;
1905                 }
1906                 dev->power.is_prepared = true;
1907                 if (!list_empty(&dev->power.entry))
1908                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1909                 put_device(dev);
1910         }
1911         mutex_unlock(&dpm_list_mtx);
1912         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1913         return error;
1914 }
1915
1916 /**
1917  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1918  * @state: PM transition of the system being carried out.
1919  *
1920  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1921  * callbacks for them.
1922  */
1923 int dpm_suspend_start(pm_message_t state)
1924 {
1925         ktime_t starttime = ktime_get();
1926         int error;
1927
1928         error = dpm_prepare(state);
1929         if (error) {
1930                 suspend_stats.failed_prepare++;
1931                 dpm_save_failed_step(SUSPEND_PREPARE);
1932         } else
1933                 error = dpm_suspend(state);
1934         dpm_show_time(starttime, state, error, "start");
1935         return error;
1936 }
1937 EXPORT_SYMBOL_GPL(dpm_suspend_start);
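
/*
 * Rough ordering sketch of how a system-sleep core is expected to pair
 * these entry points.  This is a simplification: the real callers in
 * kernel/power/ interleave platform hooks and more detailed error
 * handling.
 */
#if 0
        int error;

        error = dpm_suspend_start(PMSG_SUSPEND);        /* prepare + suspend */
        if (!error) {
                error = dpm_suspend_end(PMSG_SUSPEND);  /* late + noirq */
                if (!error) {
                        /* ... enter the sleep state, then wake up ... */
                        dpm_resume_start(PMSG_RESUME);  /* noirq + early */
                }
        }
        dpm_resume_end(PMSG_RESUME);                    /* resume + complete */
#endif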
1938
1939 void __suspend_report_result(const char *function, void *fn, int ret)
1940 {
1941         if (ret)
1942                 pr_err("%s(): %pS returns %d\n", function, fn, ret);
1943 }
1944 EXPORT_SYMBOL_GPL(__suspend_report_result);
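
/*
 * Callers normally do not invoke __suspend_report_result() directly but go
 * through the suspend_report_result() wrapper from <linux/pm.h>, which
 * supplies __func__, as in the schematic snippet below.
 */
#if 0
        error = callback(dev);
        suspend_report_result(callback, error);
#endif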
1945
1946 /**
1947  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1948  * @subordinate: Device that needs to wait for @dev.
1949  * @dev: Device to wait for.
1950  */
1951 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1952 {
1953         dpm_wait(dev, subordinate->power.async_suspend);
1954         return async_error;
1955 }
1956 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
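
/*
 * Illustrative sketch: a driver whose device must not suspend until some
 * other, otherwise unrelated device has finished suspending can wait for
 * it explicitly.  xyz_priv, ->companion and xyz_quiesce() are made-up
 * names.
 */
#if 0
static int xyz_suspend(struct device *dev)
{
        struct xyz_priv *priv = dev_get_drvdata(dev);
        int error;

        /* Wait until the companion device has completed its suspend. */
        error = device_pm_wait_for_dev(dev, priv->companion);
        if (error)
                return error;

        return xyz_quiesce(priv);
}
#endif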
1957
1958 /**
1959  * dpm_for_each_dev - device iterator.
1960  * @data: data for the callback.
1961  * @fn: function to be called for each device.
1962  *
1963  * Iterate over devices in dpm_list, and call @fn for each device,
1964  * passing it @data.
1965  */
1966 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1967 {
1968         struct device *dev;
1969
1970         if (!fn)
1971                 return;
1972
1973         device_pm_lock();
1974         list_for_each_entry(dev, &dpm_list, power.entry)
1975                 fn(dev, data);
1976         device_pm_unlock();
1977 }
1978 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
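
/*
 * Illustrative sketch of the iterator above: count how many devices on
 * dpm_list are wakeup-enabled.  Both functions are hypothetical.
 */
#if 0
static void count_wakeup_capable(struct device *dev, void *data)
{
        unsigned int *count = data;

        if (device_may_wakeup(dev))
                (*count)++;
}

static unsigned int wakeup_capable_device_count(void)
{
        unsigned int count = 0;

        dpm_for_each_dev(&count, count_wakeup_capable);
        return count;
}
#endif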
1979
1980 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1981 {
1982         if (!ops)
1983                 return true;
1984
1985         return !ops->prepare &&
1986                !ops->suspend &&
1987                !ops->suspend_late &&
1988                !ops->suspend_noirq &&
1989                !ops->resume_noirq &&
1990                !ops->resume_early &&
1991                !ops->resume &&
1992                !ops->complete;
1993 }
1994
1995 void device_pm_check_callbacks(struct device *dev)
1996 {
1997         spin_lock_irq(&dev->power.lock);
1998         dev->power.no_pm_callbacks =
1999                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2000                  !dev->bus->suspend && !dev->bus->resume)) &&
2001                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2002                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2003                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2004                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2005                  !dev->driver->suspend && !dev->driver->resume));
2006         spin_unlock_irq(&dev->power.lock);
2007 }
2008
2009 bool dev_pm_skip_suspend(struct device *dev)
2010 {
2011         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2012                 pm_runtime_status_suspended(dev);
2013 }