PM: sleep: Add dev_wakeup_path() helper
drivers/base/power/main.c (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17
18 #define pr_fmt(fmt) "PM: " fmt
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 #define list_for_each_entry_rcu_locked(pos, head, member) \
44         list_for_each_entry_rcu(pos, head, member, \
45                         device_links_read_lock_held())
46
47 /*
48  * The entries in the dpm_list list are in a depth first order, simply
49  * because children are guaranteed to be discovered after parents, and
50  * are inserted at the back of the list on discovery.
51  *
52  * Since device_pm_add() may be called with a device lock held,
53  * we must never try to acquire a device lock while holding
54  * dpm_list_mutex.
55  */
56
57 LIST_HEAD(dpm_list);
58 static LIST_HEAD(dpm_prepared_list);
59 static LIST_HEAD(dpm_suspended_list);
60 static LIST_HEAD(dpm_late_early_list);
61 static LIST_HEAD(dpm_noirq_list);
62
63 struct suspend_stats suspend_stats;
64 static DEFINE_MUTEX(dpm_list_mtx);
65 static pm_message_t pm_transition;
66
67 static int async_error;
68
69 static const char *pm_verb(int event)
70 {
71         switch (event) {
72         case PM_EVENT_SUSPEND:
73                 return "suspend";
74         case PM_EVENT_RESUME:
75                 return "resume";
76         case PM_EVENT_FREEZE:
77                 return "freeze";
78         case PM_EVENT_QUIESCE:
79                 return "quiesce";
80         case PM_EVENT_HIBERNATE:
81                 return "hibernate";
82         case PM_EVENT_THAW:
83                 return "thaw";
84         case PM_EVENT_RESTORE:
85                 return "restore";
86         case PM_EVENT_RECOVER:
87                 return "recover";
88         default:
89                 return "(unknown PM event)";
90         }
91 }
92
93 /**
94  * device_pm_sleep_init - Initialize system suspend-related device fields.
95  * @dev: Device object being initialized.
96  */
97 void device_pm_sleep_init(struct device *dev)
98 {
99         dev->power.is_prepared = false;
100         dev->power.is_suspended = false;
101         dev->power.is_noirq_suspended = false;
102         dev->power.is_late_suspended = false;
103         init_completion(&dev->power.completion);
104         complete_all(&dev->power.completion);
105         dev->power.wakeup = NULL;
106         INIT_LIST_HEAD(&dev->power.entry);
107 }
108
109 /**
110  * device_pm_lock - Lock the list of active devices used by the PM core.
111  */
112 void device_pm_lock(void)
113 {
114         mutex_lock(&dpm_list_mtx);
115 }
116
117 /**
118  * device_pm_unlock - Unlock the list of active devices used by the PM core.
119  */
120 void device_pm_unlock(void)
121 {
122         mutex_unlock(&dpm_list_mtx);
123 }
124
125 /**
126  * device_pm_add - Add a device to the PM core's list of active devices.
127  * @dev: Device to add to the list.
128  */
129 void device_pm_add(struct device *dev)
130 {
131         /* Skip PM setup/initialization. */
132         if (device_pm_not_required(dev))
133                 return;
134
135         pr_debug("Adding info for %s:%s\n",
136                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
137         device_pm_check_callbacks(dev);
138         mutex_lock(&dpm_list_mtx);
139         if (dev->parent && dev->parent->power.is_prepared)
140                 dev_warn(dev, "parent %s should not be sleeping\n",
141                         dev_name(dev->parent));
142         list_add_tail(&dev->power.entry, &dpm_list);
143         dev->power.in_dpm_list = true;
144         mutex_unlock(&dpm_list_mtx);
145 }
146
147 /**
148  * device_pm_remove - Remove a device from the PM core's list of active devices.
149  * @dev: Device to be removed from the list.
150  */
151 void device_pm_remove(struct device *dev)
152 {
153         if (device_pm_not_required(dev))
154                 return;
155
156         pr_debug("Removing info for %s:%s\n",
157                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
158         complete_all(&dev->power.completion);
159         mutex_lock(&dpm_list_mtx);
160         list_del_init(&dev->power.entry);
161         dev->power.in_dpm_list = false;
162         mutex_unlock(&dpm_list_mtx);
163         device_wakeup_disable(dev);
164         pm_runtime_remove(dev);
165         device_pm_check_callbacks(dev);
166 }
167
168 /**
169  * device_pm_move_before - Move device in the PM core's list of active devices.
170  * @deva: Device to move in dpm_list.
171  * @devb: Device @deva should come before.
172  */
173 void device_pm_move_before(struct device *deva, struct device *devb)
174 {
175         pr_debug("Moving %s:%s before %s:%s\n",
176                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
177                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
178         /* Delete deva from dpm_list and reinsert before devb. */
179         list_move_tail(&deva->power.entry, &devb->power.entry);
180 }
181
182 /**
183  * device_pm_move_after - Move device in the PM core's list of active devices.
184  * @deva: Device to move in dpm_list.
185  * @devb: Device @deva should come after.
186  */
187 void device_pm_move_after(struct device *deva, struct device *devb)
188 {
189         pr_debug("Moving %s:%s after %s:%s\n",
190                  deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
191                  devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
192         /* Delete deva from dpm_list and reinsert after devb. */
193         list_move(&deva->power.entry, &devb->power.entry);
194 }
195
196 /**
197  * device_pm_move_last - Move device to end of the PM core's list of devices.
198  * @dev: Device to move in dpm_list.
199  */
200 void device_pm_move_last(struct device *dev)
201 {
202         pr_debug("Moving %s:%s to end of list\n",
203                  dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
204         list_move_tail(&dev->power.entry, &dpm_list);
205 }
206
207 static ktime_t initcall_debug_start(struct device *dev, void *cb)
208 {
209         if (!pm_print_times_enabled)
210                 return 0;
211
212         dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
213                  task_pid_nr(current),
214                  dev->parent ? dev_name(dev->parent) : "none");
215         return ktime_get();
216 }
217
218 static void initcall_debug_report(struct device *dev, ktime_t calltime,
219                                   void *cb, int error)
220 {
221         ktime_t rettime;
222         s64 nsecs;
223
224         if (!pm_print_times_enabled)
225                 return;
226
227         rettime = ktime_get();
228         nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
229
230         dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
231                  (unsigned long long)nsecs >> 10);
232 }
233
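/*
 * Usage note (illustrative, not part of the driver model API itself): the two
 * helpers above only print anything when pm_print_times_enabled is set, which
 * is normally done through the /sys/power/pm_print_times attribute or the
 * initcall_debug boot parameter.  The ">> 10" above is a cheap approximation
 * of the nanoseconds-to-microseconds conversion (divide by 1024 instead of
 * 1000), which is good enough for debug output.
 */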
234 /**
235  * dpm_wait - Wait for a PM operation to complete.
236  * @dev: Device to wait for.
237  * @async: If unset, wait only if the device's power.async_suspend flag is set.
238  */
239 static void dpm_wait(struct device *dev, bool async)
240 {
241         if (!dev)
242                 return;
243
244         if (async || (pm_async_enabled && dev->power.async_suspend))
245                 wait_for_completion(&dev->power.completion);
246 }
247
248 static int dpm_wait_fn(struct device *dev, void *async_ptr)
249 {
250         dpm_wait(dev, *((bool *)async_ptr));
251         return 0;
252 }
253
254 static void dpm_wait_for_children(struct device *dev, bool async)
255 {
256         device_for_each_child(dev, &async, dpm_wait_fn);
257 }
258
259 static void dpm_wait_for_suppliers(struct device *dev, bool async)
260 {
261         struct device_link *link;
262         int idx;
263
264         idx = device_links_read_lock();
265
266         /*
267          * If the supplier goes away right after we've checked the link to it,
268          * we'll wait for its completion to change the state, but that's fine,
269          * because the only things that will block as a result are the SRCU
270          * callbacks freeing the link objects for the links in the list we're
271          * walking.
272          */
273         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
274                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
275                         dpm_wait(link->supplier, async);
276
277         device_links_read_unlock(idx);
278 }
279
280 static bool dpm_wait_for_superior(struct device *dev, bool async)
281 {
282         struct device *parent;
283
284         /*
285          * If the device is resumed asynchronously and the parent's callback
286          * deletes both the device and the parent itself, the parent object may
287          * be freed while this function is running, so avoid that by reference
288          * counting the parent once more unless the device has been deleted
289          * already (in which case return right away).
290          */
291         mutex_lock(&dpm_list_mtx);
292
293         if (!device_pm_initialized(dev)) {
294                 mutex_unlock(&dpm_list_mtx);
295                 return false;
296         }
297
298         parent = get_device(dev->parent);
299
300         mutex_unlock(&dpm_list_mtx);
301
302         dpm_wait(parent, async);
303         put_device(parent);
304
305         dpm_wait_for_suppliers(dev, async);
306
307         /*
308          * If the parent's callback has deleted the device, attempting to resume
309          * it would be invalid, so avoid doing that then.
310          */
311         return device_pm_initialized(dev);
312 }
313
314 static void dpm_wait_for_consumers(struct device *dev, bool async)
315 {
316         struct device_link *link;
317         int idx;
318
319         idx = device_links_read_lock();
320
321         /*
322          * The status of a device link can only be changed from "dormant" by a
323          * probe, but that cannot happen during system suspend/resume.  In
324          * theory it can change to "dormant" at that time, but then it is
325          * reasonable to wait for the target device anyway (eg. if it goes
326          * away, it's better to wait for it to go away completely and then
327          * continue instead of trying to continue in parallel with its
328          * unregistration).
329          */
330         list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
331                 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
332                         dpm_wait(link->consumer, async);
333
334         device_links_read_unlock(idx);
335 }
336
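/*
 * Illustrative sketch (device names are generic): the supplier/consumer
 * ordering waited for above comes from device links.  A consumer driver can
 * declare its dependency explicitly, e.g.
 *
 *	device_link_add(consumer_dev, supplier_dev, DL_FLAG_PM_RUNTIME);
 *
 * after which the PM core resumes the supplier before the consumer and
 * suspends it after the consumer, in addition to the parent-before-child
 * ordering that dpm_list already provides.
 */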
337 static void dpm_wait_for_subordinate(struct device *dev, bool async)
338 {
339         dpm_wait_for_children(dev, async);
340         dpm_wait_for_consumers(dev, async);
341 }
342
343 /**
344  * pm_op - Return the PM operation appropriate for given PM event.
345  * @ops: PM operations to choose from.
346  * @state: PM transition of the system being carried out.
347  */
348 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
349 {
350         switch (state.event) {
351 #ifdef CONFIG_SUSPEND
352         case PM_EVENT_SUSPEND:
353                 return ops->suspend;
354         case PM_EVENT_RESUME:
355                 return ops->resume;
356 #endif /* CONFIG_SUSPEND */
357 #ifdef CONFIG_HIBERNATE_CALLBACKS
358         case PM_EVENT_FREEZE:
359         case PM_EVENT_QUIESCE:
360                 return ops->freeze;
361         case PM_EVENT_HIBERNATE:
362                 return ops->poweroff;
363         case PM_EVENT_THAW:
364         case PM_EVENT_RECOVER:
365                 return ops->thaw;
366         case PM_EVENT_RESTORE:
367                 return ops->restore;
368 #endif /* CONFIG_HIBERNATE_CALLBACKS */
369         }
370
371         return NULL;
372 }
373
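/*
 * Illustrative sketch (the foo_* names are hypothetical): the callbacks
 * returned by pm_op() come from a dev_pm_ops table that a driver, bus, class
 * or power domain exposes through its .pm pointer, e.g.
 *
 *	static int foo_suspend(struct device *dev) { return 0; }
 *	static int foo_resume(struct device *dev) { return 0; }
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * SET_SYSTEM_SLEEP_PM_OPS() also reuses the same pair of functions for the
 * hibernation (freeze/thaw/poweroff/restore) transitions handled above.
 */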
374 /**
375  * pm_late_early_op - Return the PM operation appropriate for given PM event.
376  * @ops: PM operations to choose from.
377  * @state: PM transition of the system being carried out.
378  *
379  * The returned callback runs with runtime PM disabled for the target device.
380  */
381 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
382                                       pm_message_t state)
383 {
384         switch (state.event) {
385 #ifdef CONFIG_SUSPEND
386         case PM_EVENT_SUSPEND:
387                 return ops->suspend_late;
388         case PM_EVENT_RESUME:
389                 return ops->resume_early;
390 #endif /* CONFIG_SUSPEND */
391 #ifdef CONFIG_HIBERNATE_CALLBACKS
392         case PM_EVENT_FREEZE:
393         case PM_EVENT_QUIESCE:
394                 return ops->freeze_late;
395         case PM_EVENT_HIBERNATE:
396                 return ops->poweroff_late;
397         case PM_EVENT_THAW:
398         case PM_EVENT_RECOVER:
399                 return ops->thaw_early;
400         case PM_EVENT_RESTORE:
401                 return ops->restore_early;
402 #endif /* CONFIG_HIBERNATE_CALLBACKS */
403         }
404
405         return NULL;
406 }
407
408 /**
409  * pm_noirq_op - Return the PM operation appropriate for given PM event.
410  * @ops: PM operations to choose from.
411  * @state: PM transition of the system being carried out.
412  *
413  * The target device's driver will not receive interrupts while the returned
414  * callback runs.
415  */
416 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
417 {
418         switch (state.event) {
419 #ifdef CONFIG_SUSPEND
420         case PM_EVENT_SUSPEND:
421                 return ops->suspend_noirq;
422         case PM_EVENT_RESUME:
423                 return ops->resume_noirq;
424 #endif /* CONFIG_SUSPEND */
425 #ifdef CONFIG_HIBERNATE_CALLBACKS
426         case PM_EVENT_FREEZE:
427         case PM_EVENT_QUIESCE:
428                 return ops->freeze_noirq;
429         case PM_EVENT_HIBERNATE:
430                 return ops->poweroff_noirq;
431         case PM_EVENT_THAW:
432         case PM_EVENT_RECOVER:
433                 return ops->thaw_noirq;
434         case PM_EVENT_RESTORE:
435                 return ops->restore_noirq;
436 #endif /* CONFIG_HIBERNATE_CALLBACKS */
437         }
438
439         return NULL;
440 }
441
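/*
 * Illustrative sketch (the foo_* names are hypothetical): "noirq" callbacks
 * are ordinary members of the same dev_pm_ops table, e.g.
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		.suspend_noirq	= foo_suspend_noirq,
 *		.resume_noirq	= foo_resume_noirq,
 *	};
 *
 * They run after suspend_device_irqs() has disabled interrupt delivery (and
 * before resume_device_irqs() re-enables it), so they must not depend on the
 * device's interrupt handler being invoked.
 */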
442 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
443 {
444         dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
445                 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
446                 ", may wakeup" : "", dev->power.driver_flags);
447 }
448
449 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
450                         int error)
451 {
452         pr_err("Device %s failed to %s%s: error %d\n",
453                dev_name(dev), pm_verb(state.event), info, error);
454 }
455
456 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
457                           const char *info)
458 {
459         ktime_t calltime;
460         u64 usecs64;
461         int usecs;
462
463         calltime = ktime_get();
464         usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
465         do_div(usecs64, NSEC_PER_USEC);
466         usecs = usecs64;
467         if (usecs == 0)
468                 usecs = 1;
469
470         pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
471                   info ?: "", info ? " " : "", pm_verb(state.event),
472                   error ? "aborted" : "complete",
473                   usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
474 }
475
476 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
477                             pm_message_t state, const char *info)
478 {
479         ktime_t calltime;
480         int error;
481
482         if (!cb)
483                 return 0;
484
485         calltime = initcall_debug_start(dev, cb);
486
487         pm_dev_dbg(dev, state, info);
488         trace_device_pm_callback_start(dev, info, state.event);
489         error = cb(dev);
490         trace_device_pm_callback_end(dev, error);
491         suspend_report_result(cb, error);
492
493         initcall_debug_report(dev, calltime, cb, error);
494
495         return error;
496 }
497
498 #ifdef CONFIG_DPM_WATCHDOG
499 struct dpm_watchdog {
500         struct device           *dev;
501         struct task_struct      *tsk;
502         struct timer_list       timer;
503 };
504
505 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
506         struct dpm_watchdog wd
507
508 /**
509  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
510  * @t: The timer that PM watchdog depends on.
511  *
512  * Called when a driver has timed out suspending or resuming.
513  * There's not much we can do here to recover so panic() to
514  * capture a crash-dump in pstore.
515  */
516 static void dpm_watchdog_handler(struct timer_list *t)
517 {
518         struct dpm_watchdog *wd = from_timer(wd, t, timer);
519
520         dev_emerg(wd->dev, "**** DPM device timeout ****\n");
521         show_stack(wd->tsk, NULL, KERN_EMERG);
522         panic("%s %s: unrecoverable failure\n",
523                 dev_driver_string(wd->dev), dev_name(wd->dev));
524 }
525
526 /**
527  * dpm_watchdog_set - Enable pm watchdog for given device.
528  * @wd: Watchdog. Must be allocated on the stack.
529  * @dev: Device to handle.
530  */
531 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
532 {
533         struct timer_list *timer = &wd->timer;
534
535         wd->dev = dev;
536         wd->tsk = current;
537
538         timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
539         /* use same timeout value for both suspend and resume */
540         timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
541         add_timer(timer);
542 }
543
544 /**
545  * dpm_watchdog_clear - Disable suspend/resume watchdog.
546  * @wd: Watchdog to disable.
547  */
548 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
549 {
550         struct timer_list *timer = &wd->timer;
551
552         del_timer_sync(timer);
553         destroy_timer_on_stack(timer);
554 }
555 #else
556 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
557 #define dpm_watchdog_set(x, y)
558 #define dpm_watchdog_clear(x)
559 #endif
560
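/*
 * Configuration sketch (the values below are examples): the watchdog above is
 * only built when CONFIG_DPM_WATCHDOG is enabled, e.g.
 *
 *	CONFIG_DPM_WATCHDOG=y
 *	CONFIG_DPM_WATCHDOG_TIMEOUT=120
 *
 * With that, a suspend or resume callback that has not returned within the
 * configured number of seconds triggers dpm_watchdog_handler(), which panics
 * the system so that a crash dump can be captured in pstore.
 */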
561 /*------------------------- Resume routines -------------------------*/
562
563 /**
564  * dev_pm_skip_resume - System-wide device resume optimization check.
565  * @dev: Target device.
566  *
567  * Return:
568  * - %false if the transition under way is RESTORE.
569  * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
570  * - The logical negation of %power.must_resume otherwise (that is, when the
571  *   transition under way is RESUME).
572  */
573 bool dev_pm_skip_resume(struct device *dev)
574 {
575         if (pm_transition.event == PM_EVENT_RESTORE)
576                 return false;
577
578         if (pm_transition.event == PM_EVENT_THAW)
579                 return dev_pm_skip_suspend(dev);
580
581         return !dev->power.must_resume;
582 }
583
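/*
 * Illustrative sketch (foo_probe() is hypothetical): a driver opts in to the
 * optimization checked above by setting the relevant driver flags, typically
 * at probe time:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
 *					     DPM_FLAG_MAY_SKIP_RESUME);
 *		return 0;
 *	}
 *
 * With both flags set and the device left runtime-suspended across the
 * transition, dev_pm_skip_resume() can return true and the driver's resume
 * callbacks may be skipped entirely.
 */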
584 /**
585  * device_resume_noirq - Execute a "noirq resume" callback for given device.
586  * @dev: Device to handle.
587  * @state: PM transition of the system being carried out.
588  * @async: If true, the device is being resumed asynchronously.
589  *
590  * The driver of @dev will not receive interrupts while this function is being
591  * executed.
592  */
593 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
594 {
595         pm_callback_t callback = NULL;
596         const char *info = NULL;
597         bool skip_resume;
598         int error = 0;
599
600         TRACE_DEVICE(dev);
601         TRACE_RESUME(0);
602
603         if (dev->power.syscore || dev->power.direct_complete)
604                 goto Out;
605
606         if (!dev->power.is_noirq_suspended)
607                 goto Out;
608
609         if (!dpm_wait_for_superior(dev, async))
610                 goto Out;
611
612         skip_resume = dev_pm_skip_resume(dev);
613         /*
614          * If the driver callback is skipped below or by the middle layer
615          * callback and device_resume_early() also skips the driver callback for
616          * this device later, it needs to appear as "suspended" to PM-runtime,
617          * so change its status accordingly.
618          *
619          * Otherwise, the device is going to be resumed, so set its PM-runtime
620          * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
621          * to avoid confusing drivers that don't use it.
622          */
623         if (skip_resume)
624                 pm_runtime_set_suspended(dev);
625         else if (dev_pm_skip_suspend(dev))
626                 pm_runtime_set_active(dev);
627
628         if (dev->pm_domain) {
629                 info = "noirq power domain ";
630                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
631         } else if (dev->type && dev->type->pm) {
632                 info = "noirq type ";
633                 callback = pm_noirq_op(dev->type->pm, state);
634         } else if (dev->class && dev->class->pm) {
635                 info = "noirq class ";
636                 callback = pm_noirq_op(dev->class->pm, state);
637         } else if (dev->bus && dev->bus->pm) {
638                 info = "noirq bus ";
639                 callback = pm_noirq_op(dev->bus->pm, state);
640         }
641         if (callback)
642                 goto Run;
643
644         if (skip_resume)
645                 goto Skip;
646
647         if (dev->driver && dev->driver->pm) {
648                 info = "noirq driver ";
649                 callback = pm_noirq_op(dev->driver->pm, state);
650         }
651
652 Run:
653         error = dpm_run_callback(callback, dev, state, info);
654
655 Skip:
656         dev->power.is_noirq_suspended = false;
657
658 Out:
659         complete_all(&dev->power.completion);
660         TRACE_RESUME(error);
661         return error;
662 }
663
664 static bool is_async(struct device *dev)
665 {
666         return dev->power.async_suspend && pm_async_enabled
667                 && !pm_trace_is_enabled();
668 }
669
670 static bool dpm_async_fn(struct device *dev, async_func_t func)
671 {
672         reinit_completion(&dev->power.completion);
673
674         if (is_async(dev)) {
675                 get_device(dev);
676                 async_schedule_dev(func, dev);
677                 return true;
678         }
679
680         return false;
681 }
682
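/*
 * Illustrative sketch: a device is handled by dpm_async_fn() only if both the
 * global pm_async_enabled switch (the /sys/power/pm_async attribute) and its
 * own power.async_suspend flag are set.  A bus or driver that knows the
 * device has no hidden ordering constraints can enable the latter with
 *
 *	device_enable_async_suspend(dev);
 *
 * Ordering against parents, children and device links is still enforced by
 * the power.completion waits in the dpm_wait_*() helpers above.
 */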
683 static void async_resume_noirq(void *data, async_cookie_t cookie)
684 {
685         struct device *dev = (struct device *)data;
686         int error;
687
688         error = device_resume_noirq(dev, pm_transition, true);
689         if (error)
690                 pm_dev_err(dev, pm_transition, " async", error);
691
692         put_device(dev);
693 }
694
695 static void dpm_noirq_resume_devices(pm_message_t state)
696 {
697         struct device *dev;
698         ktime_t starttime = ktime_get();
699
700         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
701         mutex_lock(&dpm_list_mtx);
702         pm_transition = state;
703
704         /*
705          * Start the async threads upfront, in case their
706          * startup gets delayed by devices that have to be
707          * resumed synchronously (non-async devices).
708          */
709         list_for_each_entry(dev, &dpm_noirq_list, power.entry)
710                 dpm_async_fn(dev, async_resume_noirq);
711
712         while (!list_empty(&dpm_noirq_list)) {
713                 dev = to_device(dpm_noirq_list.next);
714                 get_device(dev);
715                 list_move_tail(&dev->power.entry, &dpm_late_early_list);
716                 mutex_unlock(&dpm_list_mtx);
717
718                 if (!is_async(dev)) {
719                         int error;
720
721                         error = device_resume_noirq(dev, state, false);
722                         if (error) {
723                                 suspend_stats.failed_resume_noirq++;
724                                 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
725                                 dpm_save_failed_dev(dev_name(dev));
726                                 pm_dev_err(dev, state, " noirq", error);
727                         }
728                 }
729
730                 mutex_lock(&dpm_list_mtx);
731                 put_device(dev);
732         }
733         mutex_unlock(&dpm_list_mtx);
734         async_synchronize_full();
735         dpm_show_time(starttime, state, 0, "noirq");
736         trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
737 }
738
739 /**
740  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
741  * @state: PM transition of the system being carried out.
742  *
743  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
744  * allow device drivers' interrupt handlers to be called.
745  */
746 void dpm_resume_noirq(pm_message_t state)
747 {
748         dpm_noirq_resume_devices(state);
749
750         resume_device_irqs();
751         device_wakeup_disarm_wake_irqs();
752
753         cpuidle_resume();
754 }
755
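/*
 * Ordering sketch (simplified): for a regular suspend-to-RAM cycle the code
 * in kernel/power/suspend.c runs the device phases roughly as
 *
 *	dpm_prepare() -> dpm_suspend() -> dpm_suspend_late() -> dpm_suspend_noirq()
 *	... the platform enters and leaves the sleep state ...
 *	dpm_resume_noirq() -> dpm_resume_early() -> dpm_resume() -> dpm_complete()
 *
 * so dpm_resume_noirq() above is the first per-device step on the way out.
 */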
756 /**
757  * device_resume_early - Execute an "early resume" callback for given device.
758  * @dev: Device to handle.
759  * @state: PM transition of the system being carried out.
760  * @async: If true, the device is being resumed asynchronously.
761  *
762  * Runtime PM is disabled for @dev while this function is being executed.
763  */
764 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
765 {
766         pm_callback_t callback = NULL;
767         const char *info = NULL;
768         int error = 0;
769
770         TRACE_DEVICE(dev);
771         TRACE_RESUME(0);
772
773         if (dev->power.syscore || dev->power.direct_complete)
774                 goto Out;
775
776         if (!dev->power.is_late_suspended)
777                 goto Out;
778
779         if (!dpm_wait_for_superior(dev, async))
780                 goto Out;
781
782         if (dev->pm_domain) {
783                 info = "early power domain ";
784                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
785         } else if (dev->type && dev->type->pm) {
786                 info = "early type ";
787                 callback = pm_late_early_op(dev->type->pm, state);
788         } else if (dev->class && dev->class->pm) {
789                 info = "early class ";
790                 callback = pm_late_early_op(dev->class->pm, state);
791         } else if (dev->bus && dev->bus->pm) {
792                 info = "early bus ";
793                 callback = pm_late_early_op(dev->bus->pm, state);
794         }
795         if (callback)
796                 goto Run;
797
798         if (dev_pm_skip_resume(dev))
799                 goto Skip;
800
801         if (dev->driver && dev->driver->pm) {
802                 info = "early driver ";
803                 callback = pm_late_early_op(dev->driver->pm, state);
804         }
805
806 Run:
807         error = dpm_run_callback(callback, dev, state, info);
808
809 Skip:
810         dev->power.is_late_suspended = false;
811
812 Out:
813         TRACE_RESUME(error);
814
815         pm_runtime_enable(dev);
816         complete_all(&dev->power.completion);
817         return error;
818 }
819
820 static void async_resume_early(void *data, async_cookie_t cookie)
821 {
822         struct device *dev = (struct device *)data;
823         int error;
824
825         error = device_resume_early(dev, pm_transition, true);
826         if (error)
827                 pm_dev_err(dev, pm_transition, " async", error);
828
829         put_device(dev);
830 }
831
832 /**
833  * dpm_resume_early - Execute "early resume" callbacks for all devices.
834  * @state: PM transition of the system being carried out.
835  */
836 void dpm_resume_early(pm_message_t state)
837 {
838         struct device *dev;
839         ktime_t starttime = ktime_get();
840
841         trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
842         mutex_lock(&dpm_list_mtx);
843         pm_transition = state;
844
845         /*
846          * Start the async threads upfront, in case their
847          * startup gets delayed by devices that have to be
848          * resumed synchronously (non-async devices).
849          */
850         list_for_each_entry(dev, &dpm_late_early_list, power.entry)
851                 dpm_async_fn(dev, async_resume_early);
852
853         while (!list_empty(&dpm_late_early_list)) {
854                 dev = to_device(dpm_late_early_list.next);
855                 get_device(dev);
856                 list_move_tail(&dev->power.entry, &dpm_suspended_list);
857                 mutex_unlock(&dpm_list_mtx);
858
859                 if (!is_async(dev)) {
860                         int error;
861
862                         error = device_resume_early(dev, state, false);
863                         if (error) {
864                                 suspend_stats.failed_resume_early++;
865                                 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
866                                 dpm_save_failed_dev(dev_name(dev));
867                                 pm_dev_err(dev, state, " early", error);
868                         }
869                 }
870                 mutex_lock(&dpm_list_mtx);
871                 put_device(dev);
872         }
873         mutex_unlock(&dpm_list_mtx);
874         async_synchronize_full();
875         dpm_show_time(starttime, state, 0, "early");
876         trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
877 }
878
879 /**
880  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
881  * @state: PM transition of the system being carried out.
882  */
883 void dpm_resume_start(pm_message_t state)
884 {
885         dpm_resume_noirq(state);
886         dpm_resume_early(state);
887 }
888 EXPORT_SYMBOL_GPL(dpm_resume_start);
889
890 /**
891  * device_resume - Execute "resume" callbacks for given device.
892  * @dev: Device to handle.
893  * @state: PM transition of the system being carried out.
894  * @async: If true, the device is being resumed asynchronously.
895  */
896 static int device_resume(struct device *dev, pm_message_t state, bool async)
897 {
898         pm_callback_t callback = NULL;
899         const char *info = NULL;
900         int error = 0;
901         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
902
903         TRACE_DEVICE(dev);
904         TRACE_RESUME(0);
905
906         if (dev->power.syscore)
907                 goto Complete;
908
909         if (dev->power.direct_complete) {
910                 /* Match the pm_runtime_disable() in __device_suspend(). */
911                 pm_runtime_enable(dev);
912                 goto Complete;
913         }
914
915         if (!dpm_wait_for_superior(dev, async))
916                 goto Complete;
917
918         dpm_watchdog_set(&wd, dev);
919         device_lock(dev);
920
921         /*
922          * This is a fib.  But we'll allow new children to be added below
923          * a resumed device, even if the device hasn't been completed yet.
924          */
925         dev->power.is_prepared = false;
926
927         if (!dev->power.is_suspended)
928                 goto Unlock;
929
930         if (dev->pm_domain) {
931                 info = "power domain ";
932                 callback = pm_op(&dev->pm_domain->ops, state);
933                 goto Driver;
934         }
935
936         if (dev->type && dev->type->pm) {
937                 info = "type ";
938                 callback = pm_op(dev->type->pm, state);
939                 goto Driver;
940         }
941
942         if (dev->class && dev->class->pm) {
943                 info = "class ";
944                 callback = pm_op(dev->class->pm, state);
945                 goto Driver;
946         }
947
948         if (dev->bus) {
949                 if (dev->bus->pm) {
950                         info = "bus ";
951                         callback = pm_op(dev->bus->pm, state);
952                 } else if (dev->bus->resume) {
953                         info = "legacy bus ";
954                         callback = dev->bus->resume;
955                         goto End;
956                 }
957         }
958
959  Driver:
960         if (!callback && dev->driver && dev->driver->pm) {
961                 info = "driver ";
962                 callback = pm_op(dev->driver->pm, state);
963         }
964
965  End:
966         error = dpm_run_callback(callback, dev, state, info);
967         dev->power.is_suspended = false;
968
969  Unlock:
970         device_unlock(dev);
971         dpm_watchdog_clear(&wd);
972
973  Complete:
974         complete_all(&dev->power.completion);
975
976         TRACE_RESUME(error);
977
978         return error;
979 }
980
981 static void async_resume(void *data, async_cookie_t cookie)
982 {
983         struct device *dev = (struct device *)data;
984         int error;
985
986         error = device_resume(dev, pm_transition, true);
987         if (error)
988                 pm_dev_err(dev, pm_transition, " async", error);
989         put_device(dev);
990 }
991
992 /**
993  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
994  * @state: PM transition of the system being carried out.
995  *
996  * Execute the appropriate "resume" callback for all devices whose status
997  * indicates that they are suspended.
998  */
999 void dpm_resume(pm_message_t state)
1000 {
1001         struct device *dev;
1002         ktime_t starttime = ktime_get();
1003
1004         trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1005         might_sleep();
1006
1007         mutex_lock(&dpm_list_mtx);
1008         pm_transition = state;
1009         async_error = 0;
1010
1011         list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1012                 dpm_async_fn(dev, async_resume);
1013
1014         while (!list_empty(&dpm_suspended_list)) {
1015                 dev = to_device(dpm_suspended_list.next);
1016                 get_device(dev);
1017                 if (!is_async(dev)) {
1018                         int error;
1019
1020                         mutex_unlock(&dpm_list_mtx);
1021
1022                         error = device_resume(dev, state, false);
1023                         if (error) {
1024                                 suspend_stats.failed_resume++;
1025                                 dpm_save_failed_step(SUSPEND_RESUME);
1026                                 dpm_save_failed_dev(dev_name(dev));
1027                                 pm_dev_err(dev, state, "", error);
1028                         }
1029
1030                         mutex_lock(&dpm_list_mtx);
1031                 }
1032                 if (!list_empty(&dev->power.entry))
1033                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1034                 put_device(dev);
1035         }
1036         mutex_unlock(&dpm_list_mtx);
1037         async_synchronize_full();
1038         dpm_show_time(starttime, state, 0, NULL);
1039
1040         cpufreq_resume();
1041         devfreq_resume();
1042         trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1043 }
1044
1045 /**
1046  * device_complete - Complete a PM transition for given device.
1047  * @dev: Device to handle.
1048  * @state: PM transition of the system being carried out.
1049  */
1050 static void device_complete(struct device *dev, pm_message_t state)
1051 {
1052         void (*callback)(struct device *) = NULL;
1053         const char *info = NULL;
1054
1055         if (dev->power.syscore)
1056                 return;
1057
1058         device_lock(dev);
1059
1060         if (dev->pm_domain) {
1061                 info = "completing power domain ";
1062                 callback = dev->pm_domain->ops.complete;
1063         } else if (dev->type && dev->type->pm) {
1064                 info = "completing type ";
1065                 callback = dev->type->pm->complete;
1066         } else if (dev->class && dev->class->pm) {
1067                 info = "completing class ";
1068                 callback = dev->class->pm->complete;
1069         } else if (dev->bus && dev->bus->pm) {
1070                 info = "completing bus ";
1071                 callback = dev->bus->pm->complete;
1072         }
1073
1074         if (!callback && dev->driver && dev->driver->pm) {
1075                 info = "completing driver ";
1076                 callback = dev->driver->pm->complete;
1077         }
1078
1079         if (callback) {
1080                 pm_dev_dbg(dev, state, info);
1081                 callback(dev);
1082         }
1083
1084         device_unlock(dev);
1085
1086         pm_runtime_put(dev);
1087 }
1088
1089 /**
1090  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1091  * @state: PM transition of the system being carried out.
1092  *
1093  * Execute the ->complete() callbacks for all devices whose PM status is not
1094  * DPM_ON (this allows new devices to be registered).
1095  */
1096 void dpm_complete(pm_message_t state)
1097 {
1098         struct list_head list;
1099
1100         trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1101         might_sleep();
1102
1103         INIT_LIST_HEAD(&list);
1104         mutex_lock(&dpm_list_mtx);
1105         while (!list_empty(&dpm_prepared_list)) {
1106                 struct device *dev = to_device(dpm_prepared_list.prev);
1107
1108                 get_device(dev);
1109                 dev->power.is_prepared = false;
1110                 list_move(&dev->power.entry, &list);
1111                 mutex_unlock(&dpm_list_mtx);
1112
1113                 trace_device_pm_callback_start(dev, "", state.event);
1114                 device_complete(dev, state);
1115                 trace_device_pm_callback_end(dev, 0);
1116
1117                 mutex_lock(&dpm_list_mtx);
1118                 put_device(dev);
1119         }
1120         list_splice(&list, &dpm_list);
1121         mutex_unlock(&dpm_list_mtx);
1122
1123         /* Allow device probing and trigger re-probing of deferred devices */
1124         device_unblock_probing();
1125         trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1126 }
1127
1128 /**
1129  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1130  * @state: PM transition of the system being carried out.
1131  *
1132  * Execute "resume" callbacks for all devices and complete the PM transition of
1133  * the system.
1134  */
1135 void dpm_resume_end(pm_message_t state)
1136 {
1137         dpm_resume(state);
1138         dpm_complete(state);
1139 }
1140 EXPORT_SYMBOL_GPL(dpm_resume_end);
1141
1142
1143 /*------------------------- Suspend routines -------------------------*/
1144
1145 /**
1146  * resume_event - Return a "resume" message for given "suspend" sleep state.
1147  * @sleep_state: PM message representing a sleep state.
1148  *
1149  * Return a PM message representing the resume event corresponding to given
1150  * sleep state.
1151  */
1152 static pm_message_t resume_event(pm_message_t sleep_state)
1153 {
1154         switch (sleep_state.event) {
1155         case PM_EVENT_SUSPEND:
1156                 return PMSG_RESUME;
1157         case PM_EVENT_FREEZE:
1158         case PM_EVENT_QUIESCE:
1159                 return PMSG_RECOVER;
1160         case PM_EVENT_HIBERNATE:
1161                 return PMSG_RESTORE;
1162         }
1163         return PMSG_ON;
1164 }
1165
1166 static void dpm_superior_set_must_resume(struct device *dev)
1167 {
1168         struct device_link *link;
1169         int idx;
1170
1171         if (dev->parent)
1172                 dev->parent->power.must_resume = true;
1173
1174         idx = device_links_read_lock();
1175
1176         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
1177                 link->supplier->power.must_resume = true;
1178
1179         device_links_read_unlock(idx);
1180 }
1181
1182 /**
1183  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1184  * @dev: Device to handle.
1185  * @state: PM transition of the system being carried out.
1186  * @async: If true, the device is being suspended asynchronously.
1187  *
1188  * The driver of @dev will not receive interrupts while this function is being
1189  * executed.
1190  */
1191 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1192 {
1193         pm_callback_t callback = NULL;
1194         const char *info = NULL;
1195         int error = 0;
1196
1197         TRACE_DEVICE(dev);
1198         TRACE_SUSPEND(0);
1199
1200         dpm_wait_for_subordinate(dev, async);
1201
1202         if (async_error)
1203                 goto Complete;
1204
1205         if (dev->power.syscore || dev->power.direct_complete)
1206                 goto Complete;
1207
1208         if (dev->pm_domain) {
1209                 info = "noirq power domain ";
1210                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1211         } else if (dev->type && dev->type->pm) {
1212                 info = "noirq type ";
1213                 callback = pm_noirq_op(dev->type->pm, state);
1214         } else if (dev->class && dev->class->pm) {
1215                 info = "noirq class ";
1216                 callback = pm_noirq_op(dev->class->pm, state);
1217         } else if (dev->bus && dev->bus->pm) {
1218                 info = "noirq bus ";
1219                 callback = pm_noirq_op(dev->bus->pm, state);
1220         }
1221         if (callback)
1222                 goto Run;
1223
1224         if (dev_pm_skip_suspend(dev))
1225                 goto Skip;
1226
1227         if (dev->driver && dev->driver->pm) {
1228                 info = "noirq driver ";
1229                 callback = pm_noirq_op(dev->driver->pm, state);
1230         }
1231
1232 Run:
1233         error = dpm_run_callback(callback, dev, state, info);
1234         if (error) {
1235                 async_error = error;
1236                 goto Complete;
1237         }
1238
1239 Skip:
1240         dev->power.is_noirq_suspended = true;
1241
1242         /*
1243          * Skipping the resume of devices that were in use right before the
1244          * system suspend (as indicated by their PM-runtime usage counters)
1245          * would be suboptimal.  Also resume them if skipping their resume
1246          * has not been allowed (see the DPM_FLAG_MAY_SKIP_RESUME check below).
1247          */
1248         if (atomic_read(&dev->power.usage_count) > 1 ||
1249             !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
1250               dev->power.may_skip_resume))
1251                 dev->power.must_resume = true;
1252
1253         if (dev->power.must_resume)
1254                 dpm_superior_set_must_resume(dev);
1255
1256 Complete:
1257         complete_all(&dev->power.completion);
1258         TRACE_SUSPEND(error);
1259         return error;
1260 }
1261
1262 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1263 {
1264         struct device *dev = (struct device *)data;
1265         int error;
1266
1267         error = __device_suspend_noirq(dev, pm_transition, true);
1268         if (error) {
1269                 dpm_save_failed_dev(dev_name(dev));
1270                 pm_dev_err(dev, pm_transition, " async", error);
1271         }
1272
1273         put_device(dev);
1274 }
1275
1276 static int device_suspend_noirq(struct device *dev)
1277 {
1278         if (dpm_async_fn(dev, async_suspend_noirq))
1279                 return 0;
1280
1281         return __device_suspend_noirq(dev, pm_transition, false);
1282 }
1283
1284 static int dpm_noirq_suspend_devices(pm_message_t state)
1285 {
1286         ktime_t starttime = ktime_get();
1287         int error = 0;
1288
1289         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1290         mutex_lock(&dpm_list_mtx);
1291         pm_transition = state;
1292         async_error = 0;
1293
1294         while (!list_empty(&dpm_late_early_list)) {
1295                 struct device *dev = to_device(dpm_late_early_list.prev);
1296
1297                 get_device(dev);
1298                 mutex_unlock(&dpm_list_mtx);
1299
1300                 error = device_suspend_noirq(dev);
1301
1302                 mutex_lock(&dpm_list_mtx);
1303                 if (error) {
1304                         pm_dev_err(dev, state, " noirq", error);
1305                         dpm_save_failed_dev(dev_name(dev));
1306                         put_device(dev);
1307                         break;
1308                 }
1309                 if (!list_empty(&dev->power.entry))
1310                         list_move(&dev->power.entry, &dpm_noirq_list);
1311                 put_device(dev);
1312
1313                 if (async_error)
1314                         break;
1315         }
1316         mutex_unlock(&dpm_list_mtx);
1317         async_synchronize_full();
1318         if (!error)
1319                 error = async_error;
1320
1321         if (error) {
1322                 suspend_stats.failed_suspend_noirq++;
1323                 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1324         }
1325         dpm_show_time(starttime, state, error, "noirq");
1326         trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1327         return error;
1328 }
1329
1330 /**
1331  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1332  * @state: PM transition of the system being carried out.
1333  *
1334  * Prevent device drivers' interrupt handlers from being called and invoke
1335  * "noirq" suspend callbacks for all non-sysdev devices.
1336  */
1337 int dpm_suspend_noirq(pm_message_t state)
1338 {
1339         int ret;
1340
1341         cpuidle_pause();
1342
1343         device_wakeup_arm_wake_irqs();
1344         suspend_device_irqs();
1345
1346         ret = dpm_noirq_suspend_devices(state);
1347         if (ret)
1348                 dpm_resume_noirq(resume_event(state));
1349
1350         return ret;
1351 }
1352
1353 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1354 {
1355         struct device *parent = dev->parent;
1356
1357         if (!parent)
1358                 return;
1359
1360         spin_lock_irq(&parent->power.lock);
1361
1362         if (device_wakeup_path(dev) && !parent->power.ignore_children)
1363                 parent->power.wakeup_path = true;
1364
1365         spin_unlock_irq(&parent->power.lock);
1366 }
1367
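/*
 * Illustrative sketch: power.wakeup_path is set in __device_suspend() below
 * for devices that may wake up the system and is propagated to ancestors
 * here, so that the whole path to a wakeup source stays functional.  A driver
 * usually marks its device as wakeup-capable with something like
 *
 *	device_init_wakeup(dev, true);
 *
 * after which device_may_wakeup(dev) returns true (userspace can still flip
 * the setting through the device's power/wakeup sysfs attribute).
 */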
1368 /**
1369  * __device_suspend_late - Execute a "late suspend" callback for given device.
1370  * @dev: Device to handle.
1371  * @state: PM transition of the system being carried out.
1372  * @async: If true, the device is being suspended asynchronously.
1373  *
1374  * Runtime PM is disabled for @dev while this function is being executed.
1375  */
1376 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1377 {
1378         pm_callback_t callback = NULL;
1379         const char *info = NULL;
1380         int error = 0;
1381
1382         TRACE_DEVICE(dev);
1383         TRACE_SUSPEND(0);
1384
1385         __pm_runtime_disable(dev, false);
1386
1387         dpm_wait_for_subordinate(dev, async);
1388
1389         if (async_error)
1390                 goto Complete;
1391
1392         if (pm_wakeup_pending()) {
1393                 async_error = -EBUSY;
1394                 goto Complete;
1395         }
1396
1397         if (dev->power.syscore || dev->power.direct_complete)
1398                 goto Complete;
1399
1400         if (dev->pm_domain) {
1401                 info = "late power domain ";
1402                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1403         } else if (dev->type && dev->type->pm) {
1404                 info = "late type ";
1405                 callback = pm_late_early_op(dev->type->pm, state);
1406         } else if (dev->class && dev->class->pm) {
1407                 info = "late class ";
1408                 callback = pm_late_early_op(dev->class->pm, state);
1409         } else if (dev->bus && dev->bus->pm) {
1410                 info = "late bus ";
1411                 callback = pm_late_early_op(dev->bus->pm, state);
1412         }
1413         if (callback)
1414                 goto Run;
1415
1416         if (dev_pm_skip_suspend(dev))
1417                 goto Skip;
1418
1419         if (dev->driver && dev->driver->pm) {
1420                 info = "late driver ";
1421                 callback = pm_late_early_op(dev->driver->pm, state);
1422         }
1423
1424 Run:
1425         error = dpm_run_callback(callback, dev, state, info);
1426         if (error) {
1427                 async_error = error;
1428                 goto Complete;
1429         }
1430         dpm_propagate_wakeup_to_parent(dev);
1431
1432 Skip:
1433         dev->power.is_late_suspended = true;
1434
1435 Complete:
1436         TRACE_SUSPEND(error);
1437         complete_all(&dev->power.completion);
1438         return error;
1439 }
1440
1441 static void async_suspend_late(void *data, async_cookie_t cookie)
1442 {
1443         struct device *dev = (struct device *)data;
1444         int error;
1445
1446         error = __device_suspend_late(dev, pm_transition, true);
1447         if (error) {
1448                 dpm_save_failed_dev(dev_name(dev));
1449                 pm_dev_err(dev, pm_transition, " async", error);
1450         }
1451         put_device(dev);
1452 }
1453
1454 static int device_suspend_late(struct device *dev)
1455 {
1456         if (dpm_async_fn(dev, async_suspend_late))
1457                 return 0;
1458
1459         return __device_suspend_late(dev, pm_transition, false);
1460 }
1461
1462 /**
1463  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1464  * @state: PM transition of the system being carried out.
1465  */
1466 int dpm_suspend_late(pm_message_t state)
1467 {
1468         ktime_t starttime = ktime_get();
1469         int error = 0;
1470
1471         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1472         mutex_lock(&dpm_list_mtx);
1473         pm_transition = state;
1474         async_error = 0;
1475
1476         while (!list_empty(&dpm_suspended_list)) {
1477                 struct device *dev = to_device(dpm_suspended_list.prev);
1478
1479                 get_device(dev);
1480                 mutex_unlock(&dpm_list_mtx);
1481
1482                 error = device_suspend_late(dev);
1483
1484                 mutex_lock(&dpm_list_mtx);
1485                 if (!list_empty(&dev->power.entry))
1486                         list_move(&dev->power.entry, &dpm_late_early_list);
1487
1488                 if (error) {
1489                         pm_dev_err(dev, state, " late", error);
1490                         dpm_save_failed_dev(dev_name(dev));
1491                         put_device(dev);
1492                         break;
1493                 }
1494                 put_device(dev);
1495
1496                 if (async_error)
1497                         break;
1498         }
1499         mutex_unlock(&dpm_list_mtx);
1500         async_synchronize_full();
1501         if (!error)
1502                 error = async_error;
1503         if (error) {
1504                 suspend_stats.failed_suspend_late++;
1505                 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1506                 dpm_resume_early(resume_event(state));
1507         }
1508         dpm_show_time(starttime, state, error, "late");
1509         trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1510         return error;
1511 }
1512
1513 /**
1514  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1515  * @state: PM transition of the system being carried out.
1516  */
1517 int dpm_suspend_end(pm_message_t state)
1518 {
1519         ktime_t starttime = ktime_get();
1520         int error;
1521
1522         error = dpm_suspend_late(state);
1523         if (error)
1524                 goto out;
1525
1526         error = dpm_suspend_noirq(state);
1527         if (error)
1528                 dpm_resume_early(resume_event(state));
1529
1530 out:
1531         dpm_show_time(starttime, state, error, "end");
1532         return error;
1533 }
1534 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1535
1536 /**
1537  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1538  * @dev: Device to suspend.
1539  * @state: PM transition of the system being carried out.
1540  * @cb: Suspend callback to execute.
1541  * @info: string description of caller.
1542  */
1543 static int legacy_suspend(struct device *dev, pm_message_t state,
1544                           int (*cb)(struct device *dev, pm_message_t state),
1545                           const char *info)
1546 {
1547         int error;
1548         ktime_t calltime;
1549
1550         calltime = initcall_debug_start(dev, cb);
1551
1552         trace_device_pm_callback_start(dev, info, state.event);
1553         error = cb(dev, state);
1554         trace_device_pm_callback_end(dev, error);
1555         suspend_report_result(cb, error);
1556
1557         initcall_debug_report(dev, calltime, cb, error);
1558
1559         return error;
1560 }
1561
1562 static void dpm_clear_superiors_direct_complete(struct device *dev)
1563 {
1564         struct device_link *link;
1565         int idx;
1566
1567         if (dev->parent) {
1568                 spin_lock_irq(&dev->parent->power.lock);
1569                 dev->parent->power.direct_complete = false;
1570                 spin_unlock_irq(&dev->parent->power.lock);
1571         }
1572
1573         idx = device_links_read_lock();
1574
1575         list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
1576                 spin_lock_irq(&link->supplier->power.lock);
1577                 link->supplier->power.direct_complete = false;
1578                 spin_unlock_irq(&link->supplier->power.lock);
1579         }
1580
1581         device_links_read_unlock(idx);
1582 }
1583
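/*
 * Illustrative sketch (foo_prepare() is hypothetical): power.direct_complete
 * is set during the "prepare" phase when a device's ->prepare() callback
 * returns a positive value, the device is runtime-suspended and
 * DPM_FLAG_NO_DIRECT_COMPLETE is not set.  A subsystem that wants an already
 * suspended device left untouched can therefore do, for example,
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * The direct_complete checks in __device_suspend() below and in
 * device_resume() then skip the remaining callbacks for that device.
 */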
1584 /**
1585  * __device_suspend - Execute "suspend" callbacks for given device.
1586  * @dev: Device to handle.
1587  * @state: PM transition of the system being carried out.
1588  * @async: If true, the device is being suspended asynchronously.
1589  */
1590 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1591 {
1592         pm_callback_t callback = NULL;
1593         const char *info = NULL;
1594         int error = 0;
1595         DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1596
1597         TRACE_DEVICE(dev);
1598         TRACE_SUSPEND(0);
1599
1600         dpm_wait_for_subordinate(dev, async);
1601
1602         if (async_error) {
1603                 dev->power.direct_complete = false;
1604                 goto Complete;
1605         }
1606
1607         /*
1608          * Wait for possible runtime PM transitions of the device in progress
1609          * to complete and if there's a runtime resume request pending for it,
1610          * resume it before proceeding with invoking the system-wide suspend
1611          * callbacks for it.
1612          *
1613          * If the system-wide suspend callbacks below change the configuration
1614          * of the device, they must disable runtime PM for it or otherwise
1615          * ensure that its runtime-resume callbacks will not be confused by that
1616          * change in case they are invoked going forward.
1617          */
1618         pm_runtime_barrier(dev);
1619
1620         if (pm_wakeup_pending()) {
1621                 dev->power.direct_complete = false;
1622                 async_error = -EBUSY;
1623                 goto Complete;
1624         }
1625
1626         if (dev->power.syscore)
1627                 goto Complete;
1628
1629         /* Avoid direct_complete to let wakeup_path propagate. */
1630         if (device_may_wakeup(dev) || device_wakeup_path(dev))
1631                 dev->power.direct_complete = false;
1632
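        /*
         * If direct_complete is still set, try to take the shortcut: disable
         * runtime PM to lock the device's runtime status and, if it is still
         * runtime-suspended, skip all system suspend callbacks for it (runtime
         * PM is re-enabled when the device is resumed).  Otherwise re-enable
         * runtime PM and fall through to the full suspend path.
         */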
1633         if (dev->power.direct_complete) {
1634                 if (pm_runtime_status_suspended(dev)) {
1635                         pm_runtime_disable(dev);
1636                         if (pm_runtime_status_suspended(dev)) {
1637                                 pm_dev_dbg(dev, state, "direct-complete ");
1638                                 goto Complete;
1639                         }
1640
1641                         pm_runtime_enable(dev);
1642                 }
1643                 dev->power.direct_complete = false;
1644         }
1645
1646         dev->power.may_skip_resume = true;
1647         dev->power.must_resume = false;
1648
1649         dpm_watchdog_set(&wd, dev);
1650         device_lock(dev);
1651
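        /*
         * Look for the most specific suspend callback: PM domain first, then
         * device type, class and bus.  The driver's own callbacks are used
         * only if none of those layers provide any.
         */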
1652         if (dev->pm_domain) {
1653                 info = "power domain ";
1654                 callback = pm_op(&dev->pm_domain->ops, state);
1655                 goto Run;
1656         }
1657
1658         if (dev->type && dev->type->pm) {
1659                 info = "type ";
1660                 callback = pm_op(dev->type->pm, state);
1661                 goto Run;
1662         }
1663
1664         if (dev->class && dev->class->pm) {
1665                 info = "class ";
1666                 callback = pm_op(dev->class->pm, state);
1667                 goto Run;
1668         }
1669
1670         if (dev->bus) {
1671                 if (dev->bus->pm) {
1672                         info = "bus ";
1673                         callback = pm_op(dev->bus->pm, state);
1674                 } else if (dev->bus->suspend) {
1675                         pm_dev_dbg(dev, state, "legacy bus ");
1676                         error = legacy_suspend(dev, state, dev->bus->suspend,
1677                                                 "legacy bus ");
1678                         goto End;
1679                 }
1680         }
1681
1682  Run:
1683         if (!callback && dev->driver && dev->driver->pm) {
1684                 info = "driver ";
1685                 callback = pm_op(dev->driver->pm, state);
1686         }
1687
1688         error = dpm_run_callback(callback, dev, state, info);
1689
1690  End:
1691         if (!error) {
1692                 dev->power.is_suspended = true;
1693                 if (device_may_wakeup(dev))
1694                         dev->power.wakeup_path = true;
1695
1696                 dpm_propagate_wakeup_to_parent(dev);
1697                 dpm_clear_superiors_direct_complete(dev);
1698         }
1699
1700         device_unlock(dev);
1701         dpm_watchdog_clear(&wd);
1702
1703  Complete:
1704         if (error)
1705                 async_error = error;
1706
1707         complete_all(&dev->power.completion);
1708         TRACE_SUSPEND(error);
1709         return error;
1710 }
1711
1712 static void async_suspend(void *data, async_cookie_t cookie)
1713 {
1714         struct device *dev = (struct device *)data;
1715         int error;
1716
1717         error = __device_suspend(dev, pm_transition, true);
1718         if (error) {
1719                 dpm_save_failed_dev(dev_name(dev));
1720                 pm_dev_err(dev, pm_transition, " async", error);
1721         }
1722
1723         put_device(dev);
1724 }
1725
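/*
 * device_suspend() schedules the suspend asynchronously when the device is
 * marked for async suspend and async PM is enabled; otherwise it invokes
 * __device_suspend() synchronously in the caller's context.
 */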
1726 static int device_suspend(struct device *dev)
1727 {
1728         if (dpm_async_fn(dev, async_suspend))
1729                 return 0;
1730
1731         return __device_suspend(dev, pm_transition, false);
1732 }
1733
1734 /**
1735  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1736  * @state: PM transition of the system being carried out.
1737  */
1738 int dpm_suspend(pm_message_t state)
1739 {
1740         ktime_t starttime = ktime_get();
1741         int error = 0;
1742
1743         trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1744         might_sleep();
1745
1746         devfreq_suspend();
1747         cpufreq_suspend();
1748
1749         mutex_lock(&dpm_list_mtx);
1750         pm_transition = state;
1751         async_error = 0;
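        /*
         * Take devices from the tail of dpm_prepared_list, so children (added
         * to the list after their parents) are suspended before the parents
         * they depend on.
         */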
1752         while (!list_empty(&dpm_prepared_list)) {
1753                 struct device *dev = to_device(dpm_prepared_list.prev);
1754
1755                 get_device(dev);
1756                 mutex_unlock(&dpm_list_mtx);
1757
1758                 error = device_suspend(dev);
1759
1760                 mutex_lock(&dpm_list_mtx);
1761                 if (error) {
1762                         pm_dev_err(dev, state, "", error);
1763                         dpm_save_failed_dev(dev_name(dev));
1764                         put_device(dev);
1765                         break;
1766                 }
1767                 if (!list_empty(&dev->power.entry))
1768                         list_move(&dev->power.entry, &dpm_suspended_list);
1769                 put_device(dev);
1770                 if (async_error)
1771                         break;
1772         }
1773         mutex_unlock(&dpm_list_mtx);
1774         async_synchronize_full();
1775         if (!error)
1776                 error = async_error;
1777         if (error) {
1778                 suspend_stats.failed_suspend++;
1779                 dpm_save_failed_step(SUSPEND_SUSPEND);
1780         }
1781         dpm_show_time(starttime, state, error, NULL);
1782         trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1783         return error;
1784 }
1785
1786 /**
1787  * device_prepare - Prepare a device for system power transition.
1788  * @dev: Device to handle.
1789  * @state: PM transition of the system being carried out.
1790  *
1791  * Execute the ->prepare() callback(s) for the given device.  No new children
1792  * of the device may be registered after this function has returned.
1793  */
1794 static int device_prepare(struct device *dev, pm_message_t state)
1795 {
1796         int (*callback)(struct device *) = NULL;
1797         int ret = 0;
1798
1799         if (dev->power.syscore)
1800                 return 0;
1801
1802         /*
1803          * If a device's parent goes into runtime suspend at the wrong time,
1804          * it won't be possible to resume the device.  To prevent this we
1805          * block runtime suspend here, during the prepare phase, and allow
1806          * it again during the complete phase.
1807          */
1808         pm_runtime_get_noresume(dev);
1809
1810         device_lock(dev);
1811
1812         dev->power.wakeup_path = false;
1813
1814         if (dev->power.no_pm_callbacks)
1815                 goto unlock;
1816
1817         if (dev->pm_domain)
1818                 callback = dev->pm_domain->ops.prepare;
1819         else if (dev->type && dev->type->pm)
1820                 callback = dev->type->pm->prepare;
1821         else if (dev->class && dev->class->pm)
1822                 callback = dev->class->pm->prepare;
1823         else if (dev->bus && dev->bus->pm)
1824                 callback = dev->bus->pm->prepare;
1825
1826         if (!callback && dev->driver && dev->driver->pm)
1827                 callback = dev->driver->pm->prepare;
1828
1829         if (callback)
1830                 ret = callback(dev);
1831
1832 unlock:
1833         device_unlock(dev);
1834
1835         if (ret < 0) {
1836                 suspend_report_result(callback, ret);
1837                 pm_runtime_put(dev);
1838                 return ret;
1839         }
1840         /*
1841          * A positive return value from ->prepare() means "this device appears
1842          * to be runtime-suspended and its state is fine, so if it really is
1843          * runtime-suspended, you can leave it in that state provided that you
1844          * will do the same thing with all of its descendants".  This only
1845          * applies to suspend transitions, however.
1846          */
1847         spin_lock_irq(&dev->power.lock);
1848         dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1849                 (ret > 0 || dev->power.no_pm_callbacks) &&
1850                 !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
1851         spin_unlock_irq(&dev->power.lock);
1852         return 0;
1853 }
1854
1855 /**
1856  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1857  * @state: PM transition of the system being carried out.
1858  *
1859  * Execute the ->prepare() callback(s) for all devices.
1860  */
1861 int dpm_prepare(pm_message_t state)
1862 {
1863         int error = 0;
1864
1865         trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1866         might_sleep();
1867
1868         /*
1869          * Give the known devices a chance to complete their probes before we
1870          * disable probing of devices.  This sync point matters at least at
1871          * boot time and during hibernation restore.
1872          */
1873         wait_for_device_probe();
1874         /*
1875          * It is unsafe for device probing to happen during suspend or
1876          * hibernation; system behavior would be unpredictable in that case.
1877          * So prohibit device probing here and defer the probes instead.  The
1878          * normal behavior will be restored in dpm_complete().
1879          */
1880         device_block_probing();
1881
1882         mutex_lock(&dpm_list_mtx);
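        /*
         * Walk dpm_list from the head, so parents are prepared before their
         * children, and move prepared devices to the tail of
         * dpm_prepared_list in the same order.
         */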
1883         while (!list_empty(&dpm_list)) {
1884                 struct device *dev = to_device(dpm_list.next);
1885
1886                 get_device(dev);
1887                 mutex_unlock(&dpm_list_mtx);
1888
1889                 trace_device_pm_callback_start(dev, "", state.event);
1890                 error = device_prepare(dev, state);
1891                 trace_device_pm_callback_end(dev, error);
1892
1893                 mutex_lock(&dpm_list_mtx);
1894                 if (error) {
1895                         if (error == -EAGAIN) {
1896                                 put_device(dev);
1897                                 error = 0;
1898                                 continue;
1899                         }
1900                         pr_info("Device %s not prepared for power transition: code %d\n",
1901                                 dev_name(dev), error);
1902                         put_device(dev);
1903                         break;
1904                 }
1905                 dev->power.is_prepared = true;
1906                 if (!list_empty(&dev->power.entry))
1907                         list_move_tail(&dev->power.entry, &dpm_prepared_list);
1908                 put_device(dev);
1909         }
1910         mutex_unlock(&dpm_list_mtx);
1911         trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1912         return error;
1913 }
1914
1915 /**
1916  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1917  * @state: PM transition of the system being carried out.
1918  *
1919  * Prepare all non-sysdev devices for a system PM transition and execute
1920  * "suspend" callbacks for them.
1921  */
1922 int dpm_suspend_start(pm_message_t state)
1923 {
1924         ktime_t starttime = ktime_get();
1925         int error;
1926
1927         error = dpm_prepare(state);
1928         if (error) {
1929                 suspend_stats.failed_prepare++;
1930                 dpm_save_failed_step(SUSPEND_PREPARE);
1931         } else
1932                 error = dpm_suspend(state);
1933         dpm_show_time(starttime, state, error, "start");
1934         return error;
1935 }
1936 EXPORT_SYMBOL_GPL(dpm_suspend_start);
1937
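/*
 * Normally reached via the suspend_report_result() macro, which passes the
 * caller's __func__ as @function (see include/linux/pm.h).
 */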
1938 void __suspend_report_result(const char *function, void *fn, int ret)
1939 {
1940         if (ret)
1941                 pr_err("%s(): %pS returns %d\n", function, fn, ret);
1942 }
1943 EXPORT_SYMBOL_GPL(__suspend_report_result);
1944
1945 /**
1946  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1947  * @subordinate: Device that needs to wait for @dev.
1948  * @dev: Device to wait for.
1949  */
1950 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1951 {
1952         dpm_wait(dev, subordinate->power.async_suspend);
1953         return async_error;
1954 }
1955 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1956
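/*
 * Minimal usage sketch (hypothetical driver code, not part of this file):
 * a driver whose device has a functional dependency on another device can
 * wait for that device to be handled first, e.g. from its ->resume()
 * callback.  "struct foo", "foo->companion" and foo_hw_reinit() are made-up
 * names:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_hw_reinit(foo);
 *	}
 */
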
1957 /**
1958  * dpm_for_each_dev - device iterator.
1959  * @data: data for the callback.
1960  * @fn: function to be called for each device.
1961  *
1962  * Iterate over devices in dpm_list, and call @fn for each device,
1963  * passing it @data.
1964  */
1965 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1966 {
1967         struct device *dev;
1968
1969         if (!fn)
1970                 return;
1971
1972         device_pm_lock();
1973         list_for_each_entry(dev, &dpm_list, power.entry)
1974                 fn(dev, data);
1975         device_pm_unlock();
1976 }
1977 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1978
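/*
 * Minimal usage sketch (hypothetical caller, not part of this file): count
 * the devices currently on dpm_list with a trivial callback:
 *
 *	static void count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_one);
 */
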
1979 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1980 {
1981         if (!ops)
1982                 return true;
1983
1984         return !ops->prepare &&
1985                !ops->suspend &&
1986                !ops->suspend_late &&
1987                !ops->suspend_noirq &&
1988                !ops->resume_noirq &&
1989                !ops->resume_early &&
1990                !ops->resume &&
1991                !ops->complete;
1992 }
1993
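/*
 * Update dev->power.no_pm_callbacks: it is set when none of the device's bus,
 * class, type, PM domain or driver provide any system sleep callbacks
 * (including legacy bus and driver suspend/resume routines).  Among other
 * things, this lets device_prepare() take the direct_complete path for the
 * device even without a positive return value from ->prepare().
 */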
1994 void device_pm_check_callbacks(struct device *dev)
1995 {
1996         spin_lock_irq(&dev->power.lock);
1997         dev->power.no_pm_callbacks =
1998                 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1999                  !dev->bus->suspend && !dev->bus->resume)) &&
2000                 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2001                 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2002                 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2003                 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2004                  !dev->driver->suspend && !dev->driver->resume));
2005         spin_unlock_irq(&dev->power.lock);
2006 }
2007
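/*
 * dev_pm_skip_suspend() returns true when the driver has set
 * DPM_FLAG_SMART_SUSPEND and the device is already runtime-suspended, in
 * which case the "late" and "noirq" suspend phases may leave the device
 * alone instead of invoking its callbacks again.
 */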
2008 bool dev_pm_skip_suspend(struct device *dev)
2009 {
2010         return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2011                 pm_runtime_status_suspended(dev);
2012 }